xref: /openbmc/linux/drivers/ata/libata-core.c (revision 12eb4683)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/platform_device.h>
71 
72 #include "libata.h"
73 #include "libata-transport.h"
74 
75 /* debounce timing parameters in msecs { interval, duration, timeout } */
76 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
77 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
78 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
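/*
 * Illustrative sketch (not part of this file): these tables are the
 * @params argument expected by the SATA PHY helpers, e.g.
 *
 *	rc = sata_link_resume(link, sata_deb_timing_normal, deadline);
 *
 * where the three values are taken as interval, duration and timeout
 * in msecs, as noted above.
 */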
79 
80 const struct ata_port_operations ata_base_port_ops = {
81 	.prereset		= ata_std_prereset,
82 	.postreset		= ata_std_postreset,
83 	.error_handler		= ata_std_error_handler,
84 	.sched_eh		= ata_std_sched_eh,
85 	.end_eh			= ata_std_end_eh,
86 };
87 
88 const struct ata_port_operations sata_port_ops = {
89 	.inherits		= &ata_base_port_ops,
90 
91 	.qc_defer		= ata_std_qc_defer,
92 	.hardreset		= sata_std_hardreset,
93 };
94 
95 static unsigned int ata_dev_init_params(struct ata_device *dev,
96 					u16 heads, u16 sectors);
97 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
98 static void ata_dev_xfermask(struct ata_device *dev);
99 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
100 
101 atomic_t ata_print_id = ATOMIC_INIT(0);
102 
103 struct ata_force_param {
104 	const char	*name;
105 	unsigned int	cbl;
106 	int		spd_limit;
107 	unsigned long	xfer_mask;
108 	unsigned int	horkage_on;
109 	unsigned int	horkage_off;
110 	unsigned int	lflags;
111 };
112 
113 struct ata_force_ent {
114 	int			port;
115 	int			device;
116 	struct ata_force_param	param;
117 };
118 
119 static struct ata_force_ent *ata_force_tbl;
120 static int ata_force_tbl_size;
121 
122 static char ata_force_param_buf[PAGE_SIZE] __initdata;
123 /* param_buf is thrown away after initialization, disallow read */
124 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
125 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
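/*
 * Example (illustrative; see Documentation/kernel-parameters.txt for the
 * full syntax): booting with
 *
 *	libata.force=1.00:40c,udma4
 *
 * forces a 40-wire cable and a UDMA/66 transfer mode limit on device 0
 * of port 1.
 */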
126 
127 static int atapi_enabled = 1;
128 module_param(atapi_enabled, int, 0444);
129 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
130 
131 static int atapi_dmadir = 0;
132 module_param(atapi_dmadir, int, 0444);
133 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
134 
135 int atapi_passthru16 = 1;
136 module_param(atapi_passthru16, int, 0444);
137 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
138 
139 int libata_fua = 0;
140 module_param_named(fua, libata_fua, int, 0444);
141 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
142 
143 static int ata_ignore_hpa;
144 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
145 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
146 
147 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
148 module_param_named(dma, libata_dma_mask, int, 0444);
149 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
150 
151 static int ata_probe_timeout;
152 module_param(ata_probe_timeout, int, 0444);
153 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
154 
155 int libata_noacpi = 0;
156 module_param_named(noacpi, libata_noacpi, int, 0444);
157 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
158 
159 int libata_allow_tpm = 0;
160 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
161 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162 
163 static int atapi_an;
164 module_param(atapi_an, int, 0444);
165 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
166 
167 MODULE_AUTHOR("Jeff Garzik");
168 MODULE_DESCRIPTION("Library module for ATA devices");
169 MODULE_LICENSE("GPL");
170 MODULE_VERSION(DRV_VERSION);
171 
172 
173 static bool ata_sstatus_online(u32 sstatus)
174 {
175 	return (sstatus & 0xf) == 0x3;
176 }
177 
178 /**
179  *	ata_link_next - link iteration helper
180  *	@link: the previous link, NULL to start
181  *	@ap: ATA port containing links to iterate
182  *	@mode: iteration mode, one of ATA_LITER_*
183  *
184  *	LOCKING:
185  *	Host lock or EH context.
186  *
187  *	RETURNS:
188  *	Pointer to the next link.
189  */
190 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
191 			       enum ata_link_iter_mode mode)
192 {
193 	BUG_ON(mode != ATA_LITER_EDGE &&
194 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
195 
196 	/* NULL link indicates start of iteration */
197 	if (!link)
198 		switch (mode) {
199 		case ATA_LITER_EDGE:
200 		case ATA_LITER_PMP_FIRST:
201 			if (sata_pmp_attached(ap))
202 				return ap->pmp_link;
203 			/* fall through */
204 		case ATA_LITER_HOST_FIRST:
205 			return &ap->link;
206 		}
207 
208 	/* we just iterated over the host link, what's next? */
209 	if (link == &ap->link)
210 		switch (mode) {
211 		case ATA_LITER_HOST_FIRST:
212 			if (sata_pmp_attached(ap))
213 				return ap->pmp_link;
214 			/* fall through */
215 		case ATA_LITER_PMP_FIRST:
216 			if (unlikely(ap->slave_link))
217 				return ap->slave_link;
218 			/* fall through */
219 		case ATA_LITER_EDGE:
220 			return NULL;
221 		}
222 
223 	/* slave_link excludes PMP */
224 	if (unlikely(link == ap->slave_link))
225 		return NULL;
226 
227 	/* we were over a PMP link */
228 	if (++link < ap->pmp_link + ap->nr_pmp_links)
229 		return link;
230 
231 	if (mode == ATA_LITER_PMP_FIRST)
232 		return &ap->link;
233 
234 	return NULL;
235 }
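/*
 * Usage sketch (assuming the ata_for_each_link() wrapper from
 * <linux/libata.h>, which drivers normally use instead of calling
 * ata_link_next() directly):
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_link_info(link, "visiting link (pmp=%d)\n", link->pmp);
 */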
236 
237 /**
238  *	ata_dev_next - device iteration helper
239  *	@dev: the previous device, NULL to start
240  *	@link: ATA link containing devices to iterate
241  *	@mode: iteration mode, one of ATA_DITER_*
242  *
243  *	LOCKING:
244  *	Host lock or EH context.
245  *
246  *	RETURNS:
247  *	Pointer to the next device.
248  */
249 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
250 				enum ata_dev_iter_mode mode)
251 {
252 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
253 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
254 
255 	/* NULL dev indicates start of iteration */
256 	if (!dev)
257 		switch (mode) {
258 		case ATA_DITER_ENABLED:
259 		case ATA_DITER_ALL:
260 			dev = link->device;
261 			goto check;
262 		case ATA_DITER_ENABLED_REVERSE:
263 		case ATA_DITER_ALL_REVERSE:
264 			dev = link->device + ata_link_max_devices(link) - 1;
265 			goto check;
266 		}
267 
268  next:
269 	/* move to the next one */
270 	switch (mode) {
271 	case ATA_DITER_ENABLED:
272 	case ATA_DITER_ALL:
273 		if (++dev < link->device + ata_link_max_devices(link))
274 			goto check;
275 		return NULL;
276 	case ATA_DITER_ENABLED_REVERSE:
277 	case ATA_DITER_ALL_REVERSE:
278 		if (--dev >= link->device)
279 			goto check;
280 		return NULL;
281 	}
282 
283  check:
284 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
285 	    !ata_dev_enabled(dev))
286 		goto next;
287 	return dev;
288 }
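/*
 * Usage sketch (assuming the matching ata_for_each_dev() wrapper from
 * <linux/libata.h>):
 *
 *	struct ata_device *dev;
 *
 *	ata_for_each_dev(dev, link, ENABLED)
 *		ata_dev_info(dev, "enabled device found\n");
 */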
289 
290 /**
291  *	ata_dev_phys_link - find physical link for a device
292  *	@dev: ATA device to look up physical link for
293  *
294  *	Look up physical link which @dev is attached to.  Note that
295  *	this is different from @dev->link only when @dev is on slave
296  *	link.  For all other cases, it's the same as @dev->link.
297  *
298  *	LOCKING:
299  *	Don't care.
300  *
301  *	RETURNS:
302  *	Pointer to the found physical link.
303  */
304 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
305 {
306 	struct ata_port *ap = dev->link->ap;
307 
308 	if (!ap->slave_link)
309 		return dev->link;
310 	if (!dev->devno)
311 		return &ap->link;
312 	return ap->slave_link;
313 }
314 
315 /**
316  *	ata_force_cbl - force cable type according to libata.force
317  *	@ap: ATA port of interest
318  *
319  *	Force cable type according to libata.force and whine about it.
320  *	The last entry which has matching port number is used, so it
321  *	can be specified as part of device force parameters.  For
322  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
323  *	same effect.
324  *
325  *	LOCKING:
326  *	EH context.
327  */
328 void ata_force_cbl(struct ata_port *ap)
329 {
330 	int i;
331 
332 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
333 		const struct ata_force_ent *fe = &ata_force_tbl[i];
334 
335 		if (fe->port != -1 && fe->port != ap->print_id)
336 			continue;
337 
338 		if (fe->param.cbl == ATA_CBL_NONE)
339 			continue;
340 
341 		ap->cbl = fe->param.cbl;
342 		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
343 		return;
344 	}
345 }
346 
347 /**
348  *	ata_force_link_limits - force link limits according to libata.force
349  *	@link: ATA link of interest
350  *
351  *	Force link flags and SATA spd limit according to libata.force
352  *	and whine about it.  When only the port part is specified
353  *	(e.g. 1:), the limit applies to all links connected to both
354  *	the host link and all fan-out ports connected via PMP.  If the
355  *	device part is specified as 0 (e.g. 1.00:), it specifies the
356  *	first fan-out link not the host link.  Device number 15 always
357  *	points to the host link whether PMP is attached or not.  If the
358  *	controller has slave link, device number 16 points to it.
359  *
360  *	LOCKING:
361  *	EH context.
362  */
363 static void ata_force_link_limits(struct ata_link *link)
364 {
365 	bool did_spd = false;
366 	int linkno = link->pmp;
367 	int i;
368 
369 	if (ata_is_host_link(link))
370 		linkno += 15;
371 
372 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
373 		const struct ata_force_ent *fe = &ata_force_tbl[i];
374 
375 		if (fe->port != -1 && fe->port != link->ap->print_id)
376 			continue;
377 
378 		if (fe->device != -1 && fe->device != linkno)
379 			continue;
380 
381 		/* only honor the first spd limit */
382 		if (!did_spd && fe->param.spd_limit) {
383 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
384 			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
385 					fe->param.name);
386 			did_spd = true;
387 		}
388 
389 		/* let lflags stack */
390 		if (fe->param.lflags) {
391 			link->flags |= fe->param.lflags;
392 			ata_link_notice(link,
393 					"FORCE: link flag 0x%x forced -> 0x%x\n",
394 					fe->param.lflags, link->flags);
395 		}
396 	}
397 }
398 
399 /**
400  *	ata_force_xfermask - force xfermask according to libata.force
401  *	@dev: ATA device of interest
402  *
403  *	Force xfer_mask according to libata.force and whine about it.
404  *	For consistency with link selection, device number 15 selects
405  *	the first device connected to the host link.
406  *
407  *	LOCKING:
408  *	EH context.
409  */
410 static void ata_force_xfermask(struct ata_device *dev)
411 {
412 	int devno = dev->link->pmp + dev->devno;
413 	int alt_devno = devno;
414 	int i;
415 
416 	/* allow n.15/16 for devices attached to host port */
417 	if (ata_is_host_link(dev->link))
418 		alt_devno += 15;
419 
420 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
421 		const struct ata_force_ent *fe = &ata_force_tbl[i];
422 		unsigned long pio_mask, mwdma_mask, udma_mask;
423 
424 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
425 			continue;
426 
427 		if (fe->device != -1 && fe->device != devno &&
428 		    fe->device != alt_devno)
429 			continue;
430 
431 		if (!fe->param.xfer_mask)
432 			continue;
433 
434 		ata_unpack_xfermask(fe->param.xfer_mask,
435 				    &pio_mask, &mwdma_mask, &udma_mask);
436 		if (udma_mask)
437 			dev->udma_mask = udma_mask;
438 		else if (mwdma_mask) {
439 			dev->udma_mask = 0;
440 			dev->mwdma_mask = mwdma_mask;
441 		} else {
442 			dev->udma_mask = 0;
443 			dev->mwdma_mask = 0;
444 			dev->pio_mask = pio_mask;
445 		}
446 
447 		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
448 			       fe->param.name);
449 		return;
450 	}
451 }
452 
453 /**
454  *	ata_force_horkage - force horkage according to libata.force
455  *	@dev: ATA device of interest
456  *
457  *	Force horkage according to libata.force and whine about it.
458  *	For consistency with link selection, device number 15 selects
459  *	the first device connected to the host link.
460  *
461  *	LOCKING:
462  *	EH context.
463  */
464 static void ata_force_horkage(struct ata_device *dev)
465 {
466 	int devno = dev->link->pmp + dev->devno;
467 	int alt_devno = devno;
468 	int i;
469 
470 	/* allow n.15/16 for devices attached to host port */
471 	if (ata_is_host_link(dev->link))
472 		alt_devno += 15;
473 
474 	for (i = 0; i < ata_force_tbl_size; i++) {
475 		const struct ata_force_ent *fe = &ata_force_tbl[i];
476 
477 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
478 			continue;
479 
480 		if (fe->device != -1 && fe->device != devno &&
481 		    fe->device != alt_devno)
482 			continue;
483 
484 		if (!(~dev->horkage & fe->param.horkage_on) &&
485 		    !(dev->horkage & fe->param.horkage_off))
486 			continue;
487 
488 		dev->horkage |= fe->param.horkage_on;
489 		dev->horkage &= ~fe->param.horkage_off;
490 
491 		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
492 			       fe->param.name);
493 	}
494 }
495 
496 /**
497  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
498  *	@opcode: SCSI opcode
499  *
500  *	Determine ATAPI command type from @opcode.
501  *
502  *	LOCKING:
503  *	None.
504  *
505  *	RETURNS:
506  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
507  */
508 int atapi_cmd_type(u8 opcode)
509 {
510 	switch (opcode) {
511 	case GPCMD_READ_10:
512 	case GPCMD_READ_12:
513 		return ATAPI_READ;
514 
515 	case GPCMD_WRITE_10:
516 	case GPCMD_WRITE_12:
517 	case GPCMD_WRITE_AND_VERIFY_10:
518 		return ATAPI_WRITE;
519 
520 	case GPCMD_READ_CD:
521 	case GPCMD_READ_CD_MSF:
522 		return ATAPI_READ_CD;
523 
524 	case ATA_16:
525 	case ATA_12:
526 		if (atapi_passthru16)
527 			return ATAPI_PASS_THRU;
528 		/* fall thru */
529 	default:
530 		return ATAPI_MISC;
531 	}
532 }
533 
534 /**
535  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
536  *	@tf: Taskfile to convert
537  *	@pmp: Port multiplier port
538  *	@is_cmd: This FIS is for command
539  *	@fis: Buffer into which data will be output
540  *
541  *	Converts a standard ATA taskfile to a Serial ATA
542  *	FIS structure (Register - Host to Device).
543  *
544  *	LOCKING:
545  *	Inherited from caller.
546  */
547 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
548 {
549 	fis[0] = 0x27;			/* Register - Host to Device FIS */
550 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
551 	if (is_cmd)
552 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
553 
554 	fis[2] = tf->command;
555 	fis[3] = tf->feature;
556 
557 	fis[4] = tf->lbal;
558 	fis[5] = tf->lbam;
559 	fis[6] = tf->lbah;
560 	fis[7] = tf->device;
561 
562 	fis[8] = tf->hob_lbal;
563 	fis[9] = tf->hob_lbam;
564 	fis[10] = tf->hob_lbah;
565 	fis[11] = tf->hob_feature;
566 
567 	fis[12] = tf->nsect;
568 	fis[13] = tf->hob_nsect;
569 	fis[14] = 0;
570 	fis[15] = tf->ctl;
571 
572 	fis[16] = tf->auxiliary & 0xff;
573 	fis[17] = (tf->auxiliary >> 8) & 0xff;
574 	fis[18] = (tf->auxiliary >> 16) & 0xff;
575 	fis[19] = (tf->auxiliary >> 24) & 0xff;
576 }
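/*
 * Illustrative sketch: a SATA LLD typically builds the command FIS for a
 * queued command into its own DMA-able command table, roughly as
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *
 * with @is_cmd == 1 setting bit 7 of byte 1 to mark a Command FIS.
 */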
577 
578 /**
579  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
580  *	@fis: Buffer from which data will be input
581  *	@tf: Taskfile to output
582  *
583  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
584  *
585  *	LOCKING:
586  *	Inherited from caller.
587  */
588 
589 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
590 {
591 	tf->command	= fis[2];	/* status */
592 	tf->feature	= fis[3];	/* error */
593 
594 	tf->lbal	= fis[4];
595 	tf->lbam	= fis[5];
596 	tf->lbah	= fis[6];
597 	tf->device	= fis[7];
598 
599 	tf->hob_lbal	= fis[8];
600 	tf->hob_lbam	= fis[9];
601 	tf->hob_lbah	= fis[10];
602 
603 	tf->nsect	= fis[12];
604 	tf->hob_nsect	= fis[13];
605 }
606 
607 static const u8 ata_rw_cmds[] = {
608 	/* pio multi */
609 	ATA_CMD_READ_MULTI,
610 	ATA_CMD_WRITE_MULTI,
611 	ATA_CMD_READ_MULTI_EXT,
612 	ATA_CMD_WRITE_MULTI_EXT,
613 	0,
614 	0,
615 	0,
616 	ATA_CMD_WRITE_MULTI_FUA_EXT,
617 	/* pio */
618 	ATA_CMD_PIO_READ,
619 	ATA_CMD_PIO_WRITE,
620 	ATA_CMD_PIO_READ_EXT,
621 	ATA_CMD_PIO_WRITE_EXT,
622 	0,
623 	0,
624 	0,
625 	0,
626 	/* dma */
627 	ATA_CMD_READ,
628 	ATA_CMD_WRITE,
629 	ATA_CMD_READ_EXT,
630 	ATA_CMD_WRITE_EXT,
631 	0,
632 	0,
633 	0,
634 	ATA_CMD_WRITE_FUA_EXT
635 };
636 
637 /**
638  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
639  *	@tf: command to examine and configure
640  *	@dev: device tf belongs to
641  *
642  *	Examine the device configuration and tf->flags to calculate
643  *	the proper read/write commands and protocol to use.
644  *
645  *	LOCKING:
646  *	caller.
647  */
648 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
649 {
650 	u8 cmd;
651 
652 	int index, fua, lba48, write;
653 
654 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
655 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
656 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
657 
658 	if (dev->flags & ATA_DFLAG_PIO) {
659 		tf->protocol = ATA_PROT_PIO;
660 		index = dev->multi_count ? 0 : 8;
661 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
662 		/* Unable to use DMA due to host limitation */
663 		tf->protocol = ATA_PROT_PIO;
664 		index = dev->multi_count ? 0 : 8;
665 	} else {
666 		tf->protocol = ATA_PROT_DMA;
667 		index = 16;
668 	}
669 
670 	cmd = ata_rw_cmds[index + fua + lba48 + write];
671 	if (cmd) {
672 		tf->command = cmd;
673 		return 0;
674 	}
675 	return -1;
676 }
677 
678 /**
679  *	ata_tf_read_block - Read block address from ATA taskfile
680  *	@tf: ATA taskfile of interest
681  *	@dev: ATA device @tf belongs to
682  *
683  *	LOCKING:
684  *	None.
685  *
686  *	Read block address from @tf.  This function can handle all
687  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
688  *	flags select the address format to use.
689  *
690  *	RETURNS:
691  *	Block address read from @tf.
692  */
693 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
694 {
695 	u64 block = 0;
696 
697 	if (tf->flags & ATA_TFLAG_LBA) {
698 		if (tf->flags & ATA_TFLAG_LBA48) {
699 			block |= (u64)tf->hob_lbah << 40;
700 			block |= (u64)tf->hob_lbam << 32;
701 			block |= (u64)tf->hob_lbal << 24;
702 		} else
703 			block |= (tf->device & 0xf) << 24;
704 
705 		block |= tf->lbah << 16;
706 		block |= tf->lbam << 8;
707 		block |= tf->lbal;
708 	} else {
709 		u32 cyl, head, sect;
710 
711 		cyl = tf->lbam | (tf->lbah << 8);
712 		head = tf->device & 0xf;
713 		sect = tf->lbal;
714 
715 		if (!sect) {
716 			ata_dev_warn(dev,
717 				     "device reported invalid CHS sector 0\n");
718 			sect = 1; /* oh well */
719 		}
720 
721 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
722 	}
723 
724 	return block;
725 }
726 
727 /**
728  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
729  *	@tf: Target ATA taskfile
730  *	@dev: ATA device @tf belongs to
731  *	@block: Block address
732  *	@n_block: Number of blocks
733  *	@tf_flags: RW/FUA etc...
734  *	@tag: tag
735  *
736  *	LOCKING:
737  *	None.
738  *
739  *	Build ATA taskfile @tf for read/write request described by
740  *	@block, @n_block, @tf_flags and @tag on @dev.
741  *
742  *	RETURNS:
743  *
744  *	0 on success, -ERANGE if the request is too large for @dev,
745  *	-EINVAL if the request is invalid.
746  */
747 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
748 		    u64 block, u32 n_block, unsigned int tf_flags,
749 		    unsigned int tag)
750 {
751 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
752 	tf->flags |= tf_flags;
753 
754 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
755 		/* yay, NCQ */
756 		if (!lba_48_ok(block, n_block))
757 			return -ERANGE;
758 
759 		tf->protocol = ATA_PROT_NCQ;
760 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
761 
762 		if (tf->flags & ATA_TFLAG_WRITE)
763 			tf->command = ATA_CMD_FPDMA_WRITE;
764 		else
765 			tf->command = ATA_CMD_FPDMA_READ;
766 
767 		tf->nsect = tag << 3;
768 		tf->hob_feature = (n_block >> 8) & 0xff;
769 		tf->feature = n_block & 0xff;
770 
771 		tf->hob_lbah = (block >> 40) & 0xff;
772 		tf->hob_lbam = (block >> 32) & 0xff;
773 		tf->hob_lbal = (block >> 24) & 0xff;
774 		tf->lbah = (block >> 16) & 0xff;
775 		tf->lbam = (block >> 8) & 0xff;
776 		tf->lbal = block & 0xff;
777 
778 		tf->device = ATA_LBA;
779 		if (tf->flags & ATA_TFLAG_FUA)
780 			tf->device |= 1 << 7;
781 	} else if (dev->flags & ATA_DFLAG_LBA) {
782 		tf->flags |= ATA_TFLAG_LBA;
783 
784 		if (lba_28_ok(block, n_block)) {
785 			/* use LBA28 */
786 			tf->device |= (block >> 24) & 0xf;
787 		} else if (lba_48_ok(block, n_block)) {
788 			if (!(dev->flags & ATA_DFLAG_LBA48))
789 				return -ERANGE;
790 
791 			/* use LBA48 */
792 			tf->flags |= ATA_TFLAG_LBA48;
793 
794 			tf->hob_nsect = (n_block >> 8) & 0xff;
795 
796 			tf->hob_lbah = (block >> 40) & 0xff;
797 			tf->hob_lbam = (block >> 32) & 0xff;
798 			tf->hob_lbal = (block >> 24) & 0xff;
799 		} else
800 			/* request too large even for LBA48 */
801 			return -ERANGE;
802 
803 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
804 			return -EINVAL;
805 
806 		tf->nsect = n_block & 0xff;
807 
808 		tf->lbah = (block >> 16) & 0xff;
809 		tf->lbam = (block >> 8) & 0xff;
810 		tf->lbal = block & 0xff;
811 
812 		tf->device |= ATA_LBA;
813 	} else {
814 		/* CHS */
815 		u32 sect, head, cyl, track;
816 
817 		/* The request -may- be too large for CHS addressing. */
818 		if (!lba_28_ok(block, n_block))
819 			return -ERANGE;
820 
821 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
822 			return -EINVAL;
823 
824 		/* Convert LBA to CHS */
825 		track = (u32)block / dev->sectors;
826 		cyl   = track / dev->heads;
827 		head  = track % dev->heads;
828 		sect  = (u32)block % dev->sectors + 1;
829 
830 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
831 			(u32)block, track, cyl, head, sect);
832 
833 		/* Check whether the converted CHS can fit.
834 		   Cylinder: 0-65535
835 		   Head: 0-15
836 		   Sector: 1-255 */
837 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
838 			return -ERANGE;
839 
840 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
841 		tf->lbal = sect;
842 		tf->lbam = cyl;
843 		tf->lbah = cyl >> 8;
844 		tf->device |= head;
845 	}
846 
847 	return 0;
848 }
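/*
 * Usage sketch (illustrative): the SCSI translation layer builds the
 * taskfile for a READ/WRITE request roughly as
 *
 *	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block,
 *			     tf_flags, qc->tag);
 *
 * with -ERANGE indicating that the request does not fit the device's
 * addressing scheme and -EINVAL an otherwise invalid request.
 */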
849 
850 /**
851  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
852  *	@pio_mask: pio_mask
853  *	@mwdma_mask: mwdma_mask
854  *	@udma_mask: udma_mask
855  *
856  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
857  *	unsigned int xfer_mask.
858  *
859  *	LOCKING:
860  *	None.
861  *
862  *	RETURNS:
863  *	Packed xfer_mask.
864  */
865 unsigned long ata_pack_xfermask(unsigned long pio_mask,
866 				unsigned long mwdma_mask,
867 				unsigned long udma_mask)
868 {
869 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
870 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
871 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
872 }
873 
874 /**
875  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
876  *	@xfer_mask: xfer_mask to unpack
877  *	@pio_mask: resulting pio_mask
878  *	@mwdma_mask: resulting mwdma_mask
879  *	@udma_mask: resulting udma_mask
880  *
881  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
882  *	Any NULL distination masks will be ignored.
883  */
884 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
885 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
886 {
887 	if (pio_mask)
888 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
889 	if (mwdma_mask)
890 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
891 	if (udma_mask)
892 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
893 }
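/*
 * Example (illustrative): the two helpers are inverses, so with the
 * xfer-mask constants from <linux/ata.h>
 *
 *	unsigned long pio, mwdma, udma;
 *	unsigned long mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
 *
 *	ata_unpack_xfermask(mask, &pio, &mwdma, &udma);
 *
 * leaves pio == ATA_PIO4, mwdma == ATA_MWDMA2 and udma == ATA_UDMA6.
 */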
894 
895 static const struct ata_xfer_ent {
896 	int shift, bits;
897 	u8 base;
898 } ata_xfer_tbl[] = {
899 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
900 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
901 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
902 	{ -1, },
903 };
904 
905 /**
906  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
907  *	@xfer_mask: xfer_mask of interest
908  *
909  *	Return matching XFER_* value for @xfer_mask.  Only the highest
910  *	bit of @xfer_mask is considered.
911  *
912  *	LOCKING:
913  *	None.
914  *
915  *	RETURNS:
916  *	Matching XFER_* value, 0xff if no match found.
917  */
918 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
919 {
920 	int highbit = fls(xfer_mask) - 1;
921 	const struct ata_xfer_ent *ent;
922 
923 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
924 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
925 			return ent->base + highbit - ent->shift;
926 	return 0xff;
927 }
928 
929 /**
930  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
931  *	@xfer_mode: XFER_* of interest
932  *
933  *	Return matching xfer_mask for @xfer_mode.
934  *
935  *	LOCKING:
936  *	None.
937  *
938  *	RETURNS:
939  *	Matching xfer_mask, 0 if no match found.
940  */
941 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
942 {
943 	const struct ata_xfer_ent *ent;
944 
945 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
946 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
947 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
948 				& ~((1 << ent->shift) - 1);
949 	return 0;
950 }
951 
952 /**
953  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
954  *	@xfer_mode: XFER_* of interest
955  *
956  *	Return matching xfer_shift for @xfer_mode.
957  *
958  *	LOCKING:
959  *	None.
960  *
961  *	RETURNS:
962  *	Matching xfer_shift, -1 if no match found.
963  */
964 int ata_xfer_mode2shift(unsigned long xfer_mode)
965 {
966 	const struct ata_xfer_ent *ent;
967 
968 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
969 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
970 			return ent->shift;
971 	return -1;
972 }
973 
974 /**
975  *	ata_mode_string - convert xfer_mask to string
976  *	@xfer_mask: mask of bits supported; only highest bit counts.
977  *
978  *	Determine string which represents the highest speed
979  *	(highest bit in @modemask).
980  *
981  *	LOCKING:
982  *	None.
983  *
984  *	RETURNS:
985  *	Constant C string representing highest speed listed in
986  *	@mode_mask, or the constant C string "<n/a>".
987  */
988 const char *ata_mode_string(unsigned long xfer_mask)
989 {
990 	static const char * const xfer_mode_str[] = {
991 		"PIO0",
992 		"PIO1",
993 		"PIO2",
994 		"PIO3",
995 		"PIO4",
996 		"PIO5",
997 		"PIO6",
998 		"MWDMA0",
999 		"MWDMA1",
1000 		"MWDMA2",
1001 		"MWDMA3",
1002 		"MWDMA4",
1003 		"UDMA/16",
1004 		"UDMA/25",
1005 		"UDMA/33",
1006 		"UDMA/44",
1007 		"UDMA/66",
1008 		"UDMA/100",
1009 		"UDMA/133",
1010 		"UDMA7",
1011 	};
1012 	int highbit;
1013 
1014 	highbit = fls(xfer_mask) - 1;
1015 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1016 		return xfer_mode_str[highbit];
1017 	return "<n/a>";
1018 }
1019 
1020 const char *sata_spd_string(unsigned int spd)
1021 {
1022 	static const char * const spd_str[] = {
1023 		"1.5 Gbps",
1024 		"3.0 Gbps",
1025 		"6.0 Gbps",
1026 	};
1027 
1028 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1029 		return "<unknown>";
1030 	return spd_str[spd - 1];
1031 }
1032 
1033 /**
1034  *	ata_dev_classify - determine device type based on ATA-spec signature
1035  *	@tf: ATA taskfile register set for device to be identified
1036  *
1037  *	Determine from taskfile register contents whether a device is
1038  *	ATA or ATAPI, as per "Signature and persistence" section
1039  *	of ATA/PI spec (volume 1, sect 5.14).
1040  *
1041  *	LOCKING:
1042  *	None.
1043  *
1044  *	RETURNS:
1045  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1046  *	%ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure.
1047  */
1048 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1049 {
1050 	/* Apple's open source Darwin code hints that some devices only
1051 	 * put a proper signature into the LBA mid/high registers,
1052 	 * so we only check those.  It's sufficient for uniqueness.
1053 	 *
1054 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1055 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1056 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1057 	 * spec has never mentioned using different signatures
1058 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1059 	 * Multiplier specification began to use 0x69/0x96 to identify
1060 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1061 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1062 	 * 0x69/0x96 and described them as reserved for
1063 	 * SerialATA.
1064 	 *
1065 	 * We follow the current spec and consider that 0x69/0x96
1066 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1067 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1068 	 * SEMB signature.  This is worked around in
1069 	 * ata_dev_read_id().
1070 	 */
1071 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1072 		DPRINTK("found ATA device by sig\n");
1073 		return ATA_DEV_ATA;
1074 	}
1075 
1076 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1077 		DPRINTK("found ATAPI device by sig\n");
1078 		return ATA_DEV_ATAPI;
1079 	}
1080 
1081 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1082 		DPRINTK("found PMP device by sig\n");
1083 		return ATA_DEV_PMP;
1084 	}
1085 
1086 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1087 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1088 		return ATA_DEV_SEMB;
1089 	}
1090 
1091 	DPRINTK("unknown device\n");
1092 	return ATA_DEV_UNKNOWN;
1093 }
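/*
 * Usage sketch (illustrative): reset methods read the signature taskfile
 * left by the device after reset and classify it, e.g.
 *
 *	struct ata_taskfile tf;
 *
 *	ap->ops->sff_tf_read(ap, &tf);
 *	class = ata_dev_classify(&tf);
 *
 * (sff_tf_read is the SFF taskfile accessor; other transports obtain the
 * signature from their own FIS area.)
 */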
1094 
1095 /**
1096  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1097  *	@id: IDENTIFY DEVICE results we will examine
1098  *	@s: string into which data is output
1099  *	@ofs: offset into identify device page
1100  *	@len: length of string to return. must be an even number.
1101  *
1102  *	The strings in the IDENTIFY DEVICE page are broken up into
1103  *	16-bit chunks.  Run through the string, and output each
1104  *	8-bit chunk linearly, regardless of platform.
1105  *
1106  *	LOCKING:
1107  *	caller.
1108  */
1109 
1110 void ata_id_string(const u16 *id, unsigned char *s,
1111 		   unsigned int ofs, unsigned int len)
1112 {
1113 	unsigned int c;
1114 
1115 	BUG_ON(len & 1);
1116 
1117 	while (len > 0) {
1118 		c = id[ofs] >> 8;
1119 		*s = c;
1120 		s++;
1121 
1122 		c = id[ofs] & 0xff;
1123 		*s = c;
1124 		s++;
1125 
1126 		ofs++;
1127 		len -= 2;
1128 	}
1129 }
1130 
1131 /**
1132  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1133  *	@id: IDENTIFY DEVICE results we will examine
1134  *	@s: string into which data is output
1135  *	@ofs: offset into identify device page
1136  *	@len: length of string to return. must be an odd number.
1137  *
1138  *	This function is identical to ata_id_string except that it
1139  *	trims trailing spaces and terminates the resulting string with
1140  *	null.  @len must be the actual maximum length (even number) + 1.
1141  *
1142  *	LOCKING:
1143  *	caller.
1144  */
1145 void ata_id_c_string(const u16 *id, unsigned char *s,
1146 		     unsigned int ofs, unsigned int len)
1147 {
1148 	unsigned char *p;
1149 
1150 	ata_id_string(id, s, ofs, len - 1);
1151 
1152 	p = s + strnlen(s, len - 1);
1153 	while (p > s && p[-1] == ' ')
1154 		p--;
1155 	*p = '\0';
1156 }
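/*
 * Example (illustrative): extracting the model string from IDENTIFY data,
 * assuming the ATA_ID_PROD/ATA_ID_PROD_LEN constants from <linux/ata.h>:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */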
1157 
1158 static u64 ata_id_n_sectors(const u16 *id)
1159 {
1160 	if (ata_id_has_lba(id)) {
1161 		if (ata_id_has_lba48(id))
1162 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1163 		else
1164 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1165 	} else {
1166 		if (ata_id_current_chs_valid(id))
1167 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1168 			       id[ATA_ID_CUR_SECTORS];
1169 		else
1170 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1171 			       id[ATA_ID_SECTORS];
1172 	}
1173 }
1174 
1175 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1176 {
1177 	u64 sectors = 0;
1178 
1179 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1180 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1181 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1182 	sectors |= (tf->lbah & 0xff) << 16;
1183 	sectors |= (tf->lbam & 0xff) << 8;
1184 	sectors |= (tf->lbal & 0xff);
1185 
1186 	return sectors;
1187 }
1188 
1189 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1190 {
1191 	u64 sectors = 0;
1192 
1193 	sectors |= (tf->device & 0x0f) << 24;
1194 	sectors |= (tf->lbah & 0xff) << 16;
1195 	sectors |= (tf->lbam & 0xff) << 8;
1196 	sectors |= (tf->lbal & 0xff);
1197 
1198 	return sectors;
1199 }
1200 
1201 /**
1202  *	ata_read_native_max_address - Read native max address
1203  *	@dev: target device
1204  *	@max_sectors: out parameter for the result native max address
1205  *
1206  *	Perform an LBA48 or LBA28 native size query upon the device in
1207  *	question.
1208  *
1209  *	RETURNS:
1210  *	0 on success, -EACCES if command is aborted by the drive.
1211  *	-EIO on other errors.
1212  */
1213 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1214 {
1215 	unsigned int err_mask;
1216 	struct ata_taskfile tf;
1217 	int lba48 = ata_id_has_lba48(dev->id);
1218 
1219 	ata_tf_init(dev, &tf);
1220 
1221 	/* always clear all address registers */
1222 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1223 
1224 	if (lba48) {
1225 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1226 		tf.flags |= ATA_TFLAG_LBA48;
1227 	} else
1228 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1229 
1230 	tf.protocol |= ATA_PROT_NODATA;
1231 	tf.device |= ATA_LBA;
1232 
1233 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1234 	if (err_mask) {
1235 		ata_dev_warn(dev,
1236 			     "failed to read native max address (err_mask=0x%x)\n",
1237 			     err_mask);
1238 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1239 			return -EACCES;
1240 		return -EIO;
1241 	}
1242 
1243 	if (lba48)
1244 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1245 	else
1246 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1247 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1248 		(*max_sectors)--;
1249 	return 0;
1250 }
1251 
1252 /**
1253  *	ata_set_max_sectors - Set max sectors
1254  *	@dev: target device
1255  *	@new_sectors: new max sectors value to set for the device
1256  *
1257  *	Set max sectors of @dev to @new_sectors.
1258  *
1259  *	RETURNS:
1260  *	0 on success, -EACCES if command is aborted or denied (due to
1261  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1262  *	errors.
1263  */
1264 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1265 {
1266 	unsigned int err_mask;
1267 	struct ata_taskfile tf;
1268 	int lba48 = ata_id_has_lba48(dev->id);
1269 
1270 	new_sectors--;
1271 
1272 	ata_tf_init(dev, &tf);
1273 
1274 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1275 
1276 	if (lba48) {
1277 		tf.command = ATA_CMD_SET_MAX_EXT;
1278 		tf.flags |= ATA_TFLAG_LBA48;
1279 
1280 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1281 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1282 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1283 	} else {
1284 		tf.command = ATA_CMD_SET_MAX;
1285 
1286 		tf.device |= (new_sectors >> 24) & 0xf;
1287 	}
1288 
1289 	tf.protocol |= ATA_PROT_NODATA;
1290 	tf.device |= ATA_LBA;
1291 
1292 	tf.lbal = (new_sectors >> 0) & 0xff;
1293 	tf.lbam = (new_sectors >> 8) & 0xff;
1294 	tf.lbah = (new_sectors >> 16) & 0xff;
1295 
1296 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1297 	if (err_mask) {
1298 		ata_dev_warn(dev,
1299 			     "failed to set max address (err_mask=0x%x)\n",
1300 			     err_mask);
1301 		if (err_mask == AC_ERR_DEV &&
1302 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1303 			return -EACCES;
1304 		return -EIO;
1305 	}
1306 
1307 	return 0;
1308 }
1309 
1310 /**
1311  *	ata_hpa_resize		-	Resize a device with an HPA set
1312  *	@dev: Device to resize
1313  *
1314  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1315  *	it if required to the full size of the media. The caller must check
1316  *	the drive has the HPA feature set enabled.
1317  *
1318  *	RETURNS:
1319  *	0 on success, -errno on failure.
1320  */
1321 static int ata_hpa_resize(struct ata_device *dev)
1322 {
1323 	struct ata_eh_context *ehc = &dev->link->eh_context;
1324 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1325 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1326 	u64 sectors = ata_id_n_sectors(dev->id);
1327 	u64 native_sectors;
1328 	int rc;
1329 
1330 	/* do we need to do it? */
1331 	if (dev->class != ATA_DEV_ATA ||
1332 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1333 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1334 		return 0;
1335 
1336 	/* read native max address */
1337 	rc = ata_read_native_max_address(dev, &native_sectors);
1338 	if (rc) {
1339 		/* If device aborted the command or HPA isn't going to
1340 		 * be unlocked, skip HPA resizing.
1341 		 */
1342 		if (rc == -EACCES || !unlock_hpa) {
1343 			ata_dev_warn(dev,
1344 				     "HPA support seems broken, skipping HPA handling\n");
1345 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1346 
1347 			/* we can continue if device aborted the command */
1348 			if (rc == -EACCES)
1349 				rc = 0;
1350 		}
1351 
1352 		return rc;
1353 	}
1354 	dev->n_native_sectors = native_sectors;
1355 
1356 	/* nothing to do? */
1357 	if (native_sectors <= sectors || !unlock_hpa) {
1358 		if (!print_info || native_sectors == sectors)
1359 			return 0;
1360 
1361 		if (native_sectors > sectors)
1362 			ata_dev_info(dev,
1363 				"HPA detected: current %llu, native %llu\n",
1364 				(unsigned long long)sectors,
1365 				(unsigned long long)native_sectors);
1366 		else if (native_sectors < sectors)
1367 			ata_dev_warn(dev,
1368 				"native sectors (%llu) is smaller than sectors (%llu)\n",
1369 				(unsigned long long)native_sectors,
1370 				(unsigned long long)sectors);
1371 		return 0;
1372 	}
1373 
1374 	/* let's unlock HPA */
1375 	rc = ata_set_max_sectors(dev, native_sectors);
1376 	if (rc == -EACCES) {
1377 		/* if device aborted the command, skip HPA resizing */
1378 		ata_dev_warn(dev,
1379 			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1380 			     (unsigned long long)sectors,
1381 			     (unsigned long long)native_sectors);
1382 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1383 		return 0;
1384 	} else if (rc)
1385 		return rc;
1386 
1387 	/* re-read IDENTIFY data */
1388 	rc = ata_dev_reread_id(dev, 0);
1389 	if (rc) {
1390 		ata_dev_err(dev,
1391 			    "failed to re-read IDENTIFY data after HPA resizing\n");
1392 		return rc;
1393 	}
1394 
1395 	if (print_info) {
1396 		u64 new_sectors = ata_id_n_sectors(dev->id);
1397 		ata_dev_info(dev,
1398 			"HPA unlocked: %llu -> %llu, native %llu\n",
1399 			(unsigned long long)sectors,
1400 			(unsigned long long)new_sectors,
1401 			(unsigned long long)native_sectors);
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 /**
1408  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1409  *	@id: IDENTIFY DEVICE page to dump
1410  *
1411  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1412  *	page.
1413  *
1414  *	LOCKING:
1415  *	caller.
1416  */
1417 
1418 static inline void ata_dump_id(const u16 *id)
1419 {
1420 	DPRINTK("49==0x%04x  "
1421 		"53==0x%04x  "
1422 		"63==0x%04x  "
1423 		"64==0x%04x  "
1424 		"75==0x%04x  \n",
1425 		id[49],
1426 		id[53],
1427 		id[63],
1428 		id[64],
1429 		id[75]);
1430 	DPRINTK("80==0x%04x  "
1431 		"81==0x%04x  "
1432 		"82==0x%04x  "
1433 		"83==0x%04x  "
1434 		"84==0x%04x  \n",
1435 		id[80],
1436 		id[81],
1437 		id[82],
1438 		id[83],
1439 		id[84]);
1440 	DPRINTK("88==0x%04x  "
1441 		"93==0x%04x\n",
1442 		id[88],
1443 		id[93]);
1444 }
1445 
1446 /**
1447  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1448  *	@id: IDENTIFY data to compute xfer mask from
1449  *
1450  *	Compute the xfermask for this device. This is not as trivial
1451  *	as it seems if we must consider early devices correctly.
1452  *
1453  *	FIXME: pre IDE drive timing (do we care ?).
1454  *
1455  *	LOCKING:
1456  *	None.
1457  *
1458  *	RETURNS:
1459  *	Computed xfermask
1460  */
1461 unsigned long ata_id_xfermask(const u16 *id)
1462 {
1463 	unsigned long pio_mask, mwdma_mask, udma_mask;
1464 
1465 	/* Usual case. Word 53 indicates word 64 is valid */
1466 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1467 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1468 		pio_mask <<= 3;
1469 		pio_mask |= 0x7;
1470 	} else {
1471 		/* If word 64 isn't valid then Word 51 high byte holds
1472 		 * the PIO timing number for the maximum. Turn it into
1473 		 * a mask.
1474 		 */
1475 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1476 		if (mode < 5)	/* Valid PIO range */
1477 			pio_mask = (2 << mode) - 1;
1478 		else
1479 			pio_mask = 1;
1480 
1481 		/* But wait.. there's more. Design your standards by
1482 		 * committee and you too can get a free iordy field to
1483 		 * process. However, it's the speeds, not the modes, that
1484 		 * are supported... Note drivers using the timing API
1485 		 * will get this right anyway
1486 		 */
1487 	}
1488 
1489 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1490 
1491 	if (ata_id_is_cfa(id)) {
1492 		/*
1493 		 *	Process compact flash extended modes
1494 		 */
1495 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1496 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1497 
1498 		if (pio)
1499 			pio_mask |= (1 << 5);
1500 		if (pio > 1)
1501 			pio_mask |= (1 << 6);
1502 		if (dma)
1503 			mwdma_mask |= (1 << 3);
1504 		if (dma > 1)
1505 			mwdma_mask |= (1 << 4);
1506 	}
1507 
1508 	udma_mask = 0;
1509 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1510 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1511 
1512 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1513 }
1514 
1515 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1516 {
1517 	struct completion *waiting = qc->private_data;
1518 
1519 	complete(waiting);
1520 }
1521 
1522 /**
1523  *	ata_exec_internal_sg - execute libata internal command
1524  *	@dev: Device to which the command is sent
1525  *	@tf: Taskfile registers for the command and the result
1526  *	@cdb: CDB for packet command
1527  *	@dma_dir: Data transfer direction of the command
1528  *	@sgl: sg list for the data buffer of the command
1529  *	@n_elem: Number of sg entries
1530  *	@timeout: Timeout in msecs (0 for default)
1531  *
1532  *	Executes libata internal command with timeout.  @tf contains
1533  *	command on entry and result on return.  Timeout and error
1534  *	conditions are reported via return value.  No recovery action
1535  *	is taken after a command times out.  It's the caller's duty to
1536  *	clean up after timeout.
1537  *
1538  *	LOCKING:
1539  *	None.  Should be called with kernel context, might sleep.
1540  *
1541  *	RETURNS:
1542  *	Zero on success, AC_ERR_* mask on failure
1543  */
1544 unsigned ata_exec_internal_sg(struct ata_device *dev,
1545 			      struct ata_taskfile *tf, const u8 *cdb,
1546 			      int dma_dir, struct scatterlist *sgl,
1547 			      unsigned int n_elem, unsigned long timeout)
1548 {
1549 	struct ata_link *link = dev->link;
1550 	struct ata_port *ap = link->ap;
1551 	u8 command = tf->command;
1552 	int auto_timeout = 0;
1553 	struct ata_queued_cmd *qc;
1554 	unsigned int tag, preempted_tag;
1555 	u32 preempted_sactive, preempted_qc_active;
1556 	int preempted_nr_active_links;
1557 	DECLARE_COMPLETION_ONSTACK(wait);
1558 	unsigned long flags;
1559 	unsigned int err_mask;
1560 	int rc;
1561 
1562 	spin_lock_irqsave(ap->lock, flags);
1563 
1564 	/* no internal command while frozen */
1565 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1566 		spin_unlock_irqrestore(ap->lock, flags);
1567 		return AC_ERR_SYSTEM;
1568 	}
1569 
1570 	/* initialize internal qc */
1571 
1572 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1573 	 * drivers choke if any other tag is given.  This breaks
1574 	 * ata_tag_internal() test for those drivers.  Don't use new
1575 	 * EH stuff without converting to it.
1576 	 */
1577 	if (ap->ops->error_handler)
1578 		tag = ATA_TAG_INTERNAL;
1579 	else
1580 		tag = 0;
1581 
1582 	if (test_and_set_bit(tag, &ap->qc_allocated))
1583 		BUG();
1584 	qc = __ata_qc_from_tag(ap, tag);
1585 
1586 	qc->tag = tag;
1587 	qc->scsicmd = NULL;
1588 	qc->ap = ap;
1589 	qc->dev = dev;
1590 	ata_qc_reinit(qc);
1591 
1592 	preempted_tag = link->active_tag;
1593 	preempted_sactive = link->sactive;
1594 	preempted_qc_active = ap->qc_active;
1595 	preempted_nr_active_links = ap->nr_active_links;
1596 	link->active_tag = ATA_TAG_POISON;
1597 	link->sactive = 0;
1598 	ap->qc_active = 0;
1599 	ap->nr_active_links = 0;
1600 
1601 	/* prepare & issue qc */
1602 	qc->tf = *tf;
1603 	if (cdb)
1604 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1605 
1606 	/* some SATA bridges need us to indicate data xfer direction */
1607 	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1608 	    dma_dir == DMA_FROM_DEVICE)
1609 		qc->tf.feature |= ATAPI_DMADIR;
1610 
1611 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1612 	qc->dma_dir = dma_dir;
1613 	if (dma_dir != DMA_NONE) {
1614 		unsigned int i, buflen = 0;
1615 		struct scatterlist *sg;
1616 
1617 		for_each_sg(sgl, sg, n_elem, i)
1618 			buflen += sg->length;
1619 
1620 		ata_sg_init(qc, sgl, n_elem);
1621 		qc->nbytes = buflen;
1622 	}
1623 
1624 	qc->private_data = &wait;
1625 	qc->complete_fn = ata_qc_complete_internal;
1626 
1627 	ata_qc_issue(qc);
1628 
1629 	spin_unlock_irqrestore(ap->lock, flags);
1630 
1631 	if (!timeout) {
1632 		if (ata_probe_timeout)
1633 			timeout = ata_probe_timeout * 1000;
1634 		else {
1635 			timeout = ata_internal_cmd_timeout(dev, command);
1636 			auto_timeout = 1;
1637 		}
1638 	}
1639 
1640 	if (ap->ops->error_handler)
1641 		ata_eh_release(ap);
1642 
1643 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1644 
1645 	if (ap->ops->error_handler)
1646 		ata_eh_acquire(ap);
1647 
1648 	ata_sff_flush_pio_task(ap);
1649 
1650 	if (!rc) {
1651 		spin_lock_irqsave(ap->lock, flags);
1652 
1653 		/* We're racing with irq here.  If we lose, the
1654 		 * following test prevents us from completing the qc
1655 		 * twice.  If we win, the port is frozen and will be
1656 		 * cleaned up by ->post_internal_cmd().
1657 		 */
1658 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1659 			qc->err_mask |= AC_ERR_TIMEOUT;
1660 
1661 			if (ap->ops->error_handler)
1662 				ata_port_freeze(ap);
1663 			else
1664 				ata_qc_complete(qc);
1665 
1666 			if (ata_msg_warn(ap))
1667 				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1668 					     command);
1669 		}
1670 
1671 		spin_unlock_irqrestore(ap->lock, flags);
1672 	}
1673 
1674 	/* do post_internal_cmd */
1675 	if (ap->ops->post_internal_cmd)
1676 		ap->ops->post_internal_cmd(qc);
1677 
1678 	/* perform minimal error analysis */
1679 	if (qc->flags & ATA_QCFLAG_FAILED) {
1680 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1681 			qc->err_mask |= AC_ERR_DEV;
1682 
1683 		if (!qc->err_mask)
1684 			qc->err_mask |= AC_ERR_OTHER;
1685 
1686 		if (qc->err_mask & ~AC_ERR_OTHER)
1687 			qc->err_mask &= ~AC_ERR_OTHER;
1688 	}
1689 
1690 	/* finish up */
1691 	spin_lock_irqsave(ap->lock, flags);
1692 
1693 	*tf = qc->result_tf;
1694 	err_mask = qc->err_mask;
1695 
1696 	ata_qc_free(qc);
1697 	link->active_tag = preempted_tag;
1698 	link->sactive = preempted_sactive;
1699 	ap->qc_active = preempted_qc_active;
1700 	ap->nr_active_links = preempted_nr_active_links;
1701 
1702 	spin_unlock_irqrestore(ap->lock, flags);
1703 
1704 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1705 		ata_internal_cmd_timed_out(dev, command);
1706 
1707 	return err_mask;
1708 }
1709 
1710 /**
1711  *	ata_exec_internal - execute libata internal command
1712  *	@dev: Device to which the command is sent
1713  *	@tf: Taskfile registers for the command and the result
1714  *	@cdb: CDB for packet command
1715  *	@dma_dir: Data transfer direction of the command
1716  *	@buf: Data buffer of the command
1717  *	@buflen: Length of data buffer
1718  *	@timeout: Timeout in msecs (0 for default)
1719  *
1720  *	Wrapper around ata_exec_internal_sg() which takes simple
1721  *	buffer instead of sg list.
1722  *
1723  *	LOCKING:
1724  *	None.  Should be called with kernel context, might sleep.
1725  *
1726  *	RETURNS:
1727  *	Zero on success, AC_ERR_* mask on failure
1728  */
1729 unsigned ata_exec_internal(struct ata_device *dev,
1730 			   struct ata_taskfile *tf, const u8 *cdb,
1731 			   int dma_dir, void *buf, unsigned int buflen,
1732 			   unsigned long timeout)
1733 {
1734 	struct scatterlist *psg = NULL, sg;
1735 	unsigned int n_elem = 0;
1736 
1737 	if (dma_dir != DMA_NONE) {
1738 		WARN_ON(!buf);
1739 		sg_init_one(&sg, buf, buflen);
1740 		psg = &sg;
1741 		n_elem++;
1742 	}
1743 
1744 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1745 				    timeout);
1746 }
1747 
1748 /**
1749  *	ata_do_simple_cmd - execute simple internal command
1750  *	@dev: Device to which the command is sent
1751  *	@cmd: Opcode to execute
1752  *
1753  *	Execute a 'simple' command, that only consists of the opcode
1754  *	'cmd' itself, without filling any other registers
1755  *
1756  *	LOCKING:
1757  *	Kernel thread context (may sleep).
1758  *
1759  *	RETURNS:
1760  *	Zero on success, AC_ERR_* mask on failure
1761  */
1762 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1763 {
1764 	struct ata_taskfile tf;
1765 
1766 	ata_tf_init(dev, &tf);
1767 
1768 	tf.command = cmd;
1769 	tf.flags |= ATA_TFLAG_DEVICE;
1770 	tf.protocol = ATA_PROT_NODATA;
1771 
1772 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1773 }
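/*
 * Example (illustrative sketch): issuing a register-only command such as
 * ATA_CMD_FLUSH through this helper and checking the AC_ERR_* result:
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *
 *	if (err_mask)
 *		ata_dev_warn(dev, "FLUSH failed (err_mask=0x%x)\n", err_mask);
 */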
1774 
1775 /**
1776  *	ata_pio_need_iordy	-	check if iordy needed
1777  *	@adev: ATA device
1778  *
1779  *	Check if the current speed of the device requires IORDY. Used
1780  *	by various controllers for chip configuration.
1781  */
1782 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1783 {
1784 	/* Don't set IORDY if we're preparing for reset.  IORDY may
1785 	 * lead to controller lock up on certain controllers if the
1786 	 * port is not occupied.  See bko#11703 for details.
1787 	 */
1788 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1789 		return 0;
1790 	/* Controller doesn't support IORDY.  Probably a pointless
1791 	 * check as the caller should know this.
1792 	 */
1793 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1794 		return 0;
1795 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1796 	if (ata_id_is_cfa(adev->id)
1797 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1798 		return 0;
1799 	/* PIO3 and higher it is mandatory */
1800 	if (adev->pio_mode > XFER_PIO_2)
1801 		return 1;
1802 	/* We turn it on when possible */
1803 	if (ata_id_has_iordy(adev->id))
1804 		return 1;
1805 	return 0;
1806 }
1807 
1808 /**
1809  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1810  *	@adev: ATA device
1811  *
1812  *	Compute the highest mode possible if we are not using iordy. Return
1813  *	-1 if no iordy mode is available.
1814  */
1815 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1816 {
1817 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1818 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1819 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1820 		/* Is the speed faster than the drive allows non IORDY ? */
1821 		if (pio) {
1822 			/* This is cycle times not frequency - watch the logic! */
1823 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1824 				return 3 << ATA_SHIFT_PIO;
1825 			return 7 << ATA_SHIFT_PIO;
1826 		}
1827 	}
1828 	return 3 << ATA_SHIFT_PIO;
1829 }
1830 
1831 /**
1832  *	ata_do_dev_read_id		-	default ID read method
1833  *	@dev: device
1834  *	@tf: proposed taskfile
1835  *	@id: data buffer
1836  *
1837  *	Issue the identify taskfile and hand back the buffer containing
1838  *	identify data. For some RAID controllers and for pre-ATA devices
1839  *	this function is wrapped or replaced by the driver.
1840  */
1841 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1842 					struct ata_taskfile *tf, u16 *id)
1843 {
1844 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1845 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1846 }
1847 
1848 /**
1849  *	ata_dev_read_id - Read ID data from the specified device
1850  *	@dev: target device
1851  *	@p_class: pointer to class of the target device (may be changed)
1852  *	@flags: ATA_READID_* flags
1853  *	@id: buffer to read IDENTIFY data into
1854  *
1855  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1856  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1857  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1858  *	for pre-ATA4 drives.
1859  *
1860  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1861  *	now we abort if we hit that case.
1862  *
1863  *	LOCKING:
1864  *	Kernel thread context (may sleep)
1865  *
1866  *	RETURNS:
1867  *	0 on success, -errno otherwise.
1868  */
1869 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1870 		    unsigned int flags, u16 *id)
1871 {
1872 	struct ata_port *ap = dev->link->ap;
1873 	unsigned int class = *p_class;
1874 	struct ata_taskfile tf;
1875 	unsigned int err_mask = 0;
1876 	const char *reason;
1877 	bool is_semb = class == ATA_DEV_SEMB;
1878 	int may_fallback = 1, tried_spinup = 0;
1879 	int rc;
1880 
1881 	if (ata_msg_ctl(ap))
1882 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1883 
1884 retry:
1885 	ata_tf_init(dev, &tf);
1886 
1887 	switch (class) {
1888 	case ATA_DEV_SEMB:
1889 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1890 	case ATA_DEV_ATA:
1891 		tf.command = ATA_CMD_ID_ATA;
1892 		break;
1893 	case ATA_DEV_ATAPI:
1894 		tf.command = ATA_CMD_ID_ATAPI;
1895 		break;
1896 	default:
1897 		rc = -ENODEV;
1898 		reason = "unsupported class";
1899 		goto err_out;
1900 	}
1901 
1902 	tf.protocol = ATA_PROT_PIO;
1903 
1904 	/* Some devices choke if TF registers contain garbage.  Make
1905 	 * sure those are properly initialized.
1906 	 */
1907 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1908 
1909 	/* Device presence detection is unreliable on some
1910 	 * controllers.  Always poll IDENTIFY if available.
1911 	 */
1912 	tf.flags |= ATA_TFLAG_POLLING;
1913 
1914 	if (ap->ops->read_id)
1915 		err_mask = ap->ops->read_id(dev, &tf, id);
1916 	else
1917 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1918 
1919 	if (err_mask) {
1920 		if (err_mask & AC_ERR_NODEV_HINT) {
1921 			ata_dev_dbg(dev, "NODEV after polling detection\n");
1922 			return -ENOENT;
1923 		}
1924 
1925 		if (is_semb) {
1926 			ata_dev_info(dev,
1927 		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1928 			/* SEMB is not supported yet */
1929 			*p_class = ATA_DEV_SEMB_UNSUP;
1930 			return 0;
1931 		}
1932 
1933 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1934 			/* Device or controller might have reported
1935 			 * the wrong device class.  Give a shot at the
1936 			 * other IDENTIFY if the current one is
1937 			 * aborted by the device.
1938 			 */
1939 			if (may_fallback) {
1940 				may_fallback = 0;
1941 
1942 				if (class == ATA_DEV_ATA)
1943 					class = ATA_DEV_ATAPI;
1944 				else
1945 					class = ATA_DEV_ATA;
1946 				goto retry;
1947 			}
1948 
1949 			/* Control reaches here iff the device aborted
1950 			 * both flavors of IDENTIFYs which happens
1951 			 * sometimes with phantom devices.
1952 			 */
1953 			ata_dev_dbg(dev,
1954 				    "both IDENTIFYs aborted, assuming NODEV\n");
1955 			return -ENOENT;
1956 		}
1957 
1958 		rc = -EIO;
1959 		reason = "I/O error";
1960 		goto err_out;
1961 	}
1962 
1963 	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1964 		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1965 			    "class=%d may_fallback=%d tried_spinup=%d\n",
1966 			    class, may_fallback, tried_spinup);
1967 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1968 			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1969 	}
1970 
1971 	/* Falling back doesn't make sense if ID data was read
1972 	 * successfully at least once.
1973 	 */
1974 	may_fallback = 0;
1975 
1976 	swap_buf_le16(id, ATA_ID_WORDS);
1977 
1978 	/* sanity check */
1979 	rc = -EINVAL;
1980 	reason = "device reports invalid type";
1981 
1982 	if (class == ATA_DEV_ATA) {
1983 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1984 			goto err_out;
1985 		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1986 							ata_id_is_ata(id)) {
1987 			ata_dev_dbg(dev,
1988 				"host indicates ignore ATA devices, ignored\n");
1989 			return -ENOENT;
1990 		}
1991 	} else {
1992 		if (ata_id_is_ata(id))
1993 			goto err_out;
1994 	}
1995 
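	/*
	 * IDENTIFY word 2 values 0x37c8 and 0x738c indicate the device powered
	 * up in standby and needs the SET FEATURES spin-up subcommand; 0x37c8
	 * additionally means the IDENTIFY data is incomplete until spin-up.
	 */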
1996 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1997 		tried_spinup = 1;
1998 		/*
1999 		 * Drive powered-up in standby mode, and requires a specific
2000 		 * SET_FEATURES spin-up subcommand before it will accept
2001 		 * anything other than the original IDENTIFY command.
2002 		 */
2003 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2004 		if (err_mask && id[2] != 0x738c) {
2005 			rc = -EIO;
2006 			reason = "SPINUP failed";
2007 			goto err_out;
2008 		}
2009 		/*
2010 		 * If the drive initially returned incomplete IDENTIFY info,
2011 		 * we now must reissue the IDENTIFY command.
2012 		 */
2013 		if (id[2] == 0x37c8)
2014 			goto retry;
2015 	}
2016 
2017 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2018 		/*
2019 		 * The exact sequence expected by certain pre-ATA4 drives is:
2020 		 * SRST RESET
2021 		 * IDENTIFY (optional in early ATA)
2022 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2023 		 * anything else..
2024 		 * Some drives were very specific about that exact sequence.
2025 		 *
2026 		 * Note that ATA4 says LBA is mandatory so the second check
2027 		 * should never trigger.
2028 		 */
2029 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2030 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2031 			if (err_mask) {
2032 				rc = -EIO;
2033 				reason = "INIT_DEV_PARAMS failed";
2034 				goto err_out;
2035 			}
2036 
2037 			/* current CHS translation info (id[53-58]) might be
2038 			 * changed. reread the identify device info.
2039 			 */
2040 			flags &= ~ATA_READID_POSTRESET;
2041 			goto retry;
2042 		}
2043 	}
2044 
2045 	*p_class = class;
2046 
2047 	return 0;
2048 
2049  err_out:
2050 	if (ata_msg_warn(ap))
2051 		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2052 			     reason, err_mask);
2053 	return rc;
2054 }
2055 
2056 static int ata_do_link_spd_horkage(struct ata_device *dev)
2057 {
2058 	struct ata_link *plink = ata_dev_phys_link(dev);
2059 	u32 target, target_limit;
2060 
2061 	if (!sata_scr_valid(plink))
2062 		return 0;
2063 
2064 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2065 		target = 1;
2066 	else
2067 		return 0;
2068 
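	/* sata_spd_limit is a mask of allowed speeds: bit 0 = 1.5Gbps, bit 1 = 3.0Gbps, ... */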
2069 	target_limit = (1 << target) - 1;
2070 
2071 	/* if already on stricter limit, no need to push further */
2072 	if (plink->sata_spd_limit <= target_limit)
2073 		return 0;
2074 
2075 	plink->sata_spd_limit = target_limit;
2076 
2077 	/* Request another EH round by returning -EAGAIN if link is
2078 	 * going faster than the target speed.  Forward progress is
2079 	 * guaranteed by setting sata_spd_limit to target_limit above.
2080 	 */
2081 	if (plink->sata_spd > target) {
2082 		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2083 			     sata_spd_string(target));
2084 		return -EAGAIN;
2085 	}
2086 	return 0;
2087 }
2088 
2089 static inline u8 ata_dev_knobble(struct ata_device *dev)
2090 {
2091 	struct ata_port *ap = dev->link->ap;
2092 
2093 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2094 		return 0;
2095 
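	/* a PATA device behind a SATA bridge: the port cable reports SATA but
	 * the device's IDENTIFY data does not
	 */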
2096 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2097 }
2098 
2099 static int ata_dev_config_ncq(struct ata_device *dev,
2100 			       char *desc, size_t desc_sz)
2101 {
2102 	struct ata_port *ap = dev->link->ap;
2103 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
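	/* hdepth: queue depth usable by the host; ddepth: depth the device reports */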
2104 	unsigned int err_mask;
2105 	char *aa_desc = "";
2106 
2107 	if (!ata_id_has_ncq(dev->id)) {
2108 		desc[0] = '\0';
2109 		return 0;
2110 	}
2111 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2112 		snprintf(desc, desc_sz, "NCQ (not used)");
2113 		return 0;
2114 	}
2115 	if (ap->flags & ATA_FLAG_NCQ) {
2116 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2117 		dev->flags |= ATA_DFLAG_NCQ;
2118 	}
2119 
2120 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2121 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2122 		ata_id_has_fpdma_aa(dev->id)) {
2123 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2124 			SATA_FPDMA_AA);
2125 		if (err_mask) {
2126 			ata_dev_err(dev,
2127 				    "failed to enable AA (error_mask=0x%x)\n",
2128 				    err_mask);
2129 			if (err_mask != AC_ERR_DEV) {
2130 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2131 				return -EIO;
2132 			}
2133 		} else
2134 			aa_desc = ", AA";
2135 	}
2136 
2137 	if (hdepth >= ddepth)
2138 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2139 	else
2140 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2141 			ddepth, aa_desc);
2142 
2143 	if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
2144 	    ata_id_has_ncq_send_and_recv(dev->id)) {
2145 		err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2146 					     0, ap->sector_buf, 1);
2147 		if (err_mask) {
2148 			ata_dev_dbg(dev,
2149 				    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2150 				    err_mask);
2151 		} else {
2152 			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2153 			memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
2154 				ATA_LOG_NCQ_SEND_RECV_SIZE);
2155 		}
2156 	}
2157 
2158 	return 0;
2159 }
2160 
2161 /**
2162  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2163  *	@dev: Target device to configure
2164  *
2165  *	Configure @dev according to @dev->id.  Generic and low-level
2166  *	driver specific fixups are also applied.
2167  *
2168  *	LOCKING:
2169  *	Kernel thread context (may sleep)
2170  *
2171  *	RETURNS:
2172  *	0 on success, -errno otherwise
2173  */
2174 int ata_dev_configure(struct ata_device *dev)
2175 {
2176 	struct ata_port *ap = dev->link->ap;
2177 	struct ata_eh_context *ehc = &dev->link->eh_context;
2178 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2179 	const u16 *id = dev->id;
2180 	unsigned long xfer_mask;
2181 	unsigned int err_mask;
2182 	char revbuf[7];		/* XYZ-99\0 */
2183 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2184 	char modelbuf[ATA_ID_PROD_LEN+1];
2185 	int rc;
2186 
2187 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2188 		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2189 		return 0;
2190 	}
2191 
2192 	if (ata_msg_probe(ap))
2193 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2194 
2195 	/* set horkage */
2196 	dev->horkage |= ata_dev_blacklisted(dev);
2197 	ata_force_horkage(dev);
2198 
2199 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2200 		ata_dev_info(dev, "unsupported device, disabling\n");
2201 		ata_dev_disable(dev);
2202 		return 0;
2203 	}
2204 
2205 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2206 	    dev->class == ATA_DEV_ATAPI) {
2207 		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2208 			     atapi_enabled ? "not supported with this driver"
2209 			     : "disabled");
2210 		ata_dev_disable(dev);
2211 		return 0;
2212 	}
2213 
2214 	rc = ata_do_link_spd_horkage(dev);
2215 	if (rc)
2216 		return rc;
2217 
2218 	/* let ACPI work its magic */
2219 	rc = ata_acpi_on_devcfg(dev);
2220 	if (rc)
2221 		return rc;
2222 
2223 	/* massage HPA, do it early as it might change IDENTIFY data */
2224 	rc = ata_hpa_resize(dev);
2225 	if (rc)
2226 		return rc;
2227 
2228 	/* print device capabilities */
2229 	if (ata_msg_probe(ap))
2230 		ata_dev_dbg(dev,
2231 			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2232 			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2233 			    __func__,
2234 			    id[49], id[82], id[83], id[84],
2235 			    id[85], id[86], id[87], id[88]);
2236 
2237 	/* initialize to-be-configured parameters */
2238 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2239 	dev->max_sectors = 0;
2240 	dev->cdb_len = 0;
2241 	dev->n_sectors = 0;
2242 	dev->cylinders = 0;
2243 	dev->heads = 0;
2244 	dev->sectors = 0;
2245 	dev->multi_count = 0;
2246 
2247 	/*
2248 	 * common ATA, ATAPI feature tests
2249 	 */
2250 
2251 	/* find max transfer mode; for printk only */
2252 	xfer_mask = ata_id_xfermask(id);
2253 
2254 	if (ata_msg_probe(ap))
2255 		ata_dump_id(id);
2256 
2257 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2258 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2259 			sizeof(fwrevbuf));
2260 
2261 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2262 			sizeof(modelbuf));
2263 
2264 	/* ATA-specific feature tests */
2265 	if (dev->class == ATA_DEV_ATA) {
2266 		if (ata_id_is_cfa(id)) {
2267 			/* CPRM may make this media unusable */
2268 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2269 				ata_dev_warn(dev,
2270 	"supports DRM functions and may not be fully accessible\n");
2271 			snprintf(revbuf, 7, "CFA");
2272 		} else {
2273 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2274 			/* Warn the user if the device has TPM extensions */
2275 			if (ata_id_has_tpm(id))
2276 				ata_dev_warn(dev,
2277 	"supports DRM functions and may not be fully accessible\n");
2278 		}
2279 
2280 		dev->n_sectors = ata_id_n_sectors(id);
2281 
2282 		/* get current R/W Multiple count setting */
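		/* IDENTIFY word 47 holds the max sectors per READ/WRITE MULTIPLE;
		 * word 59 bit 8 says the current multiple sector setting is valid
		 */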
2283 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2284 			unsigned int max = dev->id[47] & 0xff;
2285 			unsigned int cnt = dev->id[59] & 0xff;
2286 			/* only recognize/allow powers of two here */
2287 			if (is_power_of_2(max) && is_power_of_2(cnt))
2288 				if (cnt <= max)
2289 					dev->multi_count = cnt;
2290 		}
2291 
2292 		if (ata_id_has_lba(id)) {
2293 			const char *lba_desc;
2294 			char ncq_desc[24];
2295 
2296 			lba_desc = "LBA";
2297 			dev->flags |= ATA_DFLAG_LBA;
2298 			if (ata_id_has_lba48(id)) {
2299 				dev->flags |= ATA_DFLAG_LBA48;
2300 				lba_desc = "LBA48";
2301 
2302 				if (dev->n_sectors >= (1UL << 28) &&
2303 				    ata_id_has_flush_ext(id))
2304 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2305 			}
2306 
2307 			/* config NCQ */
2308 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2309 			if (rc)
2310 				return rc;
2311 
2312 			/* print device info to dmesg */
2313 			if (ata_msg_drv(ap) && print_info) {
2314 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2315 					     revbuf, modelbuf, fwrevbuf,
2316 					     ata_mode_string(xfer_mask));
2317 				ata_dev_info(dev,
2318 					     "%llu sectors, multi %u: %s %s\n",
2319 					(unsigned long long)dev->n_sectors,
2320 					dev->multi_count, lba_desc, ncq_desc);
2321 			}
2322 		} else {
2323 			/* CHS */
2324 
2325 			/* Default translation */
2326 			dev->cylinders	= id[1];
2327 			dev->heads	= id[3];
2328 			dev->sectors	= id[6];
2329 
2330 			if (ata_id_current_chs_valid(id)) {
2331 				/* Current CHS translation is valid. */
2332 				dev->cylinders = id[54];
2333 				dev->heads     = id[55];
2334 				dev->sectors   = id[56];
2335 			}
2336 
2337 			/* print device info to dmesg */
2338 			if (ata_msg_drv(ap) && print_info) {
2339 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2340 					     revbuf,	modelbuf, fwrevbuf,
2341 					     ata_mode_string(xfer_mask));
2342 				ata_dev_info(dev,
2343 					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2344 					     (unsigned long long)dev->n_sectors,
2345 					     dev->multi_count, dev->cylinders,
2346 					     dev->heads, dev->sectors);
2347 			}
2348 		}
2349 
2350 		/* Check and mark DevSlp capability. Get DevSlp timing variables
2351 		 * from SATA Settings page of Identify Device Data Log.
2352 		 */
2353 		if (ata_id_has_devslp(dev->id)) {
2354 			u8 *sata_setting = ap->sector_buf;
2355 			int i, j;
2356 
2357 			dev->flags |= ATA_DFLAG_DEVSLP;
2358 			err_mask = ata_read_log_page(dev,
2359 						     ATA_LOG_SATA_ID_DEV_DATA,
2360 						     ATA_LOG_SATA_SETTINGS,
2361 						     sata_setting,
2362 						     1);
2363 			if (err_mask)
2364 				ata_dev_dbg(dev,
2365 					    "failed to get Identify Device Data, Emask 0x%x\n",
2366 					    err_mask);
2367 			else
2368 				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2369 					j = ATA_LOG_DEVSLP_OFFSET + i;
2370 					dev->devslp_timing[i] = sata_setting[j];
2371 				}
2372 		}
2373 
2374 		dev->cdb_len = 16;
2375 	}
2376 
2377 	/* ATAPI-specific feature tests */
2378 	else if (dev->class == ATA_DEV_ATAPI) {
2379 		const char *cdb_intr_string = "";
2380 		const char *atapi_an_string = "";
2381 		const char *dma_dir_string = "";
2382 		u32 sntf;
2383 
2384 		rc = atapi_cdb_len(id);
2385 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2386 			if (ata_msg_warn(ap))
2387 				ata_dev_warn(dev, "unsupported CDB len\n");
2388 			rc = -EINVAL;
2389 			goto err_out_nosup;
2390 		}
2391 		dev->cdb_len = (unsigned int) rc;
2392 
2393 		/* Enable ATAPI AN if both the host and device have
2394 		 * the support.  If PMP is attached, SNTF is required
2395 		 * to enable ATAPI AN to discern between PHY status
2396 		 * changed notifications and ATAPI ANs.
2397 		 */
2398 		if (atapi_an &&
2399 		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2400 		    (!sata_pmp_attached(ap) ||
2401 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2402 			/* issue SET feature command to turn this on */
2403 			err_mask = ata_dev_set_feature(dev,
2404 					SETFEATURES_SATA_ENABLE, SATA_AN);
2405 			if (err_mask)
2406 				ata_dev_err(dev,
2407 					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2408 					    err_mask);
2409 			else {
2410 				dev->flags |= ATA_DFLAG_AN;
2411 				atapi_an_string = ", ATAPI AN";
2412 			}
2413 		}
2414 
2415 		if (ata_id_cdb_intr(dev->id)) {
2416 			dev->flags |= ATA_DFLAG_CDB_INTR;
2417 			cdb_intr_string = ", CDB intr";
2418 		}
2419 
2420 		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2421 			dev->flags |= ATA_DFLAG_DMADIR;
2422 			dma_dir_string = ", DMADIR";
2423 		}
2424 
2425 		if (ata_id_has_da(dev->id)) {
2426 			dev->flags |= ATA_DFLAG_DA;
2427 			zpodd_init(dev);
2428 		}
2429 
2430 		/* print device info to dmesg */
2431 		if (ata_msg_drv(ap) && print_info)
2432 			ata_dev_info(dev,
2433 				     "ATAPI: %s, %s, max %s%s%s%s\n",
2434 				     modelbuf, fwrevbuf,
2435 				     ata_mode_string(xfer_mask),
2436 				     cdb_intr_string, atapi_an_string,
2437 				     dma_dir_string);
2438 	}
2439 
2440 	/* determine max_sectors */
2441 	dev->max_sectors = ATA_MAX_SECTORS;
2442 	if (dev->flags & ATA_DFLAG_LBA48)
2443 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2444 
2445 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2446 	   200 sectors */
2447 	if (ata_dev_knobble(dev)) {
2448 		if (ata_msg_drv(ap) && print_info)
2449 			ata_dev_info(dev, "applying bridge limits\n");
2450 		dev->udma_mask &= ATA_UDMA5;
2451 		dev->max_sectors = ATA_MAX_SECTORS;
2452 	}
2453 
2454 	if ((dev->class == ATA_DEV_ATAPI) &&
2455 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2456 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2457 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2458 	}
2459 
2460 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2461 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2462 					 dev->max_sectors);
2463 
2464 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2465 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2466 
2467 	if (ap->ops->dev_config)
2468 		ap->ops->dev_config(dev);
2469 
2470 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2471 		/* Let the user know. We don't want to disallow opens for
2472 		   rescue purposes, or in case the vendor is just a blithering
2473 		   idiot. Do this after the dev_config call as some controllers
2474 		   with buggy firmware may want to avoid reporting false device
2475 		   bugs */
2476 
2477 		if (print_info) {
2478 			ata_dev_warn(dev,
2479 "Drive reports diagnostics failure. This may indicate a drive\n");
2480 			ata_dev_warn(dev,
2481 "fault or invalid emulation. Contact drive vendor for information.\n");
2482 		}
2483 	}
2484 
2485 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2486 		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2487 		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2488 	}
2489 
2490 	return 0;
2491 
2492 err_out_nosup:
2493 	if (ata_msg_probe(ap))
2494 		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2495 	return rc;
2496 }
2497 
2498 /**
2499  *	ata_cable_40wire	-	return 40 wire cable type
2500  *	@ap: port
2501  *
2502  *	Helper method for drivers which want to hardwire 40 wire cable
2503  *	detection.
2504  */
2505 
2506 int ata_cable_40wire(struct ata_port *ap)
2507 {
2508 	return ATA_CBL_PATA40;
2509 }
2510 
2511 /**
2512  *	ata_cable_80wire	-	return 80 wire cable type
2513  *	@ap: port
2514  *
2515  *	Helper method for drivers which want to hardwire 80 wire cable
2516  *	detection.
2517  */
2518 
2519 int ata_cable_80wire(struct ata_port *ap)
2520 {
2521 	return ATA_CBL_PATA80;
2522 }
2523 
2524 /**
2525  *	ata_cable_unknown	-	return unknown PATA cable.
2526  *	@ap: port
2527  *
2528  *	Helper method for drivers which have no PATA cable detection.
2529  */
2530 
2531 int ata_cable_unknown(struct ata_port *ap)
2532 {
2533 	return ATA_CBL_PATA_UNK;
2534 }
2535 
2536 /**
2537  *	ata_cable_ignore	-	return ignored PATA cable.
2538  *	@ap: port
2539  *
2540  *	Helper method for drivers which don't use cable type to limit
2541  *	transfer mode.
2542  */
2543 int ata_cable_ignore(struct ata_port *ap)
2544 {
2545 	return ATA_CBL_PATA_IGN;
2546 }
2547 
2548 /**
2549  *	ata_cable_sata	-	return SATA cable type
2550  *	@ap: port
2551  *
2552  *	Helper method for drivers which have SATA cables
2553  */
2554 
2555 int ata_cable_sata(struct ata_port *ap)
2556 {
2557 	return ATA_CBL_SATA;
2558 }
2559 
2560 /**
2561  *	ata_bus_probe - Reset and probe ATA bus
2562  *	@ap: Bus to probe
2563  *
2564  *	Master ATA bus probing function.  Initiates a hardware-dependent
2565  *	bus reset, then attempts to identify any devices found on
2566  *	the bus.
2567  *
2568  *	LOCKING:
2569  *	PCI/etc. bus probe sem.
2570  *
2571  *	RETURNS:
2572  *	Zero on success, negative errno otherwise.
2573  */
2574 
2575 int ata_bus_probe(struct ata_port *ap)
2576 {
2577 	unsigned int classes[ATA_MAX_DEVICES];
2578 	int tries[ATA_MAX_DEVICES];
2579 	int rc;
2580 	struct ata_device *dev;
2581 
2582 	ata_for_each_dev(dev, &ap->link, ALL)
2583 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2584 
2585  retry:
2586 	ata_for_each_dev(dev, &ap->link, ALL) {
2587 		/* If we issue an SRST then an ATA drive (not ATAPI)
2588 		 * may change configuration and be in PIO0 timing. If
2589 		 * we do a hard reset (or are coming from power on)
2590 		 * this is true for ATA or ATAPI. Until we've set a
2591 		 * suitable controller mode we should not touch the
2592 		 * bus as we may be talking too fast.
2593 		 */
2594 		dev->pio_mode = XFER_PIO_0;
2595 		dev->dma_mode = 0xff;
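		/* 0xff means no DMA mode has been configured yet */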
2596 
2597 		/* If the controller has a pio mode setup function
2598 		 * then use it to set the chipset to rights. Don't
2599 		 * touch the DMA setup as that will be dealt with when
2600 		 * configuring devices.
2601 		 */
2602 		if (ap->ops->set_piomode)
2603 			ap->ops->set_piomode(ap, dev);
2604 	}
2605 
2606 	/* reset and determine device classes */
2607 	ap->ops->phy_reset(ap);
2608 
2609 	ata_for_each_dev(dev, &ap->link, ALL) {
2610 		if (dev->class != ATA_DEV_UNKNOWN)
2611 			classes[dev->devno] = dev->class;
2612 		else
2613 			classes[dev->devno] = ATA_DEV_NONE;
2614 
2615 		dev->class = ATA_DEV_UNKNOWN;
2616 	}
2617 
2618 	/* read IDENTIFY page and configure devices. We have to do the identify
2619 	   specific sequence bass-ackwards so that PDIAG- is released by
2620 	   the slave device */
2621 
2622 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2623 		if (tries[dev->devno])
2624 			dev->class = classes[dev->devno];
2625 
2626 		if (!ata_dev_enabled(dev))
2627 			continue;
2628 
2629 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2630 				     dev->id);
2631 		if (rc)
2632 			goto fail;
2633 	}
2634 
2635 	/* Now ask for the cable type as PDIAG- should have been released */
2636 	if (ap->ops->cable_detect)
2637 		ap->cbl = ap->ops->cable_detect(ap);
2638 
2639 	/* We may have SATA bridge glue hiding here irrespective of
2640 	 * the reported cable types and sensed types.  When SATA
2641 	 * drives indicate we have a bridge, we don't know which end
2642 	 * of the link the bridge is on, which is a problem.
2643 	 */
2644 	ata_for_each_dev(dev, &ap->link, ENABLED)
2645 		if (ata_id_is_sata(dev->id))
2646 			ap->cbl = ATA_CBL_SATA;
2647 
2648 	/* After the identify sequence we can now set up the devices. We do
2649 	   this in the normal order so that the user doesn't get confused */
2650 
2651 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2652 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2653 		rc = ata_dev_configure(dev);
2654 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2655 		if (rc)
2656 			goto fail;
2657 	}
2658 
2659 	/* configure transfer mode */
2660 	rc = ata_set_mode(&ap->link, &dev);
2661 	if (rc)
2662 		goto fail;
2663 
2664 	ata_for_each_dev(dev, &ap->link, ENABLED)
2665 		return 0;
2666 
2667 	return -ENODEV;
2668 
2669  fail:
2670 	tries[dev->devno]--;
2671 
2672 	switch (rc) {
2673 	case -EINVAL:
2674 		/* eeek, something went very wrong, give up */
2675 		tries[dev->devno] = 0;
2676 		break;
2677 
2678 	case -ENODEV:
2679 		/* give it just one more chance */
2680 		tries[dev->devno] = min(tries[dev->devno], 1);
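		/* fall through */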
2681 	case -EIO:
2682 		if (tries[dev->devno] == 1) {
2683 			/* This is the last chance, better to slow
2684 			 * down than lose it.
2685 			 */
2686 			sata_down_spd_limit(&ap->link, 0);
2687 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2688 		}
2689 	}
2690 
2691 	if (!tries[dev->devno])
2692 		ata_dev_disable(dev);
2693 
2694 	goto retry;
2695 }
2696 
2697 /**
2698  *	sata_print_link_status - Print SATA link status
2699  *	@link: SATA link to printk link status about
2700  *
2701  *	This function prints link speed and status of a SATA link.
2702  *
2703  *	LOCKING:
2704  *	None.
2705  */
2706 static void sata_print_link_status(struct ata_link *link)
2707 {
2708 	u32 sstatus, scontrol, tmp;
2709 
2710 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2711 		return;
2712 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2713 
2714 	if (ata_phys_link_online(link)) {
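		/* SStatus bits 7:4 (SPD) give the negotiated link speed */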
2715 		tmp = (sstatus >> 4) & 0xf;
2716 		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2717 			      sata_spd_string(tmp), sstatus, scontrol);
2718 	} else {
2719 		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2720 			      sstatus, scontrol);
2721 	}
2722 }
2723 
2724 /**
2725  *	ata_dev_pair		-	return other device on cable
2726  *	@adev: device
2727  *
2728  *	Obtain the other device on the same cable, or if none is
2729  *	present NULL is returned
2730  */
2731 
2732 struct ata_device *ata_dev_pair(struct ata_device *adev)
2733 {
2734 	struct ata_link *link = adev->link;
2735 	struct ata_device *pair = &link->device[1 - adev->devno];
2736 	if (!ata_dev_enabled(pair))
2737 		return NULL;
2738 	return pair;
2739 }
2740 
2741 /**
2742  *	sata_down_spd_limit - adjust SATA spd limit downward
2743  *	@link: Link to adjust SATA spd limit for
2744  *	@spd_limit: Additional limit
2745  *
2746  *	Adjust SATA spd limit of @link downward.  Note that this
2747  *	function only adjusts the limit.  The change must be applied
2748  *	using sata_set_spd().
2749  *
2750  *	If @spd_limit is non-zero, the speed is limited to equal to or
2751  *	lower than @spd_limit if such speed is supported.  If
2752  *	@spd_limit is slower than any supported speed, only the lowest
2753  *	supported speed is allowed.
2754  *
2755  *	LOCKING:
2756  *	Inherited from caller.
2757  *
2758  *	RETURNS:
2759  *	0 on success, negative errno on failure
2760  */
2761 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2762 {
2763 	u32 sstatus, spd, mask;
2764 	int rc, bit;
2765 
2766 	if (!sata_scr_valid(link))
2767 		return -EOPNOTSUPP;
2768 
2769 	/* If SCR can be read, use it to determine the current SPD.
2770 	 * If not, use cached value in link->sata_spd.
2771 	 */
2772 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2773 	if (rc == 0 && ata_sstatus_online(sstatus))
2774 		spd = (sstatus >> 4) & 0xf;
2775 	else
2776 		spd = link->sata_spd;
2777 
2778 	mask = link->sata_spd_limit;
2779 	if (mask <= 1)
2780 		return -EINVAL;
2781 
2782 	/* unconditionally mask off the highest bit */
2783 	bit = fls(mask) - 1;
2784 	mask &= ~(1 << bit);
2785 
2786 	/* Mask off all speeds higher than or equal to the current
2787 	 * one.  Force 1.5Gbps if current SPD is not available.
2788 	 */
2789 	if (spd > 1)
2790 		mask &= (1 << (spd - 1)) - 1;
2791 	else
2792 		mask &= 1;
2793 
2794 	/* were we already at the bottom? */
2795 	if (!mask)
2796 		return -EINVAL;
2797 
2798 	if (spd_limit) {
2799 		if (mask & ((1 << spd_limit) - 1))
2800 			mask &= (1 << spd_limit) - 1;
2801 		else {
2802 			bit = ffs(mask) - 1;
2803 			mask = 1 << bit;
2804 		}
2805 	}
2806 
2807 	link->sata_spd_limit = mask;
2808 
2809 	ata_link_warn(link, "limiting SATA link speed to %s\n",
2810 		      sata_spd_string(fls(mask)));
2811 
2812 	return 0;
2813 }
2814 
2815 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2816 {
2817 	struct ata_link *host_link = &link->ap->link;
2818 	u32 limit, target, spd;
2819 
2820 	limit = link->sata_spd_limit;
2821 
2822 	/* Don't configure downstream link faster than upstream link.
2823 	 * It doesn't speed up anything and some PMPs choke on such
2824 	 * configuration.
2825 	 */
2826 	if (!ata_is_host_link(link) && host_link->sata_spd)
2827 		limit &= (1 << host_link->sata_spd) - 1;
2828 
2829 	if (limit == UINT_MAX)
2830 		target = 0;
2831 	else
2832 		target = fls(limit);
2833 
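	/* SControl bits 7:4 (SPD) hold the speed limit; 0 means no restriction */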
2834 	spd = (*scontrol >> 4) & 0xf;
2835 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2836 
2837 	return spd != target;
2838 }
2839 
2840 /**
2841  *	sata_set_spd_needed - is SATA spd configuration needed
2842  *	@link: Link in question
2843  *
2844  *	Test whether the spd limit in SControl matches
2845  *	@link->sata_spd_limit.  This function is used to determine
2846  *	whether hardreset is necessary to apply SATA spd
2847  *	configuration.
2848  *
2849  *	LOCKING:
2850  *	Inherited from caller.
2851  *
2852  *	RETURNS:
2853  *	1 if SATA spd configuration is needed, 0 otherwise.
2854  */
2855 static int sata_set_spd_needed(struct ata_link *link)
2856 {
2857 	u32 scontrol;
2858 
2859 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2860 		return 1;
2861 
2862 	return __sata_set_spd_needed(link, &scontrol);
2863 }
2864 
2865 /**
2866  *	sata_set_spd - set SATA spd according to spd limit
2867  *	@link: Link to set SATA spd for
2868  *
2869  *	Set SATA spd of @link according to sata_spd_limit.
2870  *
2871  *	LOCKING:
2872  *	Inherited from caller.
2873  *
2874  *	RETURNS:
2875  *	0 if spd doesn't need to be changed, 1 if spd has been
2876  *	changed.  Negative errno if SCR registers are inaccessible.
2877  */
2878 int sata_set_spd(struct ata_link *link)
2879 {
2880 	u32 scontrol;
2881 	int rc;
2882 
2883 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2884 		return rc;
2885 
2886 	if (!__sata_set_spd_needed(link, &scontrol))
2887 		return 0;
2888 
2889 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2890 		return rc;
2891 
2892 	return 1;
2893 }
2894 
2895 /*
2896  * This mode timing computation functionality is ported over from
2897  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2898  */
2899 /*
2900  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2901  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2902  * for UDMA6, which is currently supported only by Maxtor drives.
2903  *
2904  * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
2905  */
2906 
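/* Columns follow the struct ata_timing field order: mode, setup, act8b,
 * rec8b, cyc8b, active, recover, dmack_hold, cycle, udma.
 */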
2907 static const struct ata_timing ata_timing[] = {
2908 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
2909 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
2910 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
2911 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
2912 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
2913 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
2914 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
2915 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
2916 
2917 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
2918 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
2919 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
2920 
2921 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
2922 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
2923 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
2924 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
2925 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
2926 
2927 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
2928 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
2929 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
2930 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
2931 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
2932 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
2933 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
2934 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
2935 
2936 	{ 0xFF }
2937 };
2938 
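/* ENOUGH() divides v by unit, rounding up; EZ() additionally maps an unused
 * (zero) field to zero instead of one unit.
 */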
2939 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2940 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2941 
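/* Quantize the nanosecond timings in @t into whole numbers of the bus clock
 * periods @T (PIO/MWDMA) and @UT (UDMA), storing the result in @q.
 */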
2942 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2943 {
2944 	q->setup	= EZ(t->setup      * 1000,  T);
2945 	q->act8b	= EZ(t->act8b      * 1000,  T);
2946 	q->rec8b	= EZ(t->rec8b      * 1000,  T);
2947 	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
2948 	q->active	= EZ(t->active     * 1000,  T);
2949 	q->recover	= EZ(t->recover    * 1000,  T);
2950 	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
2951 	q->cycle	= EZ(t->cycle      * 1000,  T);
2952 	q->udma		= EZ(t->udma       * 1000, UT);
2953 }
2954 
2955 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2956 		      struct ata_timing *m, unsigned int what)
2957 {
2958 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2959 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2960 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2961 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2962 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2963 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2964 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2965 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2966 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2967 }
2968 
2969 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2970 {
2971 	const struct ata_timing *t = ata_timing;
2972 
2973 	while (xfer_mode > t->mode)
2974 		t++;
2975 
2976 	if (xfer_mode == t->mode)
2977 		return t;
2978 
2979 	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2980 			__func__, xfer_mode);
2981 
2982 	return NULL;
2983 }
2984 
2985 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2986 		       struct ata_timing *t, int T, int UT)
2987 {
2988 	const u16 *id = adev->id;
2989 	const struct ata_timing *s;
2990 	struct ata_timing p;
2991 
2992 	/*
2993 	 * Find the mode.
2994 	 */
2995 
2996 	if (!(s = ata_timing_find_mode(speed)))
2997 		return -EINVAL;
2998 
2999 	memcpy(t, s, sizeof(*s));
3000 
3001 	/*
3002 	 * If the drive is an EIDE drive, it can tell us it needs extended
3003 	 * PIO/MW_DMA cycle timing.
3004 	 */
3005 
3006 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3007 		memset(&p, 0, sizeof(p));
3008 
3009 		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3010 			if (speed <= XFER_PIO_2)
3011 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3012 			else if ((speed <= XFER_PIO_4) ||
3013 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3014 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3015 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3016 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3017 
3018 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3019 	}
3020 
3021 	/*
3022 	 * Convert the timing to bus clock counts.
3023 	 */
3024 
3025 	ata_timing_quantize(t, t, T, UT);
3026 
3027 	/*
3028 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3029 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3030 	 * DMA cycle timing is no faster than the fastest PIO timing.
3031 	 */
3032 
3033 	if (speed > XFER_PIO_6) {
3034 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3035 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3036 	}
3037 
3038 	/*
3039 	 * Lengthen active & recovery time so that cycle time is correct.
3040 	 */
3041 
3042 	if (t->act8b + t->rec8b < t->cyc8b) {
3043 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3044 		t->rec8b = t->cyc8b - t->act8b;
3045 	}
3046 
3047 	if (t->active + t->recover < t->cycle) {
3048 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3049 		t->recover = t->cycle - t->active;
3050 	}
3051 
3052 	/* In a few cases quantisation may produce enough errors to
3053 	   leave t->cycle too low for the sum of active and recovery;
3054 	   if so, we must correct this */
3055 	if (t->active + t->recover > t->cycle)
3056 		t->cycle = t->active + t->recover;
3057 
3058 	return 0;
3059 }
3060 
3061 /**
3062  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3063  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3064  *	@cycle: cycle duration in ns
3065  *
3066  *	Return matching xfer mode for @cycle.  The returned mode is of
3067  *	the transfer type specified by @xfer_shift.  If @cycle is too
3068  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3069  *	than the fastest known mode, the fastest mode is returned.
3070  *
3071  *	LOCKING:
3072  *	None.
3073  *
3074  *	RETURNS:
3075  *	Matching xfer_mode, 0xff if no match found.
3076  */
3077 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3078 {
3079 	u8 base_mode = 0xff, last_mode = 0xff;
3080 	const struct ata_xfer_ent *ent;
3081 	const struct ata_timing *t;
3082 
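	/* find the lowest (base) mode of the requested transfer type */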
3083 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3084 		if (ent->shift == xfer_shift)
3085 			base_mode = ent->base;
3086 
3087 	for (t = ata_timing_find_mode(base_mode);
3088 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3089 		unsigned short this_cycle;
3090 
3091 		switch (xfer_shift) {
3092 		case ATA_SHIFT_PIO:
3093 		case ATA_SHIFT_MWDMA:
3094 			this_cycle = t->cycle;
3095 			break;
3096 		case ATA_SHIFT_UDMA:
3097 			this_cycle = t->udma;
3098 			break;
3099 		default:
3100 			return 0xff;
3101 		}
3102 
3103 		if (cycle > this_cycle)
3104 			break;
3105 
3106 		last_mode = t->mode;
3107 	}
3108 
3109 	return last_mode;
3110 }
3111 
3112 /**
3113  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3114  *	@dev: Device to adjust xfer masks
3115  *	@sel: ATA_DNXFER_* selector
3116  *
3117  *	Adjust xfer masks of @dev downward.  Note that this function
3118  *	does not apply the change.  Invoking ata_set_mode() afterwards
3119  *	will apply the limit.
3120  *
3121  *	LOCKING:
3122  *	Inherited from caller.
3123  *
3124  *	RETURNS:
3125  *	0 on success, negative errno on failure
3126  */
3127 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3128 {
3129 	char buf[32];
3130 	unsigned long orig_mask, xfer_mask;
3131 	unsigned long pio_mask, mwdma_mask, udma_mask;
3132 	int quiet, highbit;
3133 
3134 	quiet = !!(sel & ATA_DNXFER_QUIET);
3135 	sel &= ~ATA_DNXFER_QUIET;
3136 
3137 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3138 						  dev->mwdma_mask,
3139 						  dev->udma_mask);
3140 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3141 
3142 	switch (sel) {
3143 	case ATA_DNXFER_PIO:
3144 		highbit = fls(pio_mask) - 1;
3145 		pio_mask &= ~(1 << highbit);
3146 		break;
3147 
3148 	case ATA_DNXFER_DMA:
3149 		if (udma_mask) {
3150 			highbit = fls(udma_mask) - 1;
3151 			udma_mask &= ~(1 << highbit);
3152 			if (!udma_mask)
3153 				return -ENOENT;
3154 		} else if (mwdma_mask) {
3155 			highbit = fls(mwdma_mask) - 1;
3156 			mwdma_mask &= ~(1 << highbit);
3157 			if (!mwdma_mask)
3158 				return -ENOENT;
3159 		}
3160 		break;
3161 
3162 	case ATA_DNXFER_40C:
3163 		udma_mask &= ATA_UDMA_MASK_40C;
3164 		break;
3165 
3166 	case ATA_DNXFER_FORCE_PIO0:
3167 		pio_mask &= 1;
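		/* fall through */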
3168 	case ATA_DNXFER_FORCE_PIO:
3169 		mwdma_mask = 0;
3170 		udma_mask = 0;
3171 		break;
3172 
3173 	default:
3174 		BUG();
3175 	}
3176 
3177 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3178 
3179 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3180 		return -ENOENT;
3181 
3182 	if (!quiet) {
3183 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3184 			snprintf(buf, sizeof(buf), "%s:%s",
3185 				 ata_mode_string(xfer_mask),
3186 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3187 		else
3188 			snprintf(buf, sizeof(buf), "%s",
3189 				 ata_mode_string(xfer_mask));
3190 
3191 		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3192 	}
3193 
3194 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3195 			    &dev->udma_mask);
3196 
3197 	return 0;
3198 }
3199 
3200 static int ata_dev_set_mode(struct ata_device *dev)
3201 {
3202 	struct ata_port *ap = dev->link->ap;
3203 	struct ata_eh_context *ehc = &dev->link->eh_context;
3204 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3205 	const char *dev_err_whine = "";
3206 	int ign_dev_err = 0;
3207 	unsigned int err_mask = 0;
3208 	int rc;
3209 
3210 	dev->flags &= ~ATA_DFLAG_PIO;
3211 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3212 		dev->flags |= ATA_DFLAG_PIO;
3213 
3214 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3215 		dev_err_whine = " (SET_XFERMODE skipped)";
3216 	else {
3217 		if (nosetxfer)
3218 			ata_dev_warn(dev,
3219 				     "NOSETXFER but PATA detected - can't "
3220 				     "skip SETXFER, might malfunction\n");
3221 		err_mask = ata_dev_set_xfermode(dev);
3222 	}
3223 
3224 	if (err_mask & ~AC_ERR_DEV)
3225 		goto fail;
3226 
3227 	/* revalidate */
3228 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3229 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3230 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3231 	if (rc)
3232 		return rc;
3233 
3234 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3235 		/* Old CFA may refuse this command, which is just fine */
3236 		if (ata_id_is_cfa(dev->id))
3237 			ign_dev_err = 1;
3238 		/* Catch several broken garbage emulations plus some pre
3239 		   ATA devices */
3240 		if (ata_id_major_version(dev->id) == 0 &&
3241 					dev->pio_mode <= XFER_PIO_2)
3242 			ign_dev_err = 1;
3243 		/* Some very old devices and some bad newer ones fail
3244 		   any kind of SET_XFERMODE request but support PIO0-2
3245 		   timings and no IORDY */
3246 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3247 			ign_dev_err = 1;
3248 	}
3249 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3250 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
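	/* IDENTIFY word 63 bit 8 reports that MWDMA0 is currently selected */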
3251 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3252 	    dev->dma_mode == XFER_MW_DMA_0 &&
3253 	    (dev->id[63] >> 8) & 1)
3254 		ign_dev_err = 1;
3255 
3256 	/* if the device is actually configured correctly, ignore dev err */
3257 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3258 		ign_dev_err = 1;
3259 
3260 	if (err_mask & AC_ERR_DEV) {
3261 		if (!ign_dev_err)
3262 			goto fail;
3263 		else
3264 			dev_err_whine = " (device error ignored)";
3265 	}
3266 
3267 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3268 		dev->xfer_shift, (int)dev->xfer_mode);
3269 
3270 	ata_dev_info(dev, "configured for %s%s\n",
3271 		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3272 		     dev_err_whine);
3273 
3274 	return 0;
3275 
3276  fail:
3277 	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3278 	return -EIO;
3279 }
3280 
3281 /**
3282  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3283  *	@link: link on which timings will be programmed
3284  *	@r_failed_dev: out parameter for failed device
3285  *
3286  *	Standard implementation of the function used to tune and set
3287  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3288  *	ata_dev_set_mode() fails, pointer to the failing device is
3289  *	returned in @r_failed_dev.
3290  *
3291  *	LOCKING:
3292  *	PCI/etc. bus probe sem.
3293  *
3294  *	RETURNS:
3295  *	0 on success, negative errno otherwise
3296  */
3297 
3298 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3299 {
3300 	struct ata_port *ap = link->ap;
3301 	struct ata_device *dev;
3302 	int rc = 0, used_dma = 0, found = 0;
3303 
3304 	/* step 1: calculate xfer_mask */
3305 	ata_for_each_dev(dev, link, ENABLED) {
3306 		unsigned long pio_mask, dma_mask;
3307 		unsigned int mode_mask;
3308 
3309 		mode_mask = ATA_DMA_MASK_ATA;
3310 		if (dev->class == ATA_DEV_ATAPI)
3311 			mode_mask = ATA_DMA_MASK_ATAPI;
3312 		else if (ata_id_is_cfa(dev->id))
3313 			mode_mask = ATA_DMA_MASK_CFA;
3314 
3315 		ata_dev_xfermask(dev);
3316 		ata_force_xfermask(dev);
3317 
3318 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3319 
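		/* honour the libata dma module parameter for this device class */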
3320 		if (libata_dma_mask & mode_mask)
3321 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3322 						     dev->udma_mask);
3323 		else
3324 			dma_mask = 0;
3325 
3326 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3327 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3328 
3329 		found = 1;
3330 		if (ata_dma_enabled(dev))
3331 			used_dma = 1;
3332 	}
3333 	if (!found)
3334 		goto out;
3335 
3336 	/* step 2: always set host PIO timings */
3337 	ata_for_each_dev(dev, link, ENABLED) {
3338 		if (dev->pio_mode == 0xff) {
3339 			ata_dev_warn(dev, "no PIO support\n");
3340 			rc = -EINVAL;
3341 			goto out;
3342 		}
3343 
3344 		dev->xfer_mode = dev->pio_mode;
3345 		dev->xfer_shift = ATA_SHIFT_PIO;
3346 		if (ap->ops->set_piomode)
3347 			ap->ops->set_piomode(ap, dev);
3348 	}
3349 
3350 	/* step 3: set host DMA timings */
3351 	ata_for_each_dev(dev, link, ENABLED) {
3352 		if (!ata_dma_enabled(dev))
3353 			continue;
3354 
3355 		dev->xfer_mode = dev->dma_mode;
3356 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3357 		if (ap->ops->set_dmamode)
3358 			ap->ops->set_dmamode(ap, dev);
3359 	}
3360 
3361 	/* step 4: update devices' xfer mode */
3362 	ata_for_each_dev(dev, link, ENABLED) {
3363 		rc = ata_dev_set_mode(dev);
3364 		if (rc)
3365 			goto out;
3366 	}
3367 
3368 	/* Record simplex status. If we selected DMA then the other
3369 	 * host channels are not permitted to do so.
3370 	 */
3371 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3372 		ap->host->simplex_claimed = ap;
3373 
3374  out:
3375 	if (rc)
3376 		*r_failed_dev = dev;
3377 	return rc;
3378 }
3379 
3380 /**
3381  *	ata_wait_ready - wait for link to become ready
3382  *	@link: link to be waited on
3383  *	@deadline: deadline jiffies for the operation
3384  *	@check_ready: callback to check link readiness
3385  *
3386  *	Wait for @link to become ready.  @check_ready should return
3387  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3388  *	link doesn't seem to be occupied, other errno for other error
3389  *	conditions.
3390  *
3391  *	Transient -ENODEV conditions are allowed for
3392  *	ATA_TMOUT_FF_WAIT.
3393  *
3394  *	LOCKING:
3395  *	EH context.
3396  *
3397  *	RETURNS:
3398  *	0 if @link is ready before @deadline; otherwise, -errno.
3399  */
3400 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3401 		   int (*check_ready)(struct ata_link *link))
3402 {
3403 	unsigned long start = jiffies;
3404 	unsigned long nodev_deadline;
3405 	int warned = 0;
3406 
3407 	/* choose which 0xff timeout to use, read comment in libata.h */
3408 	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3409 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3410 	else
3411 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3412 
3413 	/* Slave readiness can't be tested separately from master.  On
3414 	 * M/S emulation configuration, this function should be called
3415 	 * only on the master and it will handle both master and slave.
3416 	 */
3417 	WARN_ON(link == link->ap->slave_link);
3418 
3419 	if (time_after(nodev_deadline, deadline))
3420 		nodev_deadline = deadline;
3421 
3422 	while (1) {
3423 		unsigned long now = jiffies;
3424 		int ready, tmp;
3425 
3426 		ready = tmp = check_ready(link);
3427 		if (ready > 0)
3428 			return 0;
3429 
3430 		/*
3431 		 * -ENODEV could be transient.  Ignore -ENODEV if link
3432 		 * is online.  Also, some SATA devices take a long
3433 		 * time to clear 0xff after reset.  Wait for
3434 		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3435 		 * offline.
3436 		 *
3437 		 * Note that some PATA controllers (pata_ali) explode
3438 		 * if status register is read more than once when
3439 		 * there's no device attached.
3440 		 */
3441 		if (ready == -ENODEV) {
3442 			if (ata_link_online(link))
3443 				ready = 0;
3444 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3445 				 !ata_link_offline(link) &&
3446 				 time_before(now, nodev_deadline))
3447 				ready = 0;
3448 		}
3449 
3450 		if (ready)
3451 			return ready;
3452 		if (time_after(now, deadline))
3453 			return -EBUSY;
3454 
3455 		if (!warned && time_after(now, start + 5 * HZ) &&
3456 		    (deadline - now > 3 * HZ)) {
3457 			ata_link_warn(link,
3458 				"link is slow to respond, please be patient "
3459 				"(ready=%d)\n", tmp);
3460 			warned = 1;
3461 		}
3462 
3463 		ata_msleep(link->ap, 50);
3464 	}
3465 }
3466 
3467 /**
3468  *	ata_wait_after_reset - wait for link to become ready after reset
3469  *	@link: link to be waited on
3470  *	@deadline: deadline jiffies for the operation
3471  *	@check_ready: callback to check link readiness
3472  *
3473  *	Wait for @link to become ready after reset.
3474  *
3475  *	LOCKING:
3476  *	EH context.
3477  *
3478  *	RETURNS:
3479  *	0 if @link is ready before @deadline; otherwise, -errno.
3480  */
3481 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3482 				int (*check_ready)(struct ata_link *link))
3483 {
3484 	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3485 
3486 	return ata_wait_ready(link, deadline, check_ready);
3487 }
3488 
3489 /**
3490  *	sata_link_debounce - debounce SATA phy status
3491  *	@link: ATA link to debounce SATA phy status for
3492  *	@params: timing parameters { interval, duration, timeout } in msec
3493  *	@deadline: deadline jiffies for the operation
3494  *
3495  *	Make sure SStatus of @link reaches stable state, determined by
3496  *	holding the same value where DET is not 1 for @duration polled
3497  *	every @interval, before @timeout.  Timeout constrains the
3498  *	beginning of the stable state.  Because DET gets stuck at 1 on
3499  *	some controllers after hot unplugging, this function waits
3500  *	until timeout and then returns 0 if DET is stable at 1.
3501  *
3502  *	@timeout is further limited by @deadline.  The sooner of the
3503  *	two is used.
3504  *
3505  *	LOCKING:
3506  *	Kernel thread context (may sleep)
3507  *
3508  *	RETURNS:
3509  *	0 on success, -errno on failure.
3510  */
3511 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3512 		       unsigned long deadline)
3513 {
3514 	unsigned long interval = params[0];
3515 	unsigned long duration = params[1];
3516 	unsigned long last_jiffies, t;
3517 	u32 last, cur;
3518 	int rc;
3519 
3520 	t = ata_deadline(jiffies, params[2]);
3521 	if (time_before(t, deadline))
3522 		deadline = t;
3523 
3524 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3525 		return rc;
3526 	cur &= 0xf;
3527 
3528 	last = cur;
3529 	last_jiffies = jiffies;
3530 
3531 	while (1) {
3532 		ata_msleep(link->ap, interval);
3533 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3534 			return rc;
3535 		cur &= 0xf;
3536 
3537 		/* DET stable? */
3538 		if (cur == last) {
3539 			if (cur == 1 && time_before(jiffies, deadline))
3540 				continue;
3541 			if (time_after(jiffies,
3542 				       ata_deadline(last_jiffies, duration)))
3543 				return 0;
3544 			continue;
3545 		}
3546 
3547 		/* unstable, start over */
3548 		last = cur;
3549 		last_jiffies = jiffies;
3550 
3551 		/* Check deadline.  If debouncing failed, return
3552 		 * -EPIPE to tell upper layer to lower link speed.
3553 		 */
3554 		if (time_after(jiffies, deadline))
3555 			return -EPIPE;
3556 	}
3557 }
3558 
3559 /**
3560  *	sata_link_resume - resume SATA link
3561  *	@link: ATA link to resume SATA
3562  *	@params: timing parameters { interval, duration, timeout } in msec
3563  *	@deadline: deadline jiffies for the operation
3564  *
3565  *	Resume SATA phy @link and debounce it.
3566  *
3567  *	LOCKING:
3568  *	Kernel thread context (may sleep)
3569  *
3570  *	RETURNS:
3571  *	0 on success, -errno on failure.
3572  */
3573 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3574 		     unsigned long deadline)
3575 {
3576 	int tries = ATA_LINK_RESUME_TRIES;
3577 	u32 scontrol, serror;
3578 	int rc;
3579 
3580 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3581 		return rc;
3582 
3583 	/*
3584 	 * Writes to SControl sometimes get ignored under certain
3585 	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3586 	 * cleared.
3587 	 */
3588 	do {
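		/* keep the SPD field, clear DET and disable partial/slumber (IPM = 3) */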
3589 		scontrol = (scontrol & 0x0f0) | 0x300;
3590 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3591 			return rc;
3592 		/*
3593 		 * Some PHYs react badly if SStatus is pounded
3594 		 * immediately after resuming.  Delay 200ms before
3595 		 * debouncing.
3596 		 */
3597 		ata_msleep(link->ap, 200);
3598 
3599 		/* is SControl restored correctly? */
3600 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3601 			return rc;
3602 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3603 
3604 	if ((scontrol & 0xf0f) != 0x300) {
3605 		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3606 			     scontrol);
3607 		return 0;
3608 	}
3609 
3610 	if (tries < ATA_LINK_RESUME_TRIES)
3611 		ata_link_warn(link, "link resume succeeded after %d retries\n",
3612 			      ATA_LINK_RESUME_TRIES - tries);
3613 
3614 	if ((rc = sata_link_debounce(link, params, deadline)))
3615 		return rc;
3616 
3617 	/* clear SError, some PHYs require this even for SRST to work */
3618 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3619 		rc = sata_scr_write(link, SCR_ERROR, serror);
3620 
3621 	return rc != -EINVAL ? rc : 0;
3622 }
3623 
3624 /**
3625  *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3626  *	@link: ATA link to manipulate SControl for
3627  *	@policy: LPM policy to configure
3628  *	@spm_wakeup: initiate LPM transition to active state
3629  *
3630  *	Manipulate the IPM field of the SControl register of @link
3631  *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3632  *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3633  *	the link.  This function also clears PHYRDY_CHG before
3634  *	returning.
3635  *
3636  *	LOCKING:
3637  *	EH context.
3638  *
3639  *	RETURNS:
3640  *	0 on success, -errno otherwise.
3641  */
3642 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3643 		      bool spm_wakeup)
3644 {
3645 	struct ata_eh_context *ehc = &link->eh_context;
3646 	bool woken_up = false;
3647 	u32 scontrol;
3648 	int rc;
3649 
3650 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3651 	if (rc)
3652 		return rc;
3653 
3654 	switch (policy) {
3655 	case ATA_LPM_MAX_POWER:
3656 		/* disable all LPM transitions */
3657 		scontrol |= (0x7 << 8);
3658 		/* initiate transition to active state */
3659 		if (spm_wakeup) {
3660 			scontrol |= (0x4 << 12);
3661 			woken_up = true;
3662 		}
3663 		break;
3664 	case ATA_LPM_MED_POWER:
3665 		/* allow LPM to PARTIAL */
3666 		scontrol &= ~(0x1 << 8);
3667 		scontrol |= (0x6 << 8);
3668 		break;
3669 	case ATA_LPM_MIN_POWER:
3670 		if (ata_link_nr_enabled(link) > 0)
3671 			/* no restrictions on LPM transitions */
3672 			scontrol &= ~(0x7 << 8);
3673 		else {
3674 			/* empty port, power off */
3675 			scontrol &= ~0xf;
3676 			scontrol |= (0x1 << 2);
3677 		}
3678 		break;
3679 	default:
3680 		WARN_ON(1);
3681 	}
3682 
3683 	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3684 	if (rc)
3685 		return rc;
3686 
3687 	/* give the link time to transit out of LPM state */
3688 	if (woken_up)
3689 		msleep(10);
3690 
3691 	/* clear PHYRDY_CHG from SError */
3692 	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3693 	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3694 }
3695 
3696 /**
3697  *	ata_std_prereset - prepare for reset
3698  *	@link: ATA link to be reset
3699  *	@deadline: deadline jiffies for the operation
3700  *
3701  *	@link is about to be reset.  Initialize it.  Failure from
3702  *	prereset makes libata abort whole reset sequence and give up
3703  *	that port, so prereset should be best-effort.  It does its
3704  *	best to prepare for reset sequence but if things go wrong, it
3705  *	should just whine, not fail.
3706  *
3707  *	LOCKING:
3708  *	Kernel thread context (may sleep)
3709  *
3710  *	RETURNS:
3711  *	0 on success, -errno otherwise.
3712  */
3713 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3714 {
3715 	struct ata_port *ap = link->ap;
3716 	struct ata_eh_context *ehc = &link->eh_context;
3717 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3718 	int rc;
3719 
3720 	/* if we're about to do hardreset, nothing more to do */
3721 	if (ehc->i.action & ATA_EH_HARDRESET)
3722 		return 0;
3723 
3724 	/* if SATA, resume link */
3725 	if (ap->flags & ATA_FLAG_SATA) {
3726 		rc = sata_link_resume(link, timing, deadline);
3727 		/* whine about phy resume failure but proceed */
3728 		if (rc && rc != -EOPNOTSUPP)
3729 			ata_link_warn(link,
3730 				      "failed to resume link for reset (errno=%d)\n",
3731 				      rc);
3732 	}
3733 
3734 	/* no point in trying softreset on offline link */
3735 	if (ata_phys_link_offline(link))
3736 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3737 
3738 	return 0;
3739 }
3740 
3741 /**
3742  *	sata_link_hardreset - reset link via SATA phy reset
3743  *	@link: link to reset
3744  *	@timing: timing parameters { interval, duration, timeout } in msec
3745  *	@deadline: deadline jiffies for the operation
3746  *	@online: optional out parameter indicating whether the link is online
3747  *	@check_ready: optional callback to check link readiness
3748  *
3749  *	SATA phy-reset @link using DET bits of SControl register.
3750  *	After hardreset, link readiness is waited upon using
3751  *	ata_wait_ready() if @check_ready is specified.  LLDs may
3752  *	omit @check_ready and do the waiting themselves after this
3753  *	function returns.  Device classification is the LLD's
3754  *	responsibility.
3755  *
3756  *	*@online is set to %true iff reset succeeded and @link is online
3757  *	after reset.
3758  *
3759  *	LOCKING:
3760  *	Kernel thread context (may sleep)
3761  *
3762  *	RETURNS:
3763  *	0 on success, -errno otherwise.
3764  */
3765 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3766 			unsigned long deadline,
3767 			bool *online, int (*check_ready)(struct ata_link *))
3768 {
3769 	u32 scontrol;
3770 	int rc;
3771 
3772 	DPRINTK("ENTER\n");
3773 
3774 	if (online)
3775 		*online = false;
3776 
3777 	if (sata_set_spd_needed(link)) {
3778 		/* SATA spec says nothing about how to reconfigure
3779 		 * spd.  To be on the safe side, turn off phy during
3780 		 * reconfiguration.  This works for at least ICH7 AHCI
3781 		 * and Sil3124.
3782 		 */
3783 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3784 			goto out;
3785 
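		/*
		 * Keep the configured SPD (bits 7:4), disallow partial and
		 * slumber transitions (IPM = 0x3) and take the PHY offline
		 * (DET = 0x4) while reconfiguring.
		 */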
3786 		scontrol = (scontrol & 0x0f0) | 0x304;
3787 
3788 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3789 			goto out;
3790 
3791 		sata_set_spd(link);
3792 	}
3793 
3794 	/* issue phy wake/reset */
3795 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3796 		goto out;
3797 
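	/* keep SPD and IPM = 0x3 (no partial/slumber); DET = 0x1 starts
	 * the interface initialization sequence, i.e. COMRESET
	 */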
3798 	scontrol = (scontrol & 0x0f0) | 0x301;
3799 
3800 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3801 		goto out;
3802 
3803 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3804 	 * 10.4.2 says at least 1 ms.
3805 	 */
3806 	ata_msleep(link->ap, 1);
3807 
3808 	/* bring link back */
3809 	rc = sata_link_resume(link, timing, deadline);
3810 	if (rc)
3811 		goto out;
3812 	/* if link is offline nothing more to do */
3813 	if (ata_phys_link_offline(link))
3814 		goto out;
3815 
3816 	/* Link is online.  From this point, -ENODEV too is an error. */
3817 	if (online)
3818 		*online = true;
3819 
3820 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3821 		/* If PMP is supported, we have to do follow-up SRST.
3822 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3823 		 * the first port is empty.  Wait only for
3824 		 * ATA_TMOUT_PMP_SRST_WAIT.
3825 		 */
3826 		if (check_ready) {
3827 			unsigned long pmp_deadline;
3828 
3829 			pmp_deadline = ata_deadline(jiffies,
3830 						    ATA_TMOUT_PMP_SRST_WAIT);
3831 			if (time_after(pmp_deadline, deadline))
3832 				pmp_deadline = deadline;
3833 			ata_wait_ready(link, pmp_deadline, check_ready);
3834 		}
3835 		rc = -EAGAIN;
3836 		goto out;
3837 	}
3838 
3839 	rc = 0;
3840 	if (check_ready)
3841 		rc = ata_wait_ready(link, deadline, check_ready);
3842  out:
3843 	if (rc && rc != -EAGAIN) {
3844 		/* online is set iff link is online && reset succeeded */
3845 		if (online)
3846 			*online = false;
3847 		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3848 	}
3849 	DPRINTK("EXIT, rc=%d\n", rc);
3850 	return rc;
3851 }
3852 
3853 /**
3854  *	sata_std_hardreset - COMRESET w/o waiting or classification
3855  *	@link: link to reset
3856  *	@class: resulting class of attached device
3857  *	@deadline: deadline jiffies for the operation
3858  *
3859  *	Standard SATA COMRESET w/o waiting or classification.
3860  *
3861  *	LOCKING:
3862  *	Kernel thread context (may sleep)
3863  *
3864  *	RETURNS:
3865  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3866  */
3867 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3868 		       unsigned long deadline)
3869 {
3870 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3871 	bool online;
3872 	int rc;
3873 
3874 	/* do hardreset */
3875 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
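	/* -EAGAIN tells EH that the link came up and a follow-up
	 * softreset is needed, e.g. for device classification.
	 */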
3876 	return online ? -EAGAIN : rc;
3877 }
3878 
3879 /**
3880  *	ata_std_postreset - standard postreset callback
3881  *	@link: the target ata_link
3882  *	@classes: classes of attached devices
3883  *
3884  *	This function is invoked after a successful reset.  Note that
3885  *	the device might have been reset more than once using
3886  *	different reset methods before postreset is invoked.
3887  *
3888  *	LOCKING:
3889  *	Kernel thread context (may sleep)
3890  */
3891 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3892 {
3893 	u32 serror;
3894 
3895 	DPRINTK("ENTER\n");
3896 
3897 	/* reset complete, clear SError */
3898 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3899 		sata_scr_write(link, SCR_ERROR, serror);
3900 
3901 	/* print link status */
3902 	sata_print_link_status(link);
3903 
3904 	DPRINTK("EXIT\n");
3905 }
3906 
3907 /**
3908  *	ata_dev_same_device - Determine whether new ID matches configured device
3909  *	@dev: device to compare against
3910  *	@new_class: class of the new device
3911  *	@new_id: IDENTIFY page of the new device
3912  *
3913  *	Compare @new_class and @new_id against @dev and determine
3914  *	whether @dev is the device indicated by @new_class and
3915  *	@new_id.
3916  *
3917  *	LOCKING:
3918  *	None.
3919  *
3920  *	RETURNS:
3921  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3922  */
3923 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3924 			       const u16 *new_id)
3925 {
3926 	const u16 *old_id = dev->id;
3927 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3928 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3929 
3930 	if (dev->class != new_class) {
3931 		ata_dev_info(dev, "class mismatch %d != %d\n",
3932 			     dev->class, new_class);
3933 		return 0;
3934 	}
3935 
3936 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3937 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3938 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3939 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3940 
3941 	if (strcmp(model[0], model[1])) {
3942 		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3943 			     model[0], model[1]);
3944 		return 0;
3945 	}
3946 
3947 	if (strcmp(serial[0], serial[1])) {
3948 		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3949 			     serial[0], serial[1]);
3950 		return 0;
3951 	}
3952 
3953 	return 1;
3954 }
3955 
3956 /**
3957  *	ata_dev_reread_id - Re-read IDENTIFY data
3958  *	@dev: target ATA device
3959  *	@readid_flags: read ID flags
3960  *
3961  *	Re-read IDENTIFY page and make sure @dev is still attached to
3962  *	the port.
3963  *
3964  *	LOCKING:
3965  *	Kernel thread context (may sleep)
3966  *
3967  *	RETURNS:
3968  *	0 on success, negative errno otherwise
3969  */
3970 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3971 {
3972 	unsigned int class = dev->class;
3973 	u16 *id = (void *)dev->link->ap->sector_buf;
3974 	int rc;
3975 
3976 	/* read ID data */
3977 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3978 	if (rc)
3979 		return rc;
3980 
3981 	/* is the device still there? */
3982 	if (!ata_dev_same_device(dev, class, id))
3983 		return -ENODEV;
3984 
3985 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3986 	return 0;
3987 }
3988 
3989 /**
3990  *	ata_dev_revalidate - Revalidate ATA device
3991  *	@dev: device to revalidate
3992  *	@new_class: new class code
3993  *	@readid_flags: read ID flags
3994  *
3995  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3996  *	port and reconfigure it according to the new IDENTIFY page.
3997  *
3998  *	LOCKING:
3999  *	Kernel thread context (may sleep)
4000  *
4001  *	RETURNS:
4002  *	0 on success, negative errno otherwise
4003  */
4004 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4005 		       unsigned int readid_flags)
4006 {
4007 	u64 n_sectors = dev->n_sectors;
4008 	u64 n_native_sectors = dev->n_native_sectors;
4009 	int rc;
4010 
4011 	if (!ata_dev_enabled(dev))
4012 		return -ENODEV;
4013 
4014 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4015 	if (ata_class_enabled(new_class) &&
4016 	    new_class != ATA_DEV_ATA &&
4017 	    new_class != ATA_DEV_ATAPI &&
4018 	    new_class != ATA_DEV_SEMB) {
4019 		ata_dev_info(dev, "class mismatch %u != %u\n",
4020 			     dev->class, new_class);
4021 		rc = -ENODEV;
4022 		goto fail;
4023 	}
4024 
4025 	/* re-read ID */
4026 	rc = ata_dev_reread_id(dev, readid_flags);
4027 	if (rc)
4028 		goto fail;
4029 
4030 	/* configure device according to the new ID */
4031 	rc = ata_dev_configure(dev);
4032 	if (rc)
4033 		goto fail;
4034 
4035 	/* verify n_sectors hasn't changed */
4036 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4037 	    dev->n_sectors == n_sectors)
4038 		return 0;
4039 
4040 	/* n_sectors has changed */
4041 	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4042 		     (unsigned long long)n_sectors,
4043 		     (unsigned long long)dev->n_sectors);
4044 
4045 	/*
4046 	 * Something could have caused HPA to be unlocked
4047 	 * involuntarily.  If n_native_sectors hasn't changed and the
4048 	 * new size matches it, keep the device.
4049 	 */
4050 	if (dev->n_native_sectors == n_native_sectors &&
4051 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4052 		ata_dev_warn(dev,
4053 			     "new n_sectors matches native, probably "
4054 			     "late HPA unlock, n_sectors updated\n");
4055 		/* use the larger n_sectors */
4056 		return 0;
4057 	}
4058 
4059 	/*
4060 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4061 	 * unlocking HPA in those cases.
4062 	 *
4063 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4064 	 */
4065 	if (dev->n_native_sectors == n_native_sectors &&
4066 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4067 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4068 		ata_dev_warn(dev,
4069 			     "old n_sectors matches native, probably "
4070 			     "late HPA lock, will try to unlock HPA\n");
4071 		/* try unlocking HPA */
4072 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4073 		rc = -EIO;
4074 	} else
4075 		rc = -ENODEV;
4076 
4077 	/* restore original n_[native_]sectors and fail */
4078 	dev->n_native_sectors = n_native_sectors;
4079 	dev->n_sectors = n_sectors;
4080  fail:
4081 	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4082 	return rc;
4083 }
4084 
4085 struct ata_blacklist_entry {
4086 	const char *model_num;
4087 	const char *model_rev;
4088 	unsigned long horkage;
4089 };
4090 
4091 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4092 	/* Devices with DMA related problems under Linux */
4093 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4094 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4095 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4096 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4097 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4098 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4099 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4100 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4101 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4102 	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4103 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4104 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4105 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4106 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4107 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4108 	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4109 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4110 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4111 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4112 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4113 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4114 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4115 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4116 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4117 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4118 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4119 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4120 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4121 	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4122 	/* Odd clown on sil3726/4726 PMPs */
4123 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4124 
4125 	/* Weird ATAPI devices */
4126 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4127 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4128 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4129 	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4130 
4131 	/* Devices we expect to fail diagnostics */
4132 
4133 	/* Devices where NCQ should be avoided */
4134 	/* NCQ is slow */
4135 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4136 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4137 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4138 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4139 	/* NCQ is broken */
4140 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4141 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4142 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4143 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4144 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4145 
4146 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4147 	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4148 						ATA_HORKAGE_FIRMWARE_WARN },
4149 
4150 	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4151 						ATA_HORKAGE_FIRMWARE_WARN },
4152 
4153 	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4154 						ATA_HORKAGE_FIRMWARE_WARN },
4155 
4156 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4157 						ATA_HORKAGE_FIRMWARE_WARN },
4158 
4159 	/* Blacklist entries taken from Silicon Image 3124/3132
4160 	   Windows driver .inf file - also several Linux problem reports */
4161 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4162 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4163 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4164 
4165 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4166 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4167 
4168 	/* devices which puke on READ_NATIVE_MAX */
4169 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4170 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4171 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4172 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4173 
4174 	/* this one allows HPA unlocking but fails IOs on the area */
4175 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4176 
4177 	/* Devices which report 1 sector over size HPA */
4178 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4179 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4180 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4181 
4182 	/* Devices which get the IVB wrong */
4183 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4184 	/* Maybe we should just blacklist TSSTcorp... */
4185 	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4186 
4187 	/* Devices that do not need bridging limits applied */
4188 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4189 	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4190 
4191 	/* Devices which aren't very happy with higher link speeds */
4192 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4193 	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4194 
4195 	/*
4196 	 * Devices which choke on SETXFER.  Applies only if both the
4197 	 * device and controller are SATA.
4198 	 */
4199 	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4200 	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4201 	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4202 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4203 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4204 
4205 	/* End Marker */
4206 	{ }
4207 };
4208 
4209 /**
4210  *	glob_match - match a text string against a glob-style pattern
4211  *	@text: the string to be examined
4212  *	@pattern: the glob-style pattern to be matched against
4213  *
4214  *	Either/both of text and pattern can be empty strings.
4215  *
4216  *	Match text against a glob-style pattern, with wildcards and simple sets:
4217  *
4218  *		?	matches any single character.
4219  *		*	matches any run of characters.
4220  *		[xyz]	matches a single character from the set: x, y, or z.
4221  *		[a-d]	matches a single character from the range: a, b, c, or d.
4222  *		[a-d0-9] matches a single character from either range.
4223  *
4224  *	The special characters ?, [, -, or * can be matched using a set, e.g. [*]
4225  *	Behaviour with malformed patterns is undefined, though generally reasonable.
4226  *
4227  *	Sample patterns:  "SD1?",  "SD1[0-5]",  "*R0",  "SD*1?[012]*xx"
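 *
 *	For instance, glob_match("SD17", "SD1[5-9]") returns 0 (match),
 *	while glob_match("ST380817AS", "ST3320[68]13AS") returns 1.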
4228  *
4229  *	This function uses one level of recursion per '*' in pattern.
4230  *	Since it calls _nothing_ else, and has _no_ explicit local variables,
4231  *	this will not cause stack problems for any reasonable use here.
4232  *
4233  *	RETURNS:
4234  *	0 on match, 1 otherwise.
4235  */
4236 static int glob_match(const char *text, const char *pattern)
4237 {
4238 	do {
4239 		/* Match single character or a '?' wildcard */
4240 		if (*text == *pattern || *pattern == '?') {
4241 			if (!*pattern++)
4242 				return 0;  /* End of both strings: match */
4243 		} else {
4244 			/* Match single char against a '[' bracketed ']' pattern set */
4245 			if (!*text || *pattern != '[')
4246 				break;  /* Not a pattern set */
4247 			while (*++pattern && *pattern != ']' && *text != *pattern) {
4248 				if (*pattern == '-' && *(pattern - 1) != '[')
4249 					if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4250 						++pattern;
4251 						break;
4252 					}
4253 			}
4254 			if (!*pattern || *pattern == ']')
4255 				return 1;  /* No match */
4256 			while (*pattern && *pattern++ != ']');
4257 		}
4258 	} while (*++text && *pattern);
4259 
4260 	/* Match any run of chars against a '*' wildcard */
4261 	if (*pattern == '*') {
4262 		if (!*++pattern)
4263 			return 0;  /* Match: avoid recursion at end of pattern */
4264 		/* Loop to handle additional pattern chars after the wildcard */
4265 		while (*text) {
4266 			if (glob_match(text, pattern) == 0)
4267 				return 0;  /* Remainder matched */
4268 			++text;  /* Absorb (match) this char and try again */
4269 		}
4270 	}
4271 	if (!*text && !*pattern)
4272 		return 0;  /* End of both strings: match */
4273 	return 1;  /* No match */
4274 }
4275 
4276 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4277 {
4278 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4279 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4280 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4281 
4282 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4283 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4284 
4285 	while (ad->model_num) {
4286 		if (!glob_match(model_num, ad->model_num)) {
4287 			if (ad->model_rev == NULL)
4288 				return ad->horkage;
4289 			if (!glob_match(model_rev, ad->model_rev))
4290 				return ad->horkage;
4291 		}
4292 		ad++;
4293 	}
4294 	return 0;
4295 }
4296 
4297 static int ata_dma_blacklisted(const struct ata_device *dev)
4298 {
4299 	/* We don't support polling DMA.
4300 	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO instead)
4301 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4302 	 */
4303 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4304 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4305 		return 1;
4306 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4307 }
4308 
4309 /**
4310  *	ata_is_40wire		-	check drive side detection
4311  *	@dev: device
4312  *
4313  *	Perform drive side detection decoding, allowing for device vendors
4314  *	who can't follow the documentation.
4315  */
4316 
4317 static int ata_is_40wire(struct ata_device *dev)
4318 {
4319 	if (dev->horkage & ATA_HORKAGE_IVB)
4320 		return ata_drive_40wire_relaxed(dev->id);
4321 	return ata_drive_40wire(dev->id);
4322 }
4323 
4324 /**
4325  *	cable_is_40wire		-	40/80/SATA decider
4326  *	@ap: port to consider
4327  *
4328  *	This function encapsulates the policy for speed management
4329  *	in one place. At the moment we don't cache the result but
4330  *	there is a good case for setting ap->cbl to the result when
4331  *	we are called with unknown cables (and figuring out if it
4332  *	impacts hotplug at all).
4333  *
4334  *	Return 1 if the cable appears to be 40 wire.
4335  */
4336 
4337 static int cable_is_40wire(struct ata_port *ap)
4338 {
4339 	struct ata_link *link;
4340 	struct ata_device *dev;
4341 
4342 	/* If the controller thinks we are 40 wire, we are. */
4343 	if (ap->cbl == ATA_CBL_PATA40)
4344 		return 1;
4345 
4346 	/* If the controller thinks we are 80 wire, we are. */
4347 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4348 		return 0;
4349 
4350 	/* If the system is known to be 40 wire short cable (eg
4351 	 * laptop), then we allow 80 wire modes even if the drive
4352 	 * isn't sure.
4353 	 */
4354 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4355 		return 0;
4356 
4357 	/* If the controller doesn't know, we scan.
4358 	 *
4359 	 * Note: We look for all 40 wire detects at this point.  Any
4360 	 *       80 wire detect is taken to be 80 wire cable because
4361 	 * - in many setups only the one drive (slave if present) will
4362 	 *   give a valid detect
4363 	 * - if you have a non detect capable drive you don't want it
4364 	 *   to colour the choice
4365 	 */
4366 	ata_for_each_link(link, ap, EDGE) {
4367 		ata_for_each_dev(dev, link, ENABLED) {
4368 			if (!ata_is_40wire(dev))
4369 				return 0;
4370 		}
4371 	}
4372 	return 1;
4373 }
4374 
4375 /**
4376  *	ata_dev_xfermask - Compute supported xfermask of the given device
4377  *	@dev: Device to compute xfermask for
4378  *
4379  *	Compute supported xfermask of @dev and store it in
4380  *	dev->*_mask.  This function is responsible for applying all
4381  *	known limits including host controller limits, device
4382  *	blacklist, etc...
4383  *
4384  *	LOCKING:
4385  *	None.
4386  */
4387 static void ata_dev_xfermask(struct ata_device *dev)
4388 {
4389 	struct ata_link *link = dev->link;
4390 	struct ata_port *ap = link->ap;
4391 	struct ata_host *host = ap->host;
4392 	unsigned long xfer_mask;
4393 
4394 	/* controller modes available */
4395 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4396 				      ap->mwdma_mask, ap->udma_mask);
4397 
4398 	/* drive modes available */
4399 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4400 				       dev->mwdma_mask, dev->udma_mask);
4401 	xfer_mask &= ata_id_xfermask(dev->id);
4402 
4403 	/*
4404 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4405 	 *	cable
4406 	 */
4407 	if (ata_dev_pair(dev)) {
4408 		/* No PIO5 or PIO6 */
4409 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4410 		/* No MWDMA3 or MWDMA4 */
4411 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4412 	}
4413 
4414 	if (ata_dma_blacklisted(dev)) {
4415 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4416 		ata_dev_warn(dev,
4417 			     "device is on DMA blacklist, disabling DMA\n");
4418 	}
4419 
4420 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4421 	    host->simplex_claimed && host->simplex_claimed != ap) {
4422 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4423 		ata_dev_warn(dev,
4424 			     "simplex DMA is claimed by other device, disabling DMA\n");
4425 	}
4426 
4427 	if (ap->flags & ATA_FLAG_NO_IORDY)
4428 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4429 
4430 	if (ap->ops->mode_filter)
4431 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4432 
4433 	/* Apply cable rule here.  Don't apply it early because when
4434 	 * we handle hot plug the cable type can itself change.
4435 	 * Check this last so that we know if the transfer rate was
4436 	 * solely limited by the cable.
4437 	 * Unknown or 80 wire cables reported host side are checked
4438 	 * drive side as well. Cases where we know a 40wire cable
4439 	 * is used safely for 80 are not checked here.
4440 	 */
4441 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4442 		/* UDMA/44 or higher would be available */
4443 		if (cable_is_40wire(ap)) {
4444 			ata_dev_warn(dev,
4445 				     "limited to UDMA/33 due to 40-wire cable\n");
4446 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4447 		}
4448 
4449 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4450 			    &dev->mwdma_mask, &dev->udma_mask);
4451 }
4452 
4453 /**
4454  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4455  *	@dev: Device to which command will be sent
4456  *
4457  *	Issue SET FEATURES - XFER MODE command to device @dev
4458  *	on port @ap.
4459  *
4460  *	LOCKING:
4461  *	PCI/etc. bus probe sem.
4462  *
4463  *	RETURNS:
4464  *	0 on success, AC_ERR_* mask otherwise.
4465  */
4466 
4467 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4468 {
4469 	struct ata_taskfile tf;
4470 	unsigned int err_mask;
4471 
4472 	/* set up set-features taskfile */
4473 	DPRINTK("set features - xfer mode\n");
4474 
4475 	/* Some controllers and ATAPI devices show flaky interrupt
4476 	 * behavior after setting xfer mode.  Use polling instead.
4477 	 */
4478 	ata_tf_init(dev, &tf);
4479 	tf.command = ATA_CMD_SET_FEATURES;
4480 	tf.feature = SETFEATURES_XFER;
4481 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4482 	tf.protocol = ATA_PROT_NODATA;
4483 	/* If we are using IORDY we must send the mode setting command */
4484 	if (ata_pio_need_iordy(dev))
4485 		tf.nsect = dev->xfer_mode;
4486 	/* If the device has IORDY and the controller does not - turn it off */
4487 	else if (ata_id_has_iordy(dev->id))
4488 		tf.nsect = 0x01;
4489 	else /* In the ancient relic department - skip all of this */
4490 		return 0;
4491 
4492 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4493 
4494 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4495 	return err_mask;
4496 }
4497 
4498 /**
4499  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4500  *	@dev: Device to which command will be sent
4501  *	@enable: Whether to enable or disable the feature
4502  *	@feature: The feature to set, passed in the sector count field
4503  *
4504  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4505  *	on port @ap with the sector count set to @feature.
4506  *
4507  *	LOCKING:
4508  *	PCI/etc. bus probe sem.
4509  *
4510  *	RETURNS:
4511  *	0 on success, AC_ERR_* mask otherwise.
4512  */
4513 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4514 {
4515 	struct ata_taskfile tf;
4516 	unsigned int err_mask;
4517 
4518 	/* set up set-features taskfile */
4519 	DPRINTK("set features - SATA features\n");
4520 
4521 	ata_tf_init(dev, &tf);
4522 	tf.command = ATA_CMD_SET_FEATURES;
4523 	tf.feature = enable;
4524 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4525 	tf.protocol = ATA_PROT_NODATA;
4526 	tf.nsect = feature;
4527 
4528 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4529 
4530 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4531 	return err_mask;
4532 }
4533 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4534 
4535 /**
4536  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4537  *	@dev: Device to which command will be sent
4538  *	@heads: Number of heads (taskfile parameter)
4539  *	@sectors: Number of sectors (taskfile parameter)
4540  *
4541  *	LOCKING:
4542  *	Kernel thread context (may sleep)
4543  *
4544  *	RETURNS:
4545  *	0 on success, AC_ERR_* mask otherwise.
4546  */
4547 static unsigned int ata_dev_init_params(struct ata_device *dev,
4548 					u16 heads, u16 sectors)
4549 {
4550 	struct ata_taskfile tf;
4551 	unsigned int err_mask;
4552 
4553 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4554 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4555 		return AC_ERR_INVALID;
4556 
4557 	/* set up init dev params taskfile */
4558 	DPRINTK("init dev params \n");
4559 
4560 	ata_tf_init(dev, &tf);
4561 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4562 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4563 	tf.protocol = ATA_PROT_NODATA;
4564 	tf.nsect = sectors;
4565 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4566 
4567 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4568 	/* A clean abort indicates an original or just out-of-spec drive;
4569 	   continue, as the setup is issued based on the geometry the
4570 	   drive reports as working */
4571 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4572 		err_mask = 0;
4573 
4574 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4575 	return err_mask;
4576 }
4577 
4578 /**
4579  *	ata_sg_clean - Unmap DMA memory associated with command
4580  *	@qc: Command containing DMA memory to be released
4581  *
4582  *	Unmap all mapped DMA memory associated with this command.
4583  *
4584  *	LOCKING:
4585  *	spin_lock_irqsave(host lock)
4586  */
4587 void ata_sg_clean(struct ata_queued_cmd *qc)
4588 {
4589 	struct ata_port *ap = qc->ap;
4590 	struct scatterlist *sg = qc->sg;
4591 	int dir = qc->dma_dir;
4592 
4593 	WARN_ON_ONCE(sg == NULL);
4594 
4595 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4596 
4597 	if (qc->n_elem)
4598 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4599 
4600 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4601 	qc->sg = NULL;
4602 }
4603 
4604 /**
4605  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4606  *	@qc: Metadata associated with taskfile to check
4607  *
4608  *	Allow low-level driver to filter ATA PACKET commands, returning
4609  *	a status indicating whether or not it is OK to use DMA for the
4610  *	supplied PACKET command.
4611  *
4612  *	LOCKING:
4613  *	spin_lock_irqsave(host lock)
4614  *
4615  *	RETURNS: 0 when ATAPI DMA can be used
4616  *               nonzero otherwise
4617  */
4618 int atapi_check_dma(struct ata_queued_cmd *qc)
4619 {
4620 	struct ata_port *ap = qc->ap;
4621 
4622 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4623 	 * few ATAPI devices choke on such DMA requests.
4624 	 */
4625 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4626 	    unlikely(qc->nbytes & 15))
4627 		return 1;
4628 
4629 	if (ap->ops->check_atapi_dma)
4630 		return ap->ops->check_atapi_dma(qc);
4631 
4632 	return 0;
4633 }
4634 
4635 /**
4636  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4637  *	@qc: ATA command in question
4638  *
4639  *	Non-NCQ commands cannot run with any other command, NCQ or
4640  *	not.  As upper layer only knows the queue depth, we are
4641  *	responsible for maintaining exclusion.  This function checks
4642  *	whether a new command @qc can be issued.
4643  *
4644  *	LOCKING:
4645  *	spin_lock_irqsave(host lock)
4646  *
4647  *	RETURNS:
4648  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4649  */
4650 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4651 {
4652 	struct ata_link *link = qc->dev->link;
4653 
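	/*
	 * An NCQ command may run alongside other NCQ commands, but not
	 * with a non-NCQ command (link->active_tag).  A non-NCQ command
	 * requires the link to be completely idle: no active_tag and no
	 * outstanding NCQ tags in link->sactive.
	 */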
4654 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4655 		if (!ata_tag_valid(link->active_tag))
4656 			return 0;
4657 	} else {
4658 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4659 			return 0;
4660 	}
4661 
4662 	return ATA_DEFER_LINK;
4663 }
4664 
4665 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4666 
4667 /**
4668  *	ata_sg_init - Associate command with scatter-gather table.
4669  *	@qc: Command to be associated
4670  *	@sg: Scatter-gather table.
4671  *	@n_elem: Number of elements in s/g table.
4672  *
4673  *	Initialize the data-related elements of queued_cmd @qc
4674  *	to point to a scatter-gather table @sg, containing @n_elem
4675  *	elements.
4676  *
4677  *	LOCKING:
4678  *	spin_lock_irqsave(host lock)
4679  */
4680 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4681 		 unsigned int n_elem)
4682 {
4683 	qc->sg = sg;
4684 	qc->n_elem = n_elem;
4685 	qc->cursg = qc->sg;
4686 }
4687 
4688 /**
4689  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4690  *	@qc: Command with scatter-gather table to be mapped.
4691  *
4692  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4693  *
4694  *	LOCKING:
4695  *	spin_lock_irqsave(host lock)
4696  *
4697  *	RETURNS:
4698  *	Zero on success, negative on error.
4699  *
4700  */
4701 static int ata_sg_setup(struct ata_queued_cmd *qc)
4702 {
4703 	struct ata_port *ap = qc->ap;
4704 	unsigned int n_elem;
4705 
4706 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4707 
4708 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
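	/* dma_map_sg() returns 0 if the mapping failed */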
4709 	if (n_elem < 1)
4710 		return -1;
4711 
4712 	DPRINTK("%d sg elements mapped\n", n_elem);
4713 	qc->orig_n_elem = qc->n_elem;
4714 	qc->n_elem = n_elem;
4715 	qc->flags |= ATA_QCFLAG_DMAMAP;
4716 
4717 	return 0;
4718 }
4719 
4720 /**
4721  *	swap_buf_le16 - swap halves of 16-bit words in place
4722  *	@buf:  Buffer to swap
4723  *	@buf_words:  Number of 16-bit words in buffer.
4724  *
4725  *	Swap halves of 16-bit words if needed to convert from
4726  *	little-endian byte order to native cpu byte order, or
4727  *	vice-versa.
4728  *
4729  *	LOCKING:
4730  *	Inherited from caller.
4731  */
4732 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4733 {
4734 #ifdef __BIG_ENDIAN
4735 	unsigned int i;
4736 
4737 	for (i = 0; i < buf_words; i++)
4738 		buf[i] = le16_to_cpu(buf[i]);
4739 #endif /* __BIG_ENDIAN */
4740 }
4741 
4742 /**
4743  *	ata_qc_new - Request an available ATA command, for queueing
4744  *	@ap: target port
4745  *
4746  *	LOCKING:
4747  *	None.
4748  */
4749 
4750 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4751 {
4752 	struct ata_queued_cmd *qc = NULL;
4753 	unsigned int i;
4754 
4755 	/* no command while frozen */
4756 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4757 		return NULL;
4758 
4759 	/* the last tag is reserved for internal command. */
4760 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4761 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4762 			qc = __ata_qc_from_tag(ap, i);
4763 			break;
4764 		}
4765 
4766 	if (qc)
4767 		qc->tag = i;
4768 
4769 	return qc;
4770 }
4771 
4772 /**
4773  *	ata_qc_new_init - Request an available ATA command, and initialize it
4774  *	@dev: Device from whom we request an available command structure
4775  *
4776  *	LOCKING:
4777  *	None.
4778  */
4779 
4780 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4781 {
4782 	struct ata_port *ap = dev->link->ap;
4783 	struct ata_queued_cmd *qc;
4784 
4785 	qc = ata_qc_new(ap);
4786 	if (qc) {
4787 		qc->scsicmd = NULL;
4788 		qc->ap = ap;
4789 		qc->dev = dev;
4790 
4791 		ata_qc_reinit(qc);
4792 	}
4793 
4794 	return qc;
4795 }
4796 
4797 /**
4798  *	ata_qc_free - free unused ata_queued_cmd
4799  *	@qc: Command to complete
4800  *
4801  *	Designed to free unused ata_queued_cmd object
4802  *	in case something prevents using it.
4803  *
4804  *	LOCKING:
4805  *	spin_lock_irqsave(host lock)
4806  */
4807 void ata_qc_free(struct ata_queued_cmd *qc)
4808 {
4809 	struct ata_port *ap;
4810 	unsigned int tag;
4811 
4812 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4813 	ap = qc->ap;
4814 
4815 	qc->flags = 0;
4816 	tag = qc->tag;
4817 	if (likely(ata_tag_valid(tag))) {
4818 		qc->tag = ATA_TAG_POISON;
4819 		clear_bit(tag, &ap->qc_allocated);
4820 	}
4821 }
4822 
4823 void __ata_qc_complete(struct ata_queued_cmd *qc)
4824 {
4825 	struct ata_port *ap;
4826 	struct ata_link *link;
4827 
4828 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4829 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4830 	ap = qc->ap;
4831 	link = qc->dev->link;
4832 
4833 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4834 		ata_sg_clean(qc);
4835 
4836 	/* command should be marked inactive atomically with qc completion */
4837 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4838 		link->sactive &= ~(1 << qc->tag);
4839 		if (!link->sactive)
4840 			ap->nr_active_links--;
4841 	} else {
4842 		link->active_tag = ATA_TAG_POISON;
4843 		ap->nr_active_links--;
4844 	}
4845 
4846 	/* clear exclusive status */
4847 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4848 		     ap->excl_link == link))
4849 		ap->excl_link = NULL;
4850 
4851 	/* atapi: mark qc as inactive to prevent the interrupt handler
4852 	 * from completing the command twice later, before the error handler
4853 	 * is called. (when rc != 0 and atapi request sense is needed)
4854 	 */
4855 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4856 	ap->qc_active &= ~(1 << qc->tag);
4857 
4858 	/* call completion callback */
4859 	qc->complete_fn(qc);
4860 }
4861 
4862 static void fill_result_tf(struct ata_queued_cmd *qc)
4863 {
4864 	struct ata_port *ap = qc->ap;
4865 
4866 	qc->result_tf.flags = qc->tf.flags;
4867 	ap->ops->qc_fill_rtf(qc);
4868 }
4869 
4870 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4871 {
4872 	struct ata_device *dev = qc->dev;
4873 
4874 	if (ata_is_nodata(qc->tf.protocol))
4875 		return;
4876 
4877 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4878 		return;
4879 
4880 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4881 }
4882 
4883 /**
4884  *	ata_qc_complete - Complete an active ATA command
4885  *	@qc: Command to complete
4886  *
4887  *	Indicate to the mid and upper layers that an ATA command has
4888  *	completed, with either an ok or not-ok status.
4889  *
4890  *	Refrain from calling this function multiple times when
4891  *	successfully completing multiple NCQ commands.
4892  *	ata_qc_complete_multiple() should be used instead, which will
4893  *	properly update IRQ expect state.
4894  *
4895  *	LOCKING:
4896  *	spin_lock_irqsave(host lock)
4897  */
4898 void ata_qc_complete(struct ata_queued_cmd *qc)
4899 {
4900 	struct ata_port *ap = qc->ap;
4901 
4902 	/* XXX: New EH and old EH use different mechanisms to
4903 	 * synchronize EH with regular execution path.
4904 	 *
4905 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4906 	 * Normal execution path is responsible for not accessing a
4907 	 * failed qc.  libata core enforces the rule by returning NULL
4908 	 * from ata_qc_from_tag() for failed qcs.
4909 	 *
4910 	 * Old EH depends on ata_qc_complete() nullifying completion
4911 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4912 	 * not synchronize with interrupt handler.  Only PIO task is
4913 	 * taken care of.
4914 	 */
4915 	if (ap->ops->error_handler) {
4916 		struct ata_device *dev = qc->dev;
4917 		struct ata_eh_info *ehi = &dev->link->eh_info;
4918 
4919 		if (unlikely(qc->err_mask))
4920 			qc->flags |= ATA_QCFLAG_FAILED;
4921 
4922 		/*
4923 		 * Finish internal commands without any further processing
4924 		 * and always with the result TF filled.
4925 		 */
4926 		if (unlikely(ata_tag_internal(qc->tag))) {
4927 			fill_result_tf(qc);
4928 			__ata_qc_complete(qc);
4929 			return;
4930 		}
4931 
4932 		/*
4933 		 * Non-internal qc has failed.  Fill the result TF and
4934 		 * summon EH.
4935 		 */
4936 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4937 			fill_result_tf(qc);
4938 			ata_qc_schedule_eh(qc);
4939 			return;
4940 		}
4941 
4942 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4943 
4944 		/* read result TF if requested */
4945 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4946 			fill_result_tf(qc);
4947 
4948 		/* Some commands need post-processing after successful
4949 		 * completion.
4950 		 */
4951 		switch (qc->tf.command) {
4952 		case ATA_CMD_SET_FEATURES:
4953 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4954 			    qc->tf.feature != SETFEATURES_WC_OFF)
4955 				break;
4956 			/* fall through */
4957 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4958 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4959 			/* revalidate device */
4960 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4961 			ata_port_schedule_eh(ap);
4962 			break;
4963 
4964 		case ATA_CMD_SLEEP:
4965 			dev->flags |= ATA_DFLAG_SLEEPING;
4966 			break;
4967 		}
4968 
4969 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4970 			ata_verify_xfer(qc);
4971 
4972 		__ata_qc_complete(qc);
4973 	} else {
4974 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4975 			return;
4976 
4977 		/* read result TF if failed or requested */
4978 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4979 			fill_result_tf(qc);
4980 
4981 		__ata_qc_complete(qc);
4982 	}
4983 }
4984 
4985 /**
4986  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4987  *	@ap: port in question
4988  *	@qc_active: new qc_active mask
4989  *
4990  *	Complete in-flight commands.  This function is meant to be
4991  *	called from the low-level driver's interrupt routine to complete
4992  *	requests normally.  ap->qc_active and @qc_active are compared
4993  *	and commands are completed accordingly.
4994  *
4995  *	Always use this function when completing multiple NCQ commands
4996  *	from IRQ handlers instead of calling ata_qc_complete()
4997  *	multiple times to keep IRQ expect status properly in sync.
4998  *
4999  *	LOCKING:
5000  *	spin_lock_irqsave(host lock)
5001  *
5002  *	RETURNS:
5003  *	Number of completed commands on success, -errno otherwise.
5004  */
5005 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5006 {
5007 	int nr_done = 0;
5008 	u32 done_mask;
5009 
5010 	done_mask = ap->qc_active ^ qc_active;
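	/*
	 * Bits set in done_mask are tags whose state changed.  A changed
	 * bit that is still set in @qc_active means a tag became active
	 * without being issued, which is an illegal transition; the
	 * remaining changed bits are commands that have completed.
	 */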
5011 
5012 	if (unlikely(done_mask & qc_active)) {
5013 		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5014 			     ap->qc_active, qc_active);
5015 		return -EINVAL;
5016 	}
5017 
5018 	while (done_mask) {
5019 		struct ata_queued_cmd *qc;
5020 		unsigned int tag = __ffs(done_mask);
5021 
5022 		qc = ata_qc_from_tag(ap, tag);
5023 		if (qc) {
5024 			ata_qc_complete(qc);
5025 			nr_done++;
5026 		}
5027 		done_mask &= ~(1 << tag);
5028 	}
5029 
5030 	return nr_done;
5031 }
5032 
5033 /**
5034  *	ata_qc_issue - issue taskfile to device
5035  *	@qc: command to issue to device
5036  *
5037  *	Prepare an ATA command for submission to the device.
5038  *	This includes mapping the data into a DMA-able
5039  *	area, filling in the S/G table, and finally
5040  *	writing the taskfile to hardware, starting the command.
5041  *
5042  *	LOCKING:
5043  *	spin_lock_irqsave(host lock)
5044  */
5045 void ata_qc_issue(struct ata_queued_cmd *qc)
5046 {
5047 	struct ata_port *ap = qc->ap;
5048 	struct ata_link *link = qc->dev->link;
5049 	u8 prot = qc->tf.protocol;
5050 
5051 	/* Make sure only one non-NCQ command is outstanding.  The
5052 	 * check is skipped for old EH because it reuses active qc to
5053 	 * request ATAPI sense.
5054 	 */
5055 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5056 
5057 	if (ata_is_ncq(prot)) {
5058 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5059 
5060 		if (!link->sactive)
5061 			ap->nr_active_links++;
5062 		link->sactive |= 1 << qc->tag;
5063 	} else {
5064 		WARN_ON_ONCE(link->sactive);
5065 
5066 		ap->nr_active_links++;
5067 		link->active_tag = qc->tag;
5068 	}
5069 
5070 	qc->flags |= ATA_QCFLAG_ACTIVE;
5071 	ap->qc_active |= 1 << qc->tag;
5072 
5073 	/*
5074 	 * We guarantee to LLDs that they will have at least one
5075 	 * non-zero sg if the command is a data command.
5076 	 */
5077 	if (WARN_ON_ONCE(ata_is_data(prot) &&
5078 			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5079 		goto sys_err;
5080 
5081 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5082 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5083 		if (ata_sg_setup(qc))
5084 			goto sys_err;
5085 
5086 	/* if device is sleeping, schedule reset and abort the link */
5087 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5088 		link->eh_info.action |= ATA_EH_RESET;
5089 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5090 		ata_link_abort(link);
5091 		return;
5092 	}
5093 
5094 	ap->ops->qc_prep(qc);
5095 
5096 	qc->err_mask |= ap->ops->qc_issue(qc);
5097 	if (unlikely(qc->err_mask))
5098 		goto err;
5099 	return;
5100 
5101 sys_err:
5102 	qc->err_mask |= AC_ERR_SYSTEM;
5103 err:
5104 	ata_qc_complete(qc);
5105 }
5106 
5107 /**
5108  *	sata_scr_valid - test whether SCRs are accessible
5109  *	@link: ATA link to test SCR accessibility for
5110  *
5111  *	Test whether SCRs are accessible for @link.
5112  *
5113  *	LOCKING:
5114  *	None.
5115  *
5116  *	RETURNS:
5117  *	1 if SCRs are accessible, 0 otherwise.
5118  */
5119 int sata_scr_valid(struct ata_link *link)
5120 {
5121 	struct ata_port *ap = link->ap;
5122 
5123 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5124 }
5125 
5126 /**
5127  *	sata_scr_read - read SCR register of the specified port
5128  *	@link: ATA link to read SCR for
5129  *	@reg: SCR to read
5130  *	@val: Place to store read value
5131  *
5132  *	Read SCR register @reg of @link into *@val.  This function is
5133  *	guaranteed to succeed if @link is ap->link, the cable type of
5134  *	the port is SATA and the port implements ->scr_read.
5135  *
5136  *	LOCKING:
5137  *	None if @link is ap->link.  Kernel thread context otherwise.
5138  *
5139  *	RETURNS:
5140  *	0 on success, negative errno on failure.
5141  */
5142 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5143 {
5144 	if (ata_is_host_link(link)) {
5145 		if (sata_scr_valid(link))
5146 			return link->ap->ops->scr_read(link, reg, val);
5147 		return -EOPNOTSUPP;
5148 	}
5149 
5150 	return sata_pmp_scr_read(link, reg, val);
5151 }
5152 
5153 /**
5154  *	sata_scr_write - write SCR register of the specified port
5155  *	@link: ATA link to write SCR for
5156  *	@reg: SCR to write
5157  *	@val: value to write
5158  *
5159  *	Write @val to SCR register @reg of @link.  This function is
5160  *	guaranteed to succeed if @link is ap->link, the cable type of
5161  *	the port is SATA and the port implements ->scr_write.
5162  *
5163  *	LOCKING:
5164  *	None if @link is ap->link.  Kernel thread context otherwise.
5165  *
5166  *	RETURNS:
5167  *	0 on success, negative errno on failure.
5168  */
5169 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5170 {
5171 	if (ata_is_host_link(link)) {
5172 		if (sata_scr_valid(link))
5173 			return link->ap->ops->scr_write(link, reg, val);
5174 		return -EOPNOTSUPP;
5175 	}
5176 
5177 	return sata_pmp_scr_write(link, reg, val);
5178 }
5179 
5180 /**
5181  *	sata_scr_write_flush - write SCR register of the specified port and flush
5182  *	@link: ATA link to write SCR for
5183  *	@reg: SCR to write
5184  *	@val: value to write
5185  *
5186  *	This function is identical to sata_scr_write() except that this
5187  *	function performs a flush after writing to the register.
5188  *
5189  *	LOCKING:
5190  *	None if @link is ap->link.  Kernel thread context otherwise.
5191  *
5192  *	RETURNS:
5193  *	0 on success, negative errno on failure.
5194  */
5195 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5196 {
5197 	if (ata_is_host_link(link)) {
5198 		int rc;
5199 
5200 		if (sata_scr_valid(link)) {
5201 			rc = link->ap->ops->scr_write(link, reg, val);
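			/* flush the posted write by reading the register back */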
5202 			if (rc == 0)
5203 				rc = link->ap->ops->scr_read(link, reg, &val);
5204 			return rc;
5205 		}
5206 		return -EOPNOTSUPP;
5207 	}
5208 
5209 	return sata_pmp_scr_write(link, reg, val);
5210 }
5211 
5212 /**
5213  *	ata_phys_link_online - test whether the given link is online
5214  *	@link: ATA link to test
5215  *
5216  *	Test whether @link is online.  Note that this function returns
5217  *	0 if online status of @link cannot be obtained, so
5218  *	ata_link_online(link) != !ata_link_offline(link).
5219  *
5220  *	LOCKING:
5221  *	None.
5222  *
5223  *	RETURNS:
5224  *	True if the port online status is available and online.
5225  */
5226 bool ata_phys_link_online(struct ata_link *link)
5227 {
5228 	u32 sstatus;
5229 
5230 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5231 	    ata_sstatus_online(sstatus))
5232 		return true;
5233 	return false;
5234 }
5235 
5236 /**
5237  *	ata_phys_link_offline - test whether the given link is offline
5238  *	@link: ATA link to test
5239  *
5240  *	Test whether @link is offline.  Note that this function
5241  *	returns 0 if offline status of @link cannot be obtained, so
5242  *	ata_link_online(link) != !ata_link_offline(link).
5243  *
5244  *	LOCKING:
5245  *	None.
5246  *
5247  *	RETURNS:
5248  *	True if the port offline status is available and offline.
5249  */
5250 bool ata_phys_link_offline(struct ata_link *link)
5251 {
5252 	u32 sstatus;
5253 
5254 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5255 	    !ata_sstatus_online(sstatus))
5256 		return true;
5257 	return false;
5258 }
5259 
5260 /**
5261  *	ata_link_online - test whether the given link is online
5262  *	@link: ATA link to test
5263  *
5264  *	Test whether @link is online.  This is identical to
5265  *	ata_phys_link_online() when there's no slave link.  When
5266  *	there's a slave link, this function should only be called on
5267  *	the master link and will return true if any of M/S links is
5268  *	online.
5269  *
5270  *	LOCKING:
5271  *	None.
5272  *
5273  *	RETURNS:
5274  *	True if the port online status is available and online.
5275  */
5276 bool ata_link_online(struct ata_link *link)
5277 {
5278 	struct ata_link *slave = link->ap->slave_link;
5279 
5280 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5281 
5282 	return ata_phys_link_online(link) ||
5283 		(slave && ata_phys_link_online(slave));
5284 }
5285 
5286 /**
5287  *	ata_link_offline - test whether the given link is offline
5288  *	@link: ATA link to test
5289  *
5290  *	Test whether @link is offline.  This is identical to
5291  *	ata_phys_link_offline() when there's no slave link.  When
5292  *	there's a slave link, this function should only be called on
5293  *	the master link and will return true if both M/S links are
5294  *	offline.
5295  *
5296  *	LOCKING:
5297  *	None.
5298  *
5299  *	RETURNS:
5300  *	True if the port offline status is available and offline.
5301  */
5302 bool ata_link_offline(struct ata_link *link)
5303 {
5304 	struct ata_link *slave = link->ap->slave_link;
5305 
5306 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5307 
5308 	return ata_phys_link_offline(link) &&
5309 		(!slave || ata_phys_link_offline(slave));
5310 }
5311 
5312 #ifdef CONFIG_PM
5313 static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5314 			       unsigned int action, unsigned int ehi_flags,
5315 			       int *async)
5316 {
5317 	struct ata_link *link;
5318 	unsigned long flags;
5319 	int rc = 0;
5320 
5321 	/* Previous resume operation might still be in
5322 	 * progress.  Wait for PM_PENDING to clear.
5323 	 */
5324 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5325 		if (async) {
5326 			*async = -EAGAIN;
5327 			return 0;
5328 		}
5329 		ata_port_wait_eh(ap);
5330 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5331 	}
5332 
5333 	/* request PM ops to EH */
5334 	spin_lock_irqsave(ap->lock, flags);
5335 
5336 	ap->pm_mesg = mesg;
5337 	if (async)
5338 		ap->pm_result = async;
5339 	else
5340 		ap->pm_result = &rc;
5341 
5342 	ap->pflags |= ATA_PFLAG_PM_PENDING;
5343 	ata_for_each_link(link, ap, HOST_FIRST) {
5344 		link->eh_info.action |= action;
5345 		link->eh_info.flags |= ehi_flags;
5346 	}
5347 
5348 	ata_port_schedule_eh(ap);
5349 
5350 	spin_unlock_irqrestore(ap->lock, flags);
5351 
5352 	/* wait and check result */
5353 	if (!async) {
5354 		ata_port_wait_eh(ap);
5355 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5356 	}
5357 
5358 	return rc;
5359 }
5360 
5361 static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
5362 {
5363 	/*
5364 	 * On some hardware, device fails to respond after spun down
5365 	 * for suspend.  As the device won't be used before being
5366 	 * resumed, we don't need to touch the device.  Ask EH to skip
5367 	 * the usual stuff and proceed directly to suspend.
5368 	 *
5369 	 * http://thread.gmane.org/gmane.linux.ide/46764
5370 	 */
5371 	unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
5372 				 ATA_EHI_NO_RECOVERY;
5373 	return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
5374 }
5375 
5376 static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
5377 {
5378 	struct ata_port *ap = to_ata_port(dev);
5379 
5380 	return __ata_port_suspend_common(ap, mesg, NULL);
5381 }
5382 
5383 static int ata_port_suspend(struct device *dev)
5384 {
5385 	if (pm_runtime_suspended(dev))
5386 		return 0;
5387 
5388 	return ata_port_suspend_common(dev, PMSG_SUSPEND);
5389 }
5390 
5391 static int ata_port_do_freeze(struct device *dev)
5392 {
5393 	if (pm_runtime_suspended(dev))
5394 		return 0;
5395 
5396 	return ata_port_suspend_common(dev, PMSG_FREEZE);
5397 }
5398 
5399 static int ata_port_poweroff(struct device *dev)
5400 {
5401 	return ata_port_suspend_common(dev, PMSG_HIBERNATE);
5402 }
5403 
5404 static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
5405 				    int *async)
5406 {
5407 	int rc;
5408 
5409 	rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
5410 		ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
5411 	return rc;
5412 }
5413 
5414 static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
5415 {
5416 	struct ata_port *ap = to_ata_port(dev);
5417 
5418 	return __ata_port_resume_common(ap, mesg, NULL);
5419 }
5420 
5421 static int ata_port_resume(struct device *dev)
5422 {
5423 	int rc;
5424 
5425 	rc = ata_port_resume_common(dev, PMSG_RESUME);
5426 	if (!rc) {
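		/* resync runtime PM status: the port is active again */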
5427 		pm_runtime_disable(dev);
5428 		pm_runtime_set_active(dev);
5429 		pm_runtime_enable(dev);
5430 	}
5431 
5432 	return rc;
5433 }
5434 
5435 /*
5436  * For ODDs, the upper layer will poll for media change every few seconds,
5437  * which makes it enter and leave suspend state every few seconds.  As
5438  * each suspend triggers a hard/soft reset, the gain from runtime suspend
5439  * is very small and the ODD may malfunction after being reset repeatedly.
5440  * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5441  * ODD is attached to the port.
5442  */
5443 static int ata_port_runtime_idle(struct device *dev)
5444 {
5445 	struct ata_port *ap = to_ata_port(dev);
5446 	struct ata_link *link;
5447 	struct ata_device *adev;
5448 
5449 	ata_for_each_link(link, ap, HOST_FIRST) {
5450 		ata_for_each_dev(adev, link, ENABLED)
5451 			if (adev->class == ATA_DEV_ATAPI &&
5452 			    !zpodd_dev_enabled(adev))
5453 				return -EBUSY;
5454 	}
5455 
5456 	return 0;
5457 }
5458 
5459 static int ata_port_runtime_suspend(struct device *dev)
5460 {
5461 	return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
5462 }
5463 
5464 static int ata_port_runtime_resume(struct device *dev)
5465 {
5466 	return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
5467 }
5468 
5469 static const struct dev_pm_ops ata_port_pm_ops = {
5470 	.suspend = ata_port_suspend,
5471 	.resume = ata_port_resume,
5472 	.freeze = ata_port_do_freeze,
5473 	.thaw = ata_port_resume,
5474 	.poweroff = ata_port_poweroff,
5475 	.restore = ata_port_resume,
5476 
5477 	.runtime_suspend = ata_port_runtime_suspend,
5478 	.runtime_resume = ata_port_runtime_resume,
5479 	.runtime_idle = ata_port_runtime_idle,
5480 };
5481 
5482 /* sas ports don't participate in pm runtime management of ata_ports,
5483  * and need to resume ata devices at the domain level, not the per-port
5484  * level. sas suspend/resume is async to allow parallel port recovery
5485  * since sas has multiple ata_port instances per Scsi_Host.
5486  */
5487 int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
5488 {
5489 	return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
5490 }
5491 EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
5492 
5493 int ata_sas_port_async_resume(struct ata_port *ap, int *async)
5494 {
5495 	return __ata_port_resume_common(ap, PMSG_RESUME, async);
5496 }
5497 EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
5498 
5499 
5500 /**
5501  *	ata_host_suspend - suspend host
5502  *	@host: host to suspend
5503  *	@mesg: PM message
5504  *
5505  *	Suspend @host.  Actual operation is performed by port suspend.
5506  */
5507 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5508 {
5509 	host->dev->power.power_state = mesg;
5510 	return 0;
5511 }
5512 
5513 /**
5514  *	ata_host_resume - resume host
5515  *	@host: host to resume
5516  *
5517  *	Resume @host.  Actual operation is performed by port resume.
5518  */
5519 void ata_host_resume(struct ata_host *host)
5520 {
5521 	host->dev->power.power_state = PMSG_ON;
5522 }
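
/*
 * Illustrative sketch, not part of this file: a hypothetical platform LLD
 * ("foo_ahci" and the clock handling below are assumptions) would typically
 * wrap the two helpers above in its system PM callbacks like this:
 *
 *	static int foo_ahci_suspend(struct device *dev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(dev);
 *		int rc;
 *
 *		rc = ata_host_suspend(host, PMSG_SUSPEND);
 *		if (rc)
 *			return rc;
 *		foo_disable_clocks(dev);	// driver specific, hypothetical
 *		return 0;
 *	}
 *
 *	static int foo_ahci_resume(struct device *dev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(dev);
 *
 *		foo_enable_clocks(dev);		// driver specific, hypothetical
 *		ata_host_resume(host);
 *		return 0;
 *	}
 *
 * The real PCI equivalents, ata_pci_device_suspend() and
 * ata_pci_device_resume(), are defined further down in this file.
 */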
5523 #endif
5524 
5525 struct device_type ata_port_type = {
5526 	.name = "ata_port",
5527 #ifdef CONFIG_PM
5528 	.pm = &ata_port_pm_ops,
5529 #endif
5530 };
5531 
5532 /**
5533  *	ata_dev_init - Initialize an ata_device structure
5534  *	@dev: Device structure to initialize
5535  *
5536  *	Initialize @dev in preparation for probing.
5537  *
5538  *	LOCKING:
5539  *	Inherited from caller.
5540  */
5541 void ata_dev_init(struct ata_device *dev)
5542 {
5543 	struct ata_link *link = ata_dev_phys_link(dev);
5544 	struct ata_port *ap = link->ap;
5545 	unsigned long flags;
5546 
5547 	/* SATA spd limit is bound to the attached device, reset together */
5548 	link->sata_spd_limit = link->hw_sata_spd_limit;
5549 	link->sata_spd = 0;
5550 
5551 	/* High bits of dev->flags are used to record warm plug
5552 	 * requests which occur asynchronously.  Synchronize using
5553 	 * host lock.
5554 	 */
5555 	spin_lock_irqsave(ap->lock, flags);
5556 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5557 	dev->horkage = 0;
5558 	spin_unlock_irqrestore(ap->lock, flags);
5559 
5560 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5561 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5562 	dev->pio_mask = UINT_MAX;
5563 	dev->mwdma_mask = UINT_MAX;
5564 	dev->udma_mask = UINT_MAX;
5565 }
5566 
5567 /**
5568  *	ata_link_init - Initialize an ata_link structure
5569  *	@ap: ATA port link is attached to
5570  *	@link: Link structure to initialize
5571  *	@pmp: Port multiplier port number
5572  *
5573  *	Initialize @link.
5574  *
5575  *	LOCKING:
5576  *	Kernel thread context (may sleep)
5577  */
5578 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5579 {
5580 	int i;
5581 
5582 	/* clear everything except for devices */
5583 	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5584 	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5585 
5586 	link->ap = ap;
5587 	link->pmp = pmp;
5588 	link->active_tag = ATA_TAG_POISON;
5589 	link->hw_sata_spd_limit = UINT_MAX;
5590 
5591 	/* can't use iterator, ap isn't initialized yet */
5592 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5593 		struct ata_device *dev = &link->device[i];
5594 
5595 		dev->link = link;
5596 		dev->devno = dev - link->device;
5597 #ifdef CONFIG_ATA_ACPI
5598 		dev->gtf_filter = ata_acpi_gtf_filter;
5599 #endif
5600 		ata_dev_init(dev);
5601 	}
5602 }
5603 
5604 /**
5605  *	sata_link_init_spd - Initialize link->sata_spd_limit
5606  *	@link: Link to configure sata_spd_limit for
5607  *
5608  *	Initialize @link->[hw_]sata_spd_limit to the currently
5609  *	configured value.
5610  *
5611  *	LOCKING:
5612  *	Kernel thread context (may sleep).
5613  *
5614  *	RETURNS:
5615  *	0 on success, -errno on failure.
5616  */
5617 int sata_link_init_spd(struct ata_link *link)
5618 {
5619 	u8 spd;
5620 	int rc;
5621 
5622 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5623 	if (rc)
5624 		return rc;
5625 
5626 	spd = (link->saved_scontrol >> 4) & 0xf;
5627 	if (spd)
5628 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5629 
5630 	ata_force_link_limits(link);
5631 
5632 	link->sata_spd_limit = link->hw_sata_spd_limit;
5633 
5634 	return 0;
5635 }
5636 
5637 /**
5638  *	ata_port_alloc - allocate and initialize basic ATA port resources
5639  *	@host: ATA host this allocated port belongs to
5640  *
5641  *	Allocate and initialize basic ATA port resources.
5642  *
5643  *	RETURNS:
5644  *	Allocated ATA port on success, NULL on failure.
5645  *
5646  *	LOCKING:
5647  *	Inherited from calling layer (may sleep).
5648  */
5649 struct ata_port *ata_port_alloc(struct ata_host *host)
5650 {
5651 	struct ata_port *ap;
5652 
5653 	DPRINTK("ENTER\n");
5654 
5655 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5656 	if (!ap)
5657 		return NULL;
5658 
5659 	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5660 	ap->lock = &host->lock;
5661 	ap->print_id = -1;
5662 	ap->local_port_no = -1;
5663 	ap->host = host;
5664 	ap->dev = host->dev;
5665 
5666 #if defined(ATA_VERBOSE_DEBUG)
5667 	/* turn on all debugging levels */
5668 	ap->msg_enable = 0x00FF;
5669 #elif defined(ATA_DEBUG)
5670 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5671 #else
5672 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5673 #endif
5674 
5675 	mutex_init(&ap->scsi_scan_mutex);
5676 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5677 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5678 	INIT_LIST_HEAD(&ap->eh_done_q);
5679 	init_waitqueue_head(&ap->eh_wait_q);
5680 	init_completion(&ap->park_req_pending);
5681 	init_timer_deferrable(&ap->fastdrain_timer);
5682 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5683 	ap->fastdrain_timer.data = (unsigned long)ap;
5684 
5685 	ap->cbl = ATA_CBL_NONE;
5686 
5687 	ata_link_init(ap, &ap->link, 0);
5688 
5689 #ifdef ATA_IRQ_TRAP
5690 	ap->stats.unhandled_irq = 1;
5691 	ap->stats.idle_irq = 1;
5692 #endif
5693 	ata_sff_port_init(ap);
5694 
5695 	return ap;
5696 }
5697 
5698 static void ata_host_release(struct device *gendev, void *res)
5699 {
5700 	struct ata_host *host = dev_get_drvdata(gendev);
5701 	int i;
5702 
5703 	for (i = 0; i < host->n_ports; i++) {
5704 		struct ata_port *ap = host->ports[i];
5705 
5706 		if (!ap)
5707 			continue;
5708 
5709 		if (ap->scsi_host)
5710 			scsi_host_put(ap->scsi_host);
5711 
5712 		kfree(ap->pmp_link);
5713 		kfree(ap->slave_link);
5714 		kfree(ap);
5715 		host->ports[i] = NULL;
5716 	}
5717 
5718 	dev_set_drvdata(gendev, NULL);
5719 }
5720 
5721 /**
5722  *	ata_host_alloc - allocate and init basic ATA host resources
5723  *	@dev: generic device this host is associated with
5724  *	@max_ports: maximum number of ATA ports associated with this host
5725  *
5726  *	Allocate and initialize basic ATA host resources.  An LLD calls
5727  *	this function to allocate a host, then initializes it fully and
5728  *	attaches it using ata_host_register().
5729  *
5730  *	@max_ports ports are allocated and host->n_ports is
5731  *	initialized to @max_ports.  The caller is allowed to decrease
5732  *	host->n_ports before calling ata_host_register().  The unused
5733  *	ports will be automatically freed on registration.
5734  *
5735  *	RETURNS:
5736  *	Allocated ATA host on success, NULL on failure.
5737  *
5738  *	LOCKING:
5739  *	Inherited from calling layer (may sleep).
5740  */
5741 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5742 {
5743 	struct ata_host *host;
5744 	size_t sz;
5745 	int i;
5746 
5747 	DPRINTK("ENTER\n");
5748 
5749 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5750 		return NULL;
5751 
5752 	/* alloc a container for our list of ATA ports (buses) */
5753 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5755 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5756 	if (!host)
5757 		goto err_out;
5758 
5759 	devres_add(dev, host);
5760 	dev_set_drvdata(dev, host);
5761 
5762 	spin_lock_init(&host->lock);
5763 	mutex_init(&host->eh_mutex);
5764 	host->dev = dev;
5765 	host->n_ports = max_ports;
5766 
5767 	/* allocate ports bound to this host */
5768 	for (i = 0; i < max_ports; i++) {
5769 		struct ata_port *ap;
5770 
5771 		ap = ata_port_alloc(host);
5772 		if (!ap)
5773 			goto err_out;
5774 
5775 		ap->port_no = i;
5776 		host->ports[i] = ap;
5777 	}
5778 
5779 	devres_remove_group(dev, NULL);
5780 	return host;
5781 
5782  err_out:
5783 	devres_release_group(dev, NULL);
5784 	return NULL;
5785 }
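
/*
 * Illustrative sketch, not part of this file: a minimal allocation sequence
 * using ata_host_alloc() directly (foo_port_ops is a hypothetical ops table):
 *
 *	host = ata_host_alloc(dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	for (i = 0; i < host->n_ports; i++) {
 *		struct ata_port *ap = host->ports[i];
 *
 *		ap->ops = &foo_port_ops;
 *		ap->pio_mask = ATA_PIO4;
 *		ap->flags |= ATA_FLAG_SATA;
 *	}
 *
 * Most LLDs use ata_host_alloc_pinfo() below instead, which fills these
 * per-port fields from an ata_port_info array.
 */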
5786 
5787 /**
5788  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5789  *	@dev: generic device this host is associated with
5790  *	@ppi: array of ATA port_info to initialize host with
5791  *	@n_ports: number of ATA ports attached to this host
5792  *
5793  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5794  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5795  *	last entry will be used for the remaining ports.
5796  *
5797  *	RETURNS:
5798  *	Allocated ATA host on success, NULL on failure.
5799  *
5800  *	LOCKING:
5801  *	Inherited from calling layer (may sleep).
5802  */
5803 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5804 				      const struct ata_port_info * const * ppi,
5805 				      int n_ports)
5806 {
5807 	const struct ata_port_info *pi;
5808 	struct ata_host *host;
5809 	int i, j;
5810 
5811 	host = ata_host_alloc(dev, n_ports);
5812 	if (!host)
5813 		return NULL;
5814 
5815 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5816 		struct ata_port *ap = host->ports[i];
5817 
5818 		if (ppi[j])
5819 			pi = ppi[j++];
5820 
5821 		ap->pio_mask = pi->pio_mask;
5822 		ap->mwdma_mask = pi->mwdma_mask;
5823 		ap->udma_mask = pi->udma_mask;
5824 		ap->flags |= pi->flags;
5825 		ap->link.flags |= pi->link_flags;
5826 		ap->ops = pi->port_ops;
5827 
5828 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5829 			host->ops = pi->port_ops;
5830 	}
5831 
5832 	return host;
5833 }
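
/*
 * Illustrative sketch, not part of this file: typical ata_host_alloc_pinfo()
 * usage in an LLD probe path (foo_port_info/foo_port_ops are hypothetical):
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &foo_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
 *
 * Because @ppi is NULL terminated after the first entry, every port is
 * initialized from foo_port_info.
 */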
5834 
5835 /**
5836  *	ata_slave_link_init - initialize slave link
5837  *	@ap: port to initialize slave link for
5838  *
5839  *	Create and initialize slave link for @ap.  This enables slave
5840  *	link handling on the port.
5841  *
5842  *	In libata, a port contains links and a link contains devices.
5843  *	There is a single host link but if a PMP is attached to it,
5844  *	there can be multiple fan-out links.  On SATA, there's usually
5845  *	a single device connected to a link but PATA and SATA
5846  *	controllers emulating TF based interface can have two - master
5847  *	and slave.
5848  *
5849  *	However, there are a few controllers which don't fit into this
5850  *	abstraction too well - SATA controllers which emulate TF
5851  *	interface with both master and slave devices but also have
5852  *	separate SCR register sets for each device.  These controllers
5853  *	need separate links for physical link handling
5854  *	(e.g. onlineness, link speed) but should be treated like a
5855  *	traditional M/S controller for everything else (e.g. command
5856  *	issue, softreset).
5857  *
5858  *	slave_link is libata's way of handling this class of
5859  *	controllers without impacting core layer too much.  For
5860  *	anything other than physical link handling, the default host
5861  *	link is used for both master and slave.  For physical link
5862  *	handling, separate @ap->slave_link is used.  All dirty details
5863  *	are implemented inside libata core layer.  From LLD's POV, the
5864  *	only difference is that prereset, hardreset and postreset are
5865  *	called once more for the slave link, so the reset sequence
5866  *	looks like the following.
5867  *
5868  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5869  *	softreset(M) -> postreset(M) -> postreset(S)
5870  *
5871  *	Note that softreset is called only for the master.  Softreset
5872  *	resets both M/S by definition, so SRST on master should handle
5873  *	both (the standard method will work just fine).
5874  *
5875  *	LOCKING:
5876  *	Should be called before host is registered.
5877  *
5878  *	RETURNS:
5879  *	0 on success, -errno on failure.
5880  */
5881 int ata_slave_link_init(struct ata_port *ap)
5882 {
5883 	struct ata_link *link;
5884 
5885 	WARN_ON(ap->slave_link);
5886 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5887 
5888 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5889 	if (!link)
5890 		return -ENOMEM;
5891 
5892 	ata_link_init(ap, link, 1);
5893 	ap->slave_link = link;
5894 	return 0;
5895 }
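
/*
 * Illustrative sketch, not part of this file: an LLD for a controller with
 * separate SCRs for the master and slave devices would enable slave link
 * handling before host registration, e.g. from its port setup code:
 *
 *	rc = ata_slave_link_init(ap);
 *	if (rc)
 *		return rc;
 *
 * After this, ap->slave_link is used by the core for physical link handling
 * while command issue still goes through the host link, as described above.
 */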
5896 
5897 static void ata_host_stop(struct device *gendev, void *res)
5898 {
5899 	struct ata_host *host = dev_get_drvdata(gendev);
5900 	int i;
5901 
5902 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5903 
5904 	for (i = 0; i < host->n_ports; i++) {
5905 		struct ata_port *ap = host->ports[i];
5906 
5907 		if (ap->ops->port_stop)
5908 			ap->ops->port_stop(ap);
5909 	}
5910 
5911 	if (host->ops->host_stop)
5912 		host->ops->host_stop(host);
5913 }
5914 
5915 /**
5916  *	ata_finalize_port_ops - finalize ata_port_operations
5917  *	@ops: ata_port_operations to finalize
5918  *
5919  *	An ata_port_operations can inherit from another ops and that
5920  *	ops can again inherit from another.  This can go on as many
5921  *	times as necessary as long as there is no loop in the
5922  *	inheritance chain.
5923  *
5924  *	Ops tables are finalized when the host is started.  NULL or
5925  *	unspecified entries are inherited from the closest ancestor
5926  *	which has the method and the entry is populated with it.
5927  *	After finalization, the ops table directly points to all the
5928  *	methods and ->inherits is no longer necessary and cleared.
5929  *
5930  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5931  *
5932  *	LOCKING:
5933  *	None.
5934  */
5935 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5936 {
5937 	static DEFINE_SPINLOCK(lock);
5938 	const struct ata_port_operations *cur;
5939 	void **begin = (void **)ops;
5940 	void **end = (void **)&ops->inherits;
5941 	void **pp;
5942 
5943 	if (!ops || !ops->inherits)
5944 		return;
5945 
5946 	spin_lock(&lock);
5947 
5948 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5949 		void **inherit = (void **)cur;
5950 
5951 		for (pp = begin; pp < end; pp++, inherit++)
5952 			if (!*pp)
5953 				*pp = *inherit;
5954 	}
5955 
5956 	for (pp = begin; pp < end; pp++)
5957 		if (IS_ERR(*pp))
5958 			*pp = NULL;
5959 
5960 	ops->inherits = NULL;
5961 
5962 	spin_unlock(&lock);
5963 }
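
/*
 * Illustrative sketch, not part of this file: the inheritance resolved above
 * is declared by LLDs roughly like this (foo_* names are hypothetical):
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.qc_issue	= foo_qc_issue,
 *		.hardreset	= ATA_OP_NULL,	// force the method to NULL
 *	};
 *
 * Unspecified slots are copied from the closest ancestor that provides them
 * when the host is started, and ->inherits is cleared afterwards.
 */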
5964 
5965 /**
5966  *	ata_host_start - start and freeze ports of an ATA host
5967  *	@host: ATA host to start ports for
5968  *
5969  *	Start and then freeze ports of @host.  Started status is
5970  *	recorded in host->flags, so this function can be called
5971  *	multiple times.  Ports are guaranteed to get started only
5972  *	once.  If host->ops isn't initialized yet, it's set to the
5973  *	first non-dummy port ops.
5974  *
5975  *	LOCKING:
5976  *	Inherited from calling layer (may sleep).
5977  *
5978  *	RETURNS:
5979  *	0 if all ports are started successfully, -errno otherwise.
5980  */
5981 int ata_host_start(struct ata_host *host)
5982 {
5983 	int have_stop = 0;
5984 	void *start_dr = NULL;
5985 	int i, rc;
5986 
5987 	if (host->flags & ATA_HOST_STARTED)
5988 		return 0;
5989 
5990 	ata_finalize_port_ops(host->ops);
5991 
5992 	for (i = 0; i < host->n_ports; i++) {
5993 		struct ata_port *ap = host->ports[i];
5994 
5995 		ata_finalize_port_ops(ap->ops);
5996 
5997 		if (!host->ops && !ata_port_is_dummy(ap))
5998 			host->ops = ap->ops;
5999 
6000 		if (ap->ops->port_stop)
6001 			have_stop = 1;
6002 	}
6003 
6004 	if (host->ops->host_stop)
6005 		have_stop = 1;
6006 
6007 	if (have_stop) {
6008 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6009 		if (!start_dr)
6010 			return -ENOMEM;
6011 	}
6012 
6013 	for (i = 0; i < host->n_ports; i++) {
6014 		struct ata_port *ap = host->ports[i];
6015 
6016 		if (ap->ops->port_start) {
6017 			rc = ap->ops->port_start(ap);
6018 			if (rc) {
6019 				if (rc != -ENODEV)
6020 					dev_err(host->dev,
6021 						"failed to start port %d (errno=%d)\n",
6022 						i, rc);
6023 				goto err_out;
6024 			}
6025 		}
6026 		ata_eh_freeze_port(ap);
6027 	}
6028 
6029 	if (start_dr)
6030 		devres_add(host->dev, start_dr);
6031 	host->flags |= ATA_HOST_STARTED;
6032 	return 0;
6033 
6034  err_out:
6035 	while (--i >= 0) {
6036 		struct ata_port *ap = host->ports[i];
6037 
6038 		if (ap->ops->port_stop)
6039 			ap->ops->port_stop(ap);
6040 	}
6041 	devres_free(start_dr);
6042 	return rc;
6043 }
6044 
6045 /**
6046  *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
6047  *	@host:	host to initialize
6048  *	@dev:	device host is attached to
6049  *	@ops:	port_ops
6050  *
6051  */
6052 void ata_host_init(struct ata_host *host, struct device *dev,
6053 		   struct ata_port_operations *ops)
6054 {
6055 	spin_lock_init(&host->lock);
6056 	mutex_init(&host->eh_mutex);
6057 	host->dev = dev;
6058 	host->ops = ops;
6059 }
6060 
6061 void __ata_port_probe(struct ata_port *ap)
6062 {
6063 	struct ata_eh_info *ehi = &ap->link.eh_info;
6064 	unsigned long flags;
6065 
6066 	/* kick EH for boot probing */
6067 	spin_lock_irqsave(ap->lock, flags);
6068 
6069 	ehi->probe_mask |= ATA_ALL_DEVICES;
6070 	ehi->action |= ATA_EH_RESET;
6071 	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6072 
6073 	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6074 	ap->pflags |= ATA_PFLAG_LOADING;
6075 	ata_port_schedule_eh(ap);
6076 
6077 	spin_unlock_irqrestore(ap->lock, flags);
6078 }
6079 
6080 int ata_port_probe(struct ata_port *ap)
6081 {
6082 	int rc = 0;
6083 
6084 	if (ap->ops->error_handler) {
6085 		__ata_port_probe(ap);
6086 		ata_port_wait_eh(ap);
6087 	} else {
6088 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6089 		rc = ata_bus_probe(ap);
6090 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6091 	}
6092 	return rc;
6093 }
6094 
6095 
6096 static void async_port_probe(void *data, async_cookie_t cookie)
6097 {
6098 	struct ata_port *ap = data;
6099 
6100 	/*
6101 	 * If we're not allowed to scan this host in parallel,
6102 	 * we need to wait until all previous scans have completed
6103 	 * before going further.
6104 	 * Jeff Garzik says this is only within a controller, so we
6105 	 * don't need to wait for port 0, only for later ports.
6106 	 */
6107 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6108 		async_synchronize_cookie(cookie);
6109 
6110 	(void)ata_port_probe(ap);
6111 
6112 	/* in order to keep device order, we need to synchronize at this point */
6113 	async_synchronize_cookie(cookie);
6114 
6115 	ata_scsi_scan_host(ap, 1);
6116 }
6117 
6118 /**
6119  *	ata_host_register - register initialized ATA host
6120  *	@host: ATA host to register
6121  *	@sht: template for SCSI host
6122  *
6123  *	Register initialized ATA host.  @host is allocated using
6124  *	ata_host_alloc() and fully initialized by LLD.  This function
6125  *	starts ports, registers @host with ATA and SCSI layers and
6126  *	probe registered devices.
6127  *	probes registered devices.
6128  *	LOCKING:
6129  *	Inherited from calling layer (may sleep).
6130  *
6131  *	RETURNS:
6132  *	0 on success, -errno otherwise.
6133  */
6134 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6135 {
6136 	int i, rc;
6137 
6138 	/* host must have been started */
6139 	if (!(host->flags & ATA_HOST_STARTED)) {
6140 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6141 		WARN_ON(1);
6142 		return -EINVAL;
6143 	}
6144 
6145 	/* Blow away unused ports.  This happens when LLD can't
6146 	 * determine the exact number of ports to allocate at
6147 	 * allocation time.
6148 	 */
6149 	for (i = host->n_ports; host->ports[i]; i++)
6150 		kfree(host->ports[i]);
6151 
6152 	/* give ports names and add SCSI hosts */
6153 	for (i = 0; i < host->n_ports; i++) {
6154 		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6155 		host->ports[i]->local_port_no = i + 1;
6156 	}
6157 
6158 	/* Create associated sysfs transport objects  */
6159 	for (i = 0; i < host->n_ports; i++) {
6160 		rc = ata_tport_add(host->dev, host->ports[i]);
6161 		if (rc) {
6162 			goto err_tadd;
6163 		}
6164 	}
6165 
6166 	rc = ata_scsi_add_hosts(host, sht);
6167 	if (rc)
6168 		goto err_tadd;
6169 
6170 	/* set cable, sata_spd_limit and report */
6171 	for (i = 0; i < host->n_ports; i++) {
6172 		struct ata_port *ap = host->ports[i];
6173 		unsigned long xfer_mask;
6174 
6175 		/* set SATA cable type if still unset */
6176 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6177 			ap->cbl = ATA_CBL_SATA;
6178 
6179 		/* init sata_spd_limit to the current value */
6180 		sata_link_init_spd(&ap->link);
6181 		if (ap->slave_link)
6182 			sata_link_init_spd(ap->slave_link);
6183 
6184 		/* print per-port info to dmesg */
6185 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6186 					      ap->udma_mask);
6187 
6188 		if (!ata_port_is_dummy(ap)) {
6189 			ata_port_info(ap, "%cATA max %s %s\n",
6190 				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6191 				      ata_mode_string(xfer_mask),
6192 				      ap->link.eh_info.desc);
6193 			ata_ehi_clear_desc(&ap->link.eh_info);
6194 		} else
6195 			ata_port_info(ap, "DUMMY\n");
6196 	}
6197 
6198 	/* perform each probe asynchronously */
6199 	for (i = 0; i < host->n_ports; i++) {
6200 		struct ata_port *ap = host->ports[i];
6201 		async_schedule(async_port_probe, ap);
6202 	}
6203 
6204 	return 0;
6205 
6206  err_tadd:
6207 	while (--i >= 0) {
6208 		ata_tport_delete(host->ports[i]);
6209 	}
6210 	return rc;
6211 
6212 }
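
/*
 * Illustrative sketch, not part of this file: LLDs which can't use
 * ata_host_activate() below (e.g. per-port IRQs) perform the three steps by
 * hand (foo_interrupt/foo_sht are hypothetical):
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	rc = devm_request_irq(host->dev, irq, foo_interrupt, 0,
 *			      dev_driver_string(host->dev), host);
 *	if (rc)
 *		return rc;
 *	return ata_host_register(host, &foo_sht);
 */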
6213 
6214 /**
6215  *	ata_host_activate - start host, request IRQ and register it
6216  *	@host: target ATA host
6217  *	@irq: IRQ to request
6218  *	@irq_handler: irq_handler used when requesting IRQ
6219  *	@irq_flags: irq_flags used when requesting IRQ
6220  *	@sht: scsi_host_template to use when registering the host
6221  *
6222  *	After allocating an ATA host and initializing it, most libata
6223  *	LLDs perform three steps to activate the host - start host,
6224  *	request IRQ and register it.  This helper takes the necessary
6225  *	arguments and performs the three steps in one go.
6226  *
6227  *	An invalid IRQ skips the IRQ registration and expects the host to
6228  *	have set polling mode on the port. In this case, @irq_handler
6229  *	should be NULL.
6230  *
6231  *	LOCKING:
6232  *	Inherited from calling layer (may sleep).
6233  *
6234  *	RETURNS:
6235  *	0 on success, -errno otherwise.
6236  */
6237 int ata_host_activate(struct ata_host *host, int irq,
6238 		      irq_handler_t irq_handler, unsigned long irq_flags,
6239 		      struct scsi_host_template *sht)
6240 {
6241 	int i, rc;
6242 
6243 	rc = ata_host_start(host);
6244 	if (rc)
6245 		return rc;
6246 
6247 	/* Special case for polling mode */
6248 	if (!irq) {
6249 		WARN_ON(irq_handler);
6250 		return ata_host_register(host, sht);
6251 	}
6252 
6253 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6254 			      dev_driver_string(host->dev), host);
6255 	if (rc)
6256 		return rc;
6257 
6258 	for (i = 0; i < host->n_ports; i++)
6259 		ata_port_desc(host->ports[i], "irq %d", irq);
6260 
6261 	rc = ata_host_register(host, sht);
6262 	/* if failed, just free the IRQ and leave ports alone */
6263 	if (rc)
6264 		devm_free_irq(host->dev, irq, host);
6265 
6266 	return rc;
6267 }
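
/*
 * Illustrative sketch, not part of this file: the common "allocate, init,
 * activate" probe path of a simple PCI LLD condenses to the following
 * (foo_interrupt/foo_sht and the BAR mapping are hypothetical):
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *	if (!host)
 *		return -ENOMEM;
 *	// ...map BARs and set up ap->ioaddr here (driver specific)...
 *	return ata_host_activate(host, pdev->irq, foo_interrupt,
 *				 IRQF_SHARED, &foo_sht);
 */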
6268 
6269 /**
6270  *	ata_port_detach - Detach ATA port in preparation for device removal
6271  *	@ap: ATA port to be detached
6272  *
6273  *	Detach all ATA devices and the associated SCSI devices of @ap;
6274  *	then, remove the associated SCSI host.  @ap is guaranteed to
6275  *	be quiescent on return from this function.
6276  *
6277  *	LOCKING:
6278  *	Kernel thread context (may sleep).
6279  */
6280 static void ata_port_detach(struct ata_port *ap)
6281 {
6282 	unsigned long flags;
6283 
6284 	if (!ap->ops->error_handler)
6285 		goto skip_eh;
6286 
6287 	/* tell EH we're leaving & flush EH */
6288 	spin_lock_irqsave(ap->lock, flags);
6289 	ap->pflags |= ATA_PFLAG_UNLOADING;
6290 	ata_port_schedule_eh(ap);
6291 	spin_unlock_irqrestore(ap->lock, flags);
6292 
6293 	/* wait till EH commits suicide */
6294 	ata_port_wait_eh(ap);
6295 
6296 	/* it better be dead now */
6297 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6298 
6299 	cancel_delayed_work_sync(&ap->hotplug_task);
6300 
6301  skip_eh:
6302 	if (ap->pmp_link) {
6303 		int i;
6304 		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6305 			ata_tlink_delete(&ap->pmp_link[i]);
6306 	}
6307 	/* remove the associated SCSI host */
6308 	scsi_remove_host(ap->scsi_host);
6309 	ata_tport_delete(ap);
6310 }
6311 
6312 /**
6313  *	ata_host_detach - Detach all ports of an ATA host
6314  *	@host: Host to detach
6315  *
6316  *	Detach all ports of @host.
6317  *
6318  *	LOCKING:
6319  *	Kernel thread context (may sleep).
6320  */
6321 void ata_host_detach(struct ata_host *host)
6322 {
6323 	int i;
6324 
6325 	for (i = 0; i < host->n_ports; i++)
6326 		ata_port_detach(host->ports[i]);
6327 
6328 	/* the host is dead now, dissociate ACPI */
6329 	ata_acpi_dissociate(host);
6330 }
6331 
6332 #ifdef CONFIG_PCI
6333 
6334 /**
6335  *	ata_pci_remove_one - PCI layer callback for device removal
6336  *	@pdev: PCI device that was removed
6337  *
6338  *	PCI layer indicates to libata via this hook that hot-unplug or
6339  *	PCI layer indicates to libata via this hook that a hot-unplug or
6340  *	release is handled via devres.
6341  *
6342  *	LOCKING:
6343  *	Inherited from PCI layer (may sleep).
6344  */
6345 void ata_pci_remove_one(struct pci_dev *pdev)
6346 {
6347 	struct ata_host *host = pci_get_drvdata(pdev);
6348 
6349 	ata_host_detach(host);
6350 }
6351 
6352 /* move to PCI subsystem */
6353 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6354 {
6355 	unsigned long tmp = 0;
6356 
6357 	switch (bits->width) {
6358 	case 1: {
6359 		u8 tmp8 = 0;
6360 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6361 		tmp = tmp8;
6362 		break;
6363 	}
6364 	case 2: {
6365 		u16 tmp16 = 0;
6366 		pci_read_config_word(pdev, bits->reg, &tmp16);
6367 		tmp = tmp16;
6368 		break;
6369 	}
6370 	case 4: {
6371 		u32 tmp32 = 0;
6372 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6373 		tmp = tmp32;
6374 		break;
6375 	}
6376 
6377 	default:
6378 		return -EINVAL;
6379 	}
6380 
6381 	tmp &= bits->mask;
6382 
6383 	return (tmp == bits->val) ? 1 : 0;
6384 }
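
/*
 * Illustrative sketch, not part of this file: pci_test_config_bits() is
 * typically used by PATA drivers to check whether a legacy IDE channel is
 * enabled in PCI config space (register offsets below are hypothetical):
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// reg, width, mask, val
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;	// port disabled, skip reset/probe
 */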
6385 
6386 #ifdef CONFIG_PM
6387 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6388 {
6389 	pci_save_state(pdev);
6390 	pci_disable_device(pdev);
6391 
6392 	if (mesg.event & PM_EVENT_SLEEP)
6393 		pci_set_power_state(pdev, PCI_D3hot);
6394 }
6395 
6396 int ata_pci_device_do_resume(struct pci_dev *pdev)
6397 {
6398 	int rc;
6399 
6400 	pci_set_power_state(pdev, PCI_D0);
6401 	pci_restore_state(pdev);
6402 
6403 	rc = pcim_enable_device(pdev);
6404 	if (rc) {
6405 		dev_err(&pdev->dev,
6406 			"failed to enable device after resume (%d)\n", rc);
6407 		return rc;
6408 	}
6409 
6410 	pci_set_master(pdev);
6411 	return 0;
6412 }
6413 
6414 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6415 {
6416 	struct ata_host *host = pci_get_drvdata(pdev);
6417 	int rc = 0;
6418 
6419 	rc = ata_host_suspend(host, mesg);
6420 	if (rc)
6421 		return rc;
6422 
6423 	ata_pci_device_do_suspend(pdev, mesg);
6424 
6425 	return 0;
6426 }
6427 
6428 int ata_pci_device_resume(struct pci_dev *pdev)
6429 {
6430 	struct ata_host *host = pci_get_drvdata(pdev);
6431 	int rc;
6432 
6433 	rc = ata_pci_device_do_resume(pdev);
6434 	if (rc == 0)
6435 		ata_host_resume(host);
6436 	return rc;
6437 }
6438 #endif /* CONFIG_PM */
6439 
6440 #endif /* CONFIG_PCI */
6441 
6442 /**
6443  *	ata_platform_remove_one - Platform layer callback for device removal
6444  *	@pdev: Platform device that was removed
6445  *
6446  *	Platform layer indicates to libata via this hook that hot-unplug or
6447  *	Platform layer indicates to libata via this hook that a hot-unplug or
6448  *	release is handled via devres.
6449  *
6450  *	LOCKING:
6451  *	Inherited from platform layer (may sleep).
6452  */
6453 int ata_platform_remove_one(struct platform_device *pdev)
6454 {
6455 	struct ata_host *host = platform_get_drvdata(pdev);
6456 
6457 	ata_host_detach(host);
6458 
6459 	return 0;
6460 }
6461 
6462 static int __init ata_parse_force_one(char **cur,
6463 				      struct ata_force_ent *force_ent,
6464 				      const char **reason)
6465 {
6466 	/* FIXME: Currently, there's no way to tag init const data and
6467 	 * using __initdata causes build failure on some versions of
6468 	 * gcc.  Once __initdataconst is implemented, add const to the
6469 	 * following structure.
6470 	 */
6471 	static struct ata_force_param force_tbl[] __initdata = {
6472 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6473 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6474 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6475 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6476 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6477 		{ "sata",	.cbl		= ATA_CBL_SATA },
6478 		{ "1.5Gbps",	.spd_limit	= 1 },
6479 		{ "3.0Gbps",	.spd_limit	= 2 },
6480 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6481 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6482 		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6483 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6484 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6485 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6486 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6487 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6488 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6489 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6490 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6491 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6492 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6493 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6494 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6495 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6496 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6497 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6498 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6499 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6500 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6501 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6502 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6503 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6504 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6505 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6506 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6507 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6508 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6509 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6510 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6511 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6512 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6513 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6514 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6515 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6516 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6517 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6518 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6519 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6520 		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6521 		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6522 	};
6523 	char *start = *cur, *p = *cur;
6524 	char *id, *val, *endp;
6525 	const struct ata_force_param *match_fp = NULL;
6526 	int nr_matches = 0, i;
6527 
6528 	/* find where this param ends and update *cur */
6529 	while (*p != '\0' && *p != ',')
6530 		p++;
6531 
6532 	if (*p == '\0')
6533 		*cur = p;
6534 	else
6535 		*cur = p + 1;
6536 
6537 	*p = '\0';
6538 
6539 	/* parse */
6540 	p = strchr(start, ':');
6541 	if (!p) {
6542 		val = strstrip(start);
6543 		goto parse_val;
6544 	}
6545 	*p = '\0';
6546 
6547 	id = strstrip(start);
6548 	val = strstrip(p + 1);
6549 
6550 	/* parse id */
6551 	p = strchr(id, '.');
6552 	if (p) {
6553 		*p++ = '\0';
6554 		force_ent->device = simple_strtoul(p, &endp, 10);
6555 		if (p == endp || *endp != '\0') {
6556 			*reason = "invalid device";
6557 			return -EINVAL;
6558 		}
6559 	}
6560 
6561 	force_ent->port = simple_strtoul(id, &endp, 10);
6562 	if (p == endp || *endp != '\0') {
6563 		*reason = "invalid port/link";
6564 		return -EINVAL;
6565 	}
6566 
6567  parse_val:
6568 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6569 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6570 		const struct ata_force_param *fp = &force_tbl[i];
6571 
6572 		if (strncasecmp(val, fp->name, strlen(val)))
6573 			continue;
6574 
6575 		nr_matches++;
6576 		match_fp = fp;
6577 
6578 		if (strcasecmp(val, fp->name) == 0) {
6579 			nr_matches = 1;
6580 			break;
6581 		}
6582 	}
6583 
6584 	if (!nr_matches) {
6585 		*reason = "unknown value";
6586 		return -EINVAL;
6587 	}
6588 	if (nr_matches > 1) {
6589 		*reason = "ambiguous value";
6590 		return -EINVAL;
6591 	}
6592 
6593 	force_ent->param = *match_fp;
6594 
6595 	return 0;
6596 }
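
/*
 * Illustrative examples of the libata.force syntax handled above; see
 * Documentation/kernel-parameters.txt for the authoritative description:
 *
 *	libata.force=noncq		   apply to all ports and devices
 *	libata.force=1:1.5Gbps		   limit port 1 to 1.5Gbps
 *	libata.force=2.00:udma/33	   limit device 0 on port 2 to UDMA/33
 *	libata.force=1:noncq,2:dump_id	   comma separated list of entries
 */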
6597 
6598 static void __init ata_parse_force_param(void)
6599 {
6600 	int idx = 0, size = 1;
6601 	int last_port = -1, last_device = -1;
6602 	char *p, *cur, *next;
6603 
6604 	/* calculate maximum number of params and allocate force_tbl */
6605 	for (p = ata_force_param_buf; *p; p++)
6606 		if (*p == ',')
6607 			size++;
6608 
6609 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6610 	if (!ata_force_tbl) {
6611 		printk(KERN_WARNING "ata: failed to extend force table, "
6612 		       "libata.force ignored\n");
6613 		return;
6614 	}
6615 
6616 	/* parse and populate the table */
6617 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6618 		const char *reason = "";
6619 		struct ata_force_ent te = { .port = -1, .device = -1 };
6620 
6621 		next = cur;
6622 		if (ata_parse_force_one(&next, &te, &reason)) {
6623 			printk(KERN_WARNING "ata: failed to parse force "
6624 			       "parameter \"%s\" (%s)\n",
6625 			       cur, reason);
6626 			continue;
6627 		}
6628 
6629 		if (te.port == -1) {
6630 			te.port = last_port;
6631 			te.device = last_device;
6632 		}
6633 
6634 		ata_force_tbl[idx++] = te;
6635 
6636 		last_port = te.port;
6637 		last_device = te.device;
6638 	}
6639 
6640 	ata_force_tbl_size = idx;
6641 }
6642 
6643 static int __init ata_init(void)
6644 {
6645 	int rc;
6646 
6647 	ata_parse_force_param();
6648 
6649 	rc = ata_sff_init();
6650 	if (rc) {
6651 		kfree(ata_force_tbl);
6652 		return rc;
6653 	}
6654 
6655 	libata_transport_init();
6656 	ata_scsi_transport_template = ata_attach_transport();
6657 	if (!ata_scsi_transport_template) {
6658 		ata_sff_exit();
6659 		rc = -ENOMEM;
6660 		goto err_out;
6661 	}
6662 
6663 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6664 	return 0;
6665 
6666 err_out:
6667 	return rc;
6668 }
6669 
6670 static void __exit ata_exit(void)
6671 {
6672 	ata_release_transport(ata_scsi_transport_template);
6673 	libata_transport_exit();
6674 	ata_sff_exit();
6675 	kfree(ata_force_tbl);
6676 }
6677 
6678 subsys_initcall(ata_init);
6679 module_exit(ata_exit);
6680 
6681 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6682 
6683 int ata_ratelimit(void)
6684 {
6685 	return __ratelimit(&ratelimit);
6686 }
6687 
6688 /**
6689  *	ata_msleep - ATA EH owner aware msleep
6690  *	@ap: ATA port to attribute the sleep to
6691  *	@msecs: duration to sleep in milliseconds
6692  *
6693  *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6694  *	ownership is released before going to sleep and reacquired
6695  *	after the sleep is complete.  IOW, other ports sharing the
6696  *	@ap->host will be allowed to own the EH while this task is
6697  *	sleeping.
6698  *
6699  *	LOCKING:
6700  *	Might sleep.
6701  */
6702 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6703 {
6704 	bool owns_eh = ap && ap->host->eh_owner == current;
6705 
6706 	if (owns_eh)
6707 		ata_eh_release(ap);
6708 
6709 	msleep(msecs);
6710 
6711 	if (owns_eh)
6712 		ata_eh_acquire(ap);
6713 }
6714 
6715 /**
6716  *	ata_wait_register - wait until register value changes
6717  *	@ap: ATA port to wait register for, can be NULL
6718  *	@reg: IO-mapped register
6719  *	@mask: Mask to apply to read register value
6720  *	@val: Wait condition
6721  *	@interval: polling interval in milliseconds
6722  *	@timeout: timeout in milliseconds
6723  *
6724  *	Waiting for some bits of register to change is a common
6725  *	operation for ATA controllers.  This function reads 32bit LE
6726  *	IO-mapped register @reg and tests for the following condition.
6727  *
6728  *	(*@reg & mask) != val
6729  *
6730  *	If the condition is met, it returns; otherwise, the process is
6731  *	repeated after @interval until timeout.
6732  *
6733  *	LOCKING:
6734  *	Kernel thread context (may sleep)
6735  *
6736  *	RETURNS:
6737  *	The final register value.
6738  */
6739 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6740 		      unsigned long interval, unsigned long timeout)
6741 {
6742 	unsigned long deadline;
6743 	u32 tmp;
6744 
6745 	tmp = ioread32(reg);
6746 
6747 	/* Calculate timeout _after_ the first read to make sure
6748 	 * preceding writes reach the controller before starting to
6749 	 * eat away the timeout.
6750 	 */
6751 	deadline = ata_deadline(jiffies, timeout);
6752 
6753 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6754 		ata_msleep(ap, interval);
6755 		tmp = ioread32(reg);
6756 	}
6757 
6758 	return tmp;
6759 }
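
/*
 * Illustrative sketch, not part of this file: a typical caller polls a
 * controller status register until a busy bit clears (FOO_STATUS/FOO_BUSY
 * are hypothetical):
 *
 *	tmp = ata_wait_register(ap, mmio + FOO_STATUS,
 *				FOO_BUSY, FOO_BUSY, 10, 500);
 *	if (tmp & FOO_BUSY)
 *		return -EBUSY;
 *
 * i.e. wait until (reg & FOO_BUSY) != FOO_BUSY, polling every 10ms for at
 * most 500ms.
 */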
6760 
6761 /*
6762  * Dummy port_ops
6763  */
6764 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6765 {
6766 	return AC_ERR_SYSTEM;
6767 }
6768 
6769 static void ata_dummy_error_handler(struct ata_port *ap)
6770 {
6771 	/* truly dummy */
6772 }
6773 
6774 struct ata_port_operations ata_dummy_port_ops = {
6775 	.qc_prep		= ata_noop_qc_prep,
6776 	.qc_issue		= ata_dummy_qc_issue,
6777 	.error_handler		= ata_dummy_error_handler,
6778 	.sched_eh		= ata_std_sched_eh,
6779 	.end_eh			= ata_std_end_eh,
6780 };
6781 
6782 const struct ata_port_info ata_dummy_port_info = {
6783 	.port_ops		= &ata_dummy_port_ops,
6784 };
6785 
6786 /*
6787  * Utility print functions
6788  */
6789 int ata_port_printk(const struct ata_port *ap, const char *level,
6790 		    const char *fmt, ...)
6791 {
6792 	struct va_format vaf;
6793 	va_list args;
6794 	int r;
6795 
6796 	va_start(args, fmt);
6797 
6798 	vaf.fmt = fmt;
6799 	vaf.va = &args;
6800 
6801 	r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6802 
6803 	va_end(args);
6804 
6805 	return r;
6806 }
6807 EXPORT_SYMBOL(ata_port_printk);
6808 
6809 int ata_link_printk(const struct ata_link *link, const char *level,
6810 		    const char *fmt, ...)
6811 {
6812 	struct va_format vaf;
6813 	va_list args;
6814 	int r;
6815 
6816 	va_start(args, fmt);
6817 
6818 	vaf.fmt = fmt;
6819 	vaf.va = &args;
6820 
6821 	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6822 		r = printk("%sata%u.%02u: %pV",
6823 			   level, link->ap->print_id, link->pmp, &vaf);
6824 	else
6825 		r = printk("%sata%u: %pV",
6826 			   level, link->ap->print_id, &vaf);
6827 
6828 	va_end(args);
6829 
6830 	return r;
6831 }
6832 EXPORT_SYMBOL(ata_link_printk);
6833 
6834 int ata_dev_printk(const struct ata_device *dev, const char *level,
6835 		    const char *fmt, ...)
6836 {
6837 	struct va_format vaf;
6838 	va_list args;
6839 	int r;
6840 
6841 	va_start(args, fmt);
6842 
6843 	vaf.fmt = fmt;
6844 	vaf.va = &args;
6845 
6846 	r = printk("%sata%u.%02u: %pV",
6847 		   level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6848 		   &vaf);
6849 
6850 	va_end(args);
6851 
6852 	return r;
6853 }
6854 EXPORT_SYMBOL(ata_dev_printk);
6855 
6856 void ata_print_version(const struct device *dev, const char *version)
6857 {
6858 	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6859 }
6860 EXPORT_SYMBOL(ata_print_version);
6861 
6862 /*
6863  * libata is essentially a library of internal helper functions for
6864  * low-level ATA host controller drivers.  As such, the API/ABI is
6865  * likely to change as new drivers are added and updated.
6866  * Do not depend on ABI/API stability.
6867  */
6868 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6869 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6870 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6871 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6872 EXPORT_SYMBOL_GPL(sata_port_ops);
6873 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6874 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6875 EXPORT_SYMBOL_GPL(ata_link_next);
6876 EXPORT_SYMBOL_GPL(ata_dev_next);
6877 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6878 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6879 EXPORT_SYMBOL_GPL(ata_host_init);
6880 EXPORT_SYMBOL_GPL(ata_host_alloc);
6881 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6882 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6883 EXPORT_SYMBOL_GPL(ata_host_start);
6884 EXPORT_SYMBOL_GPL(ata_host_register);
6885 EXPORT_SYMBOL_GPL(ata_host_activate);
6886 EXPORT_SYMBOL_GPL(ata_host_detach);
6887 EXPORT_SYMBOL_GPL(ata_sg_init);
6888 EXPORT_SYMBOL_GPL(ata_qc_complete);
6889 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6890 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6891 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6892 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6893 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6894 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6895 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6896 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6897 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6898 EXPORT_SYMBOL_GPL(ata_mode_string);
6899 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6900 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6901 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6902 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6903 EXPORT_SYMBOL_GPL(ata_dev_disable);
6904 EXPORT_SYMBOL_GPL(sata_set_spd);
6905 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6906 EXPORT_SYMBOL_GPL(sata_link_debounce);
6907 EXPORT_SYMBOL_GPL(sata_link_resume);
6908 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6909 EXPORT_SYMBOL_GPL(ata_std_prereset);
6910 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6911 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6912 EXPORT_SYMBOL_GPL(ata_std_postreset);
6913 EXPORT_SYMBOL_GPL(ata_dev_classify);
6914 EXPORT_SYMBOL_GPL(ata_dev_pair);
6915 EXPORT_SYMBOL_GPL(ata_ratelimit);
6916 EXPORT_SYMBOL_GPL(ata_msleep);
6917 EXPORT_SYMBOL_GPL(ata_wait_register);
6918 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6919 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6920 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6921 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6922 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6923 EXPORT_SYMBOL_GPL(sata_scr_valid);
6924 EXPORT_SYMBOL_GPL(sata_scr_read);
6925 EXPORT_SYMBOL_GPL(sata_scr_write);
6926 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6927 EXPORT_SYMBOL_GPL(ata_link_online);
6928 EXPORT_SYMBOL_GPL(ata_link_offline);
6929 #ifdef CONFIG_PM
6930 EXPORT_SYMBOL_GPL(ata_host_suspend);
6931 EXPORT_SYMBOL_GPL(ata_host_resume);
6932 #endif /* CONFIG_PM */
6933 EXPORT_SYMBOL_GPL(ata_id_string);
6934 EXPORT_SYMBOL_GPL(ata_id_c_string);
6935 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6936 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6937 
6938 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6939 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6940 EXPORT_SYMBOL_GPL(ata_timing_compute);
6941 EXPORT_SYMBOL_GPL(ata_timing_merge);
6942 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6943 
6944 #ifdef CONFIG_PCI
6945 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6946 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6947 #ifdef CONFIG_PM
6948 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6949 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6950 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6951 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6952 #endif /* CONFIG_PM */
6953 #endif /* CONFIG_PCI */
6954 
6955 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6956 
6957 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6958 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6959 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6960 EXPORT_SYMBOL_GPL(ata_port_desc);
6961 #ifdef CONFIG_PCI
6962 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6963 #endif /* CONFIG_PCI */
6964 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6965 EXPORT_SYMBOL_GPL(ata_link_abort);
6966 EXPORT_SYMBOL_GPL(ata_port_abort);
6967 EXPORT_SYMBOL_GPL(ata_port_freeze);
6968 EXPORT_SYMBOL_GPL(sata_async_notification);
6969 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6970 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6971 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6972 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6973 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6974 EXPORT_SYMBOL_GPL(ata_do_eh);
6975 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6976 
6977 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6978 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6979 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6980 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6981 EXPORT_SYMBOL_GPL(ata_cable_sata);
6982