xref: /openbmc/linux/drivers/ata/libata-core.c (revision 0c7beb2d)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/driver-api/libata.rst
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/time.h>
54 #include <linux/interrupt.h>
55 #include <linux/completion.h>
56 #include <linux/suspend.h>
57 #include <linux/workqueue.h>
58 #include <linux/scatterlist.h>
59 #include <linux/io.h>
60 #include <linux/async.h>
61 #include <linux/log2.h>
62 #include <linux/slab.h>
63 #include <linux/glob.h>
64 #include <scsi/scsi.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_host.h>
67 #include <linux/libata.h>
68 #include <asm/byteorder.h>
69 #include <asm/unaligned.h>
70 #include <linux/cdrom.h>
71 #include <linux/ratelimit.h>
72 #include <linux/leds.h>
73 #include <linux/pm_runtime.h>
74 #include <linux/platform_device.h>
75 
76 #define CREATE_TRACE_POINTS
77 #include <trace/events/libata.h>
78 
79 #include "libata.h"
80 #include "libata-transport.h"
81 
82 /* debounce timing parameters in msecs { interval, duration, timeout } */
83 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
84 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
85 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
86 
87 const struct ata_port_operations ata_base_port_ops = {
88 	.prereset		= ata_std_prereset,
89 	.postreset		= ata_std_postreset,
90 	.error_handler		= ata_std_error_handler,
91 	.sched_eh		= ata_std_sched_eh,
92 	.end_eh			= ata_std_end_eh,
93 };
94 
95 const struct ata_port_operations sata_port_ops = {
96 	.inherits		= &ata_base_port_ops,
97 
98 	.qc_defer		= ata_std_qc_defer,
99 	.hardreset		= sata_std_hardreset,
100 };
101 
102 static unsigned int ata_dev_init_params(struct ata_device *dev,
103 					u16 heads, u16 sectors);
104 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
105 static void ata_dev_xfermask(struct ata_device *dev);
106 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
107 
108 atomic_t ata_print_id = ATOMIC_INIT(0);
109 
110 struct ata_force_param {
111 	const char	*name;
112 	unsigned int	cbl;
113 	int		spd_limit;
114 	unsigned long	xfer_mask;
115 	unsigned int	horkage_on;
116 	unsigned int	horkage_off;
117 	unsigned int	lflags;
118 };
119 
120 struct ata_force_ent {
121 	int			port;
122 	int			device;
123 	struct ata_force_param	param;
124 };
125 
126 static struct ata_force_ent *ata_force_tbl;
127 static int ata_force_tbl_size;
128 
129 static char ata_force_param_buf[PAGE_SIZE] __initdata;
130 /* param_buf is thrown away after initialization, disallow read */
131 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
132 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
133 
134 static int atapi_enabled = 1;
135 module_param(atapi_enabled, int, 0444);
136 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
137 
138 static int atapi_dmadir = 0;
139 module_param(atapi_dmadir, int, 0444);
140 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
141 
142 int atapi_passthru16 = 1;
143 module_param(atapi_passthru16, int, 0444);
144 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
145 
146 int libata_fua = 0;
147 module_param_named(fua, libata_fua, int, 0444);
148 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
149 
150 static int ata_ignore_hpa;
151 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
152 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
153 
154 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
155 module_param_named(dma, libata_dma_mask, int, 0444);
156 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
157 
158 static int ata_probe_timeout;
159 module_param(ata_probe_timeout, int, 0444);
160 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
161 
162 int libata_noacpi = 0;
163 module_param_named(noacpi, libata_noacpi, int, 0444);
164 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
165 
166 int libata_allow_tpm = 0;
167 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
168 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
169 
170 static int atapi_an;
171 module_param(atapi_an, int, 0444);
172 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
173 
174 MODULE_AUTHOR("Jeff Garzik");
175 MODULE_DESCRIPTION("Library module for ATA devices");
176 MODULE_LICENSE("GPL");
177 MODULE_VERSION(DRV_VERSION);
178 
179 
180 static bool ata_sstatus_online(u32 sstatus)
181 {
182 	return (sstatus & 0xf) == 0x3;
183 }
184 
185 /**
186  *	ata_link_next - link iteration helper
187  *	@link: the previous link, NULL to start
188  *	@ap: ATA port containing links to iterate
189  *	@mode: iteration mode, one of ATA_LITER_*
190  *
191  *	LOCKING:
192  *	Host lock or EH context.
193  *
194  *	RETURNS:
195  *	Pointer to the next link.
196  */
197 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
198 			       enum ata_link_iter_mode mode)
199 {
200 	BUG_ON(mode != ATA_LITER_EDGE &&
201 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
202 
203 	/* NULL link indicates start of iteration */
204 	if (!link)
205 		switch (mode) {
206 		case ATA_LITER_EDGE:
207 		case ATA_LITER_PMP_FIRST:
208 			if (sata_pmp_attached(ap))
209 				return ap->pmp_link;
210 			/* fall through */
211 		case ATA_LITER_HOST_FIRST:
212 			return &ap->link;
213 		}
214 
215 	/* we just iterated over the host link, what's next? */
216 	if (link == &ap->link)
217 		switch (mode) {
218 		case ATA_LITER_HOST_FIRST:
219 			if (sata_pmp_attached(ap))
220 				return ap->pmp_link;
221 			/* fall through */
222 		case ATA_LITER_PMP_FIRST:
223 			if (unlikely(ap->slave_link))
224 				return ap->slave_link;
225 			/* fall through */
226 		case ATA_LITER_EDGE:
227 			return NULL;
228 		}
229 
230 	/* slave_link excludes PMP */
231 	if (unlikely(link == ap->slave_link))
232 		return NULL;
233 
234 	/* we were over a PMP link */
235 	if (++link < ap->pmp_link + ap->nr_pmp_links)
236 		return link;
237 
238 	if (mode == ATA_LITER_PMP_FIRST)
239 		return &ap->link;
240 
241 	return NULL;
242 }
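
/*
 * Usage sketch (illustrative only, not used by this file): the
 * ata_for_each_link() iterator in <linux/libata.h> is built on top of
 * ata_link_next(), but the helper can also be driven by hand, e.g. to
 * visit every edge link of a port:
 *
 *	struct ata_link *link = NULL;
 *
 *	while ((link = ata_link_next(link, ap, ATA_LITER_EDGE)))
 *		ata_link_dbg(link, "visiting link\n");
 */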
243 
244 /**
245  *	ata_dev_next - device iteration helper
246  *	@dev: the previous device, NULL to start
247  *	@link: ATA link containing devices to iterate
248  *	@mode: iteration mode, one of ATA_DITER_*
249  *
250  *	LOCKING:
251  *	Host lock or EH context.
252  *
253  *	RETURNS:
254  *	Pointer to the next device.
255  */
256 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
257 				enum ata_dev_iter_mode mode)
258 {
259 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
260 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
261 
262 	/* NULL dev indicates start of iteration */
263 	if (!dev)
264 		switch (mode) {
265 		case ATA_DITER_ENABLED:
266 		case ATA_DITER_ALL:
267 			dev = link->device;
268 			goto check;
269 		case ATA_DITER_ENABLED_REVERSE:
270 		case ATA_DITER_ALL_REVERSE:
271 			dev = link->device + ata_link_max_devices(link) - 1;
272 			goto check;
273 		}
274 
275  next:
276 	/* move to the next one */
277 	switch (mode) {
278 	case ATA_DITER_ENABLED:
279 	case ATA_DITER_ALL:
280 		if (++dev < link->device + ata_link_max_devices(link))
281 			goto check;
282 		return NULL;
283 	case ATA_DITER_ENABLED_REVERSE:
284 	case ATA_DITER_ALL_REVERSE:
285 		if (--dev >= link->device)
286 			goto check;
287 		return NULL;
288 	}
289 
290  check:
291 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
292 	    !ata_dev_enabled(dev))
293 		goto next;
294 	return dev;
295 }
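
/*
 * Usage sketch (illustrative only): as with links, most callers go
 * through the ata_for_each_dev() wrapper from <linux/libata.h>, but the
 * iterator can be open-coded, e.g. to count enabled devices on a link:
 *
 *	struct ata_device *dev = NULL;
 *	int nr_enabled = 0;
 *
 *	while ((dev = ata_dev_next(dev, link, ATA_DITER_ENABLED)))
 *		nr_enabled++;
 */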
296 
297 /**
298  *	ata_dev_phys_link - find physical link for a device
299  *	@dev: ATA device to look up physical link for
300  *
301  *	Look up physical link which @dev is attached to.  Note that
302  *	this is different from @dev->link only when @dev is on slave
303  *	link.  For all other cases, it's the same as @dev->link.
304  *
305  *	LOCKING:
306  *	Don't care.
307  *
308  *	RETURNS:
309  *	Pointer to the found physical link.
310  */
311 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
312 {
313 	struct ata_port *ap = dev->link->ap;
314 
315 	if (!ap->slave_link)
316 		return dev->link;
317 	if (!dev->devno)
318 		return &ap->link;
319 	return ap->slave_link;
320 }
321 
322 /**
323  *	ata_force_cbl - force cable type according to libata.force
324  *	@ap: ATA port of interest
325  *
326  *	Force cable type according to libata.force and whine about it.
327  *	The last entry which has matching port number is used, so it
328  *	can be specified as part of device force parameters.  For
329  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
330  *	same effect.
331  *
332  *	LOCKING:
333  *	EH context.
334  */
335 void ata_force_cbl(struct ata_port *ap)
336 {
337 	int i;
338 
339 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
340 		const struct ata_force_ent *fe = &ata_force_tbl[i];
341 
342 		if (fe->port != -1 && fe->port != ap->print_id)
343 			continue;
344 
345 		if (fe->param.cbl == ATA_CBL_NONE)
346 			continue;
347 
348 		ap->cbl = fe->param.cbl;
349 		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
350 		return;
351 	}
352 }
353 
354 /**
355  *	ata_force_link_limits - force link limits according to libata.force
356  *	@link: ATA link of interest
357  *
358  *	Force link flags and SATA spd limit according to libata.force
359  *	and whine about it.  When only the port part is specified
360  *	(e.g. 1:), the limit applies to all links connected to both
361  *	the host link and all fan-out ports connected via PMP.  If the
362  *	device part is specified as 0 (e.g. 1.00:), it specifies the
363  *	first fan-out link not the host link.  Device number 15 always
364  *	points to the host link whether PMP is attached or not.  If the
365  *	controller has slave link, device number 16 points to it.
366  *
367  *	LOCKING:
368  *	EH context.
369  */
370 static void ata_force_link_limits(struct ata_link *link)
371 {
372 	bool did_spd = false;
373 	int linkno = link->pmp;
374 	int i;
375 
376 	if (ata_is_host_link(link))
377 		linkno += 15;
378 
379 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
380 		const struct ata_force_ent *fe = &ata_force_tbl[i];
381 
382 		if (fe->port != -1 && fe->port != link->ap->print_id)
383 			continue;
384 
385 		if (fe->device != -1 && fe->device != linkno)
386 			continue;
387 
388 		/* only honor the first spd limit */
389 		if (!did_spd && fe->param.spd_limit) {
390 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
391 			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
392 					fe->param.name);
393 			did_spd = true;
394 		}
395 
396 		/* let lflags stack */
397 		if (fe->param.lflags) {
398 			link->flags |= fe->param.lflags;
399 			ata_link_notice(link,
400 					"FORCE: link flag 0x%x forced -> 0x%x\n",
401 					fe->param.lflags, link->flags);
402 		}
403 	}
404 }
405 
406 /**
407  *	ata_force_xfermask - force xfermask according to libata.force
408  *	@dev: ATA device of interest
409  *
410  *	Force xfer_mask according to libata.force and whine about it.
411  *	For consistency with link selection, device number 15 selects
412  *	the first device connected to the host link.
413  *
414  *	LOCKING:
415  *	EH context.
416  */
417 static void ata_force_xfermask(struct ata_device *dev)
418 {
419 	int devno = dev->link->pmp + dev->devno;
420 	int alt_devno = devno;
421 	int i;
422 
423 	/* allow n.15/16 for devices attached to host port */
424 	if (ata_is_host_link(dev->link))
425 		alt_devno += 15;
426 
427 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
428 		const struct ata_force_ent *fe = &ata_force_tbl[i];
429 		unsigned long pio_mask, mwdma_mask, udma_mask;
430 
431 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
432 			continue;
433 
434 		if (fe->device != -1 && fe->device != devno &&
435 		    fe->device != alt_devno)
436 			continue;
437 
438 		if (!fe->param.xfer_mask)
439 			continue;
440 
441 		ata_unpack_xfermask(fe->param.xfer_mask,
442 				    &pio_mask, &mwdma_mask, &udma_mask);
443 		if (udma_mask)
444 			dev->udma_mask = udma_mask;
445 		else if (mwdma_mask) {
446 			dev->udma_mask = 0;
447 			dev->mwdma_mask = mwdma_mask;
448 		} else {
449 			dev->udma_mask = 0;
450 			dev->mwdma_mask = 0;
451 			dev->pio_mask = pio_mask;
452 		}
453 
454 		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
455 			       fe->param.name);
456 		return;
457 	}
458 }
459 
460 /**
461  *	ata_force_horkage - force horkage according to libata.force
462  *	@dev: ATA device of interest
463  *
464  *	Force horkage according to libata.force and whine about it.
465  *	For consistency with link selection, device number 15 selects
466  *	the first device connected to the host link.
467  *
468  *	LOCKING:
469  *	EH context.
470  */
471 static void ata_force_horkage(struct ata_device *dev)
472 {
473 	int devno = dev->link->pmp + dev->devno;
474 	int alt_devno = devno;
475 	int i;
476 
477 	/* allow n.15/16 for devices attached to host port */
478 	if (ata_is_host_link(dev->link))
479 		alt_devno += 15;
480 
481 	for (i = 0; i < ata_force_tbl_size; i++) {
482 		const struct ata_force_ent *fe = &ata_force_tbl[i];
483 
484 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
485 			continue;
486 
487 		if (fe->device != -1 && fe->device != devno &&
488 		    fe->device != alt_devno)
489 			continue;
490 
491 		if (!(~dev->horkage & fe->param.horkage_on) &&
492 		    !(dev->horkage & fe->param.horkage_off))
493 			continue;
494 
495 		dev->horkage |= fe->param.horkage_on;
496 		dev->horkage &= ~fe->param.horkage_off;
497 
498 		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
499 			       fe->param.name);
500 	}
501 }
502 
503 /**
504  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
505  *	@opcode: SCSI opcode
506  *
507  *	Determine ATAPI command type from @opcode.
508  *
509  *	LOCKING:
510  *	None.
511  *
512  *	RETURNS:
513  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
514  */
515 int atapi_cmd_type(u8 opcode)
516 {
517 	switch (opcode) {
518 	case GPCMD_READ_10:
519 	case GPCMD_READ_12:
520 		return ATAPI_READ;
521 
522 	case GPCMD_WRITE_10:
523 	case GPCMD_WRITE_12:
524 	case GPCMD_WRITE_AND_VERIFY_10:
525 		return ATAPI_WRITE;
526 
527 	case GPCMD_READ_CD:
528 	case GPCMD_READ_CD_MSF:
529 		return ATAPI_READ_CD;
530 
531 	case ATA_16:
532 	case ATA_12:
533 		if (atapi_passthru16)
534 			return ATAPI_PASS_THRU;
535 		/* fall thru */
536 	default:
537 		return ATAPI_MISC;
538 	}
539 }
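
/*
 * Usage sketch (illustrative only): a packet-command preparation path can
 * key its data-transfer policy off the returned type:
 *
 *	switch (atapi_cmd_type(cdb[0])) {
 *	case ATAPI_READ:
 *	case ATAPI_WRITE:
 *	case ATAPI_READ_CD:
 *		want_dma = true;
 *		break;
 *	default:
 *		want_dma = false;
 *		break;
 *	}
 *
 * (cdb and want_dma are hypothetical names used only for this sketch.)
 */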
540 
541 /**
542  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
543  *	@tf: Taskfile to convert
544  *	@pmp: Port multiplier port
545  *	@is_cmd: This FIS is for command
546  *	@fis: Buffer into which data will be output
547  *
548  *	Converts a standard ATA taskfile to a Serial ATA
549  *	FIS structure (Register - Host to Device).
550  *
551  *	LOCKING:
552  *	Inherited from caller.
553  */
554 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
555 {
556 	fis[0] = 0x27;			/* Register - Host to Device FIS */
557 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
558 	if (is_cmd)
559 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
560 
561 	fis[2] = tf->command;
562 	fis[3] = tf->feature;
563 
564 	fis[4] = tf->lbal;
565 	fis[5] = tf->lbam;
566 	fis[6] = tf->lbah;
567 	fis[7] = tf->device;
568 
569 	fis[8] = tf->hob_lbal;
570 	fis[9] = tf->hob_lbam;
571 	fis[10] = tf->hob_lbah;
572 	fis[11] = tf->hob_feature;
573 
574 	fis[12] = tf->nsect;
575 	fis[13] = tf->hob_nsect;
576 	fis[14] = 0;
577 	fis[15] = tf->ctl;
578 
579 	fis[16] = tf->auxiliary & 0xff;
580 	fis[17] = (tf->auxiliary >> 8) & 0xff;
581 	fis[18] = (tf->auxiliary >> 16) & 0xff;
582 	fis[19] = (tf->auxiliary >> 24) & 0xff;
583 }
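
/*
 * Usage sketch (illustrative only): a SATA controller driver typically
 * serializes the taskfile of a queued command into its command table this
 * way.  The output buffer must hold a full 20-byte H2D FIS because the
 * auxiliary field occupies bytes 16-19:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *
 * (qc is a hypothetical struct ata_queued_cmd being prepared; passing
 * is_cmd=1 sets bit 7 of byte 1, marking this as a command FIS.)
 */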
584 
585 /**
586  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
587  *	@fis: Buffer from which data will be input
588  *	@tf: Taskfile to output
589  *
590  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
591  *
592  *	LOCKING:
593  *	Inherited from caller.
594  */
595 
596 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
597 {
598 	tf->command	= fis[2];	/* status */
599 	tf->feature	= fis[3];	/* error */
600 
601 	tf->lbal	= fis[4];
602 	tf->lbam	= fis[5];
603 	tf->lbah	= fis[6];
604 	tf->device	= fis[7];
605 
606 	tf->hob_lbal	= fis[8];
607 	tf->hob_lbam	= fis[9];
608 	tf->hob_lbah	= fis[10];
609 
610 	tf->nsect	= fis[12];
611 	tf->hob_nsect	= fis[13];
612 }
613 
614 static const u8 ata_rw_cmds[] = {
615 	/* pio multi */
616 	ATA_CMD_READ_MULTI,
617 	ATA_CMD_WRITE_MULTI,
618 	ATA_CMD_READ_MULTI_EXT,
619 	ATA_CMD_WRITE_MULTI_EXT,
620 	0,
621 	0,
622 	0,
623 	ATA_CMD_WRITE_MULTI_FUA_EXT,
624 	/* pio */
625 	ATA_CMD_PIO_READ,
626 	ATA_CMD_PIO_WRITE,
627 	ATA_CMD_PIO_READ_EXT,
628 	ATA_CMD_PIO_WRITE_EXT,
629 	0,
630 	0,
631 	0,
632 	0,
633 	/* dma */
634 	ATA_CMD_READ,
635 	ATA_CMD_WRITE,
636 	ATA_CMD_READ_EXT,
637 	ATA_CMD_WRITE_EXT,
638 	0,
639 	0,
640 	0,
641 	ATA_CMD_WRITE_FUA_EXT
642 };
643 
644 /**
645  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
646  *	@tf: command to examine and configure
647  *	@dev: device tf belongs to
648  *
649  *	Examine the device configuration and tf->flags to calculate
650  *	the proper read/write commands and protocol to use.
651  *
652  *	LOCKING:
653  *	caller.
654  */
655 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
656 {
657 	u8 cmd;
658 
659 	int index, fua, lba48, write;
660 
661 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
662 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
663 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
664 
665 	if (dev->flags & ATA_DFLAG_PIO) {
666 		tf->protocol = ATA_PROT_PIO;
667 		index = dev->multi_count ? 0 : 8;
668 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
669 		/* Unable to use DMA due to host limitation */
670 		tf->protocol = ATA_PROT_PIO;
671 		index = dev->multi_count ? 0 : 8;
672 	} else {
673 		tf->protocol = ATA_PROT_DMA;
674 		index = 16;
675 	}
676 
677 	cmd = ata_rw_cmds[index + fua + lba48 + write];
678 	if (cmd) {
679 		tf->command = cmd;
680 		return 0;
681 	}
682 	return -1;
683 }
684 
685 /**
686  *	ata_tf_read_block - Read block address from ATA taskfile
687  *	@tf: ATA taskfile of interest
688  *	@dev: ATA device @tf belongs to
689  *
690  *	LOCKING:
691  *	None.
692  *
693  *	Read block address from @tf.  This function can handle all
694  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
695  *	flags select the address format to use.
696  *
697  *	RETURNS:
698  *	Block address read from @tf.
699  */
700 u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
701 {
702 	u64 block = 0;
703 
704 	if (tf->flags & ATA_TFLAG_LBA) {
705 		if (tf->flags & ATA_TFLAG_LBA48) {
706 			block |= (u64)tf->hob_lbah << 40;
707 			block |= (u64)tf->hob_lbam << 32;
708 			block |= (u64)tf->hob_lbal << 24;
709 		} else
710 			block |= (tf->device & 0xf) << 24;
711 
712 		block |= tf->lbah << 16;
713 		block |= tf->lbam << 8;
714 		block |= tf->lbal;
715 	} else {
716 		u32 cyl, head, sect;
717 
718 		cyl = tf->lbam | (tf->lbah << 8);
719 		head = tf->device & 0xf;
720 		sect = tf->lbal;
721 
722 		if (!sect) {
723 			ata_dev_warn(dev,
724 				     "device reported invalid CHS sector 0\n");
725 			return U64_MAX;
726 		}
727 
728 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
729 	}
730 
731 	return block;
732 }
733 
734 /**
735  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
736  *	@tf: Target ATA taskfile
737  *	@dev: ATA device @tf belongs to
738  *	@block: Block address
739  *	@n_block: Number of blocks
740  *	@tf_flags: RW/FUA etc...
741  *	@tag: tag
742  *	@class: IO priority class
743  *
744  *	LOCKING:
745  *	None.
746  *
747  *	Build ATA taskfile @tf for read/write request described by
748  *	@block, @n_block, @tf_flags and @tag on @dev.
749  *
750  *	RETURNS:
751  *
752  *	0 on success, -ERANGE if the request is too large for @dev,
753  *	-EINVAL if the request is invalid.
754  */
755 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
756 		    u64 block, u32 n_block, unsigned int tf_flags,
757 		    unsigned int tag, int class)
758 {
759 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
760 	tf->flags |= tf_flags;
761 
762 	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
763 		/* yay, NCQ */
764 		if (!lba_48_ok(block, n_block))
765 			return -ERANGE;
766 
767 		tf->protocol = ATA_PROT_NCQ;
768 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
769 
770 		if (tf->flags & ATA_TFLAG_WRITE)
771 			tf->command = ATA_CMD_FPDMA_WRITE;
772 		else
773 			tf->command = ATA_CMD_FPDMA_READ;
774 
775 		tf->nsect = tag << 3;
776 		tf->hob_feature = (n_block >> 8) & 0xff;
777 		tf->feature = n_block & 0xff;
778 
779 		tf->hob_lbah = (block >> 40) & 0xff;
780 		tf->hob_lbam = (block >> 32) & 0xff;
781 		tf->hob_lbal = (block >> 24) & 0xff;
782 		tf->lbah = (block >> 16) & 0xff;
783 		tf->lbam = (block >> 8) & 0xff;
784 		tf->lbal = block & 0xff;
785 
786 		tf->device = ATA_LBA;
787 		if (tf->flags & ATA_TFLAG_FUA)
788 			tf->device |= 1 << 7;
789 
790 		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
791 			if (class == IOPRIO_CLASS_RT)
792 				tf->hob_nsect |= ATA_PRIO_HIGH <<
793 						 ATA_SHIFT_PRIO;
794 		}
795 	} else if (dev->flags & ATA_DFLAG_LBA) {
796 		tf->flags |= ATA_TFLAG_LBA;
797 
798 		if (lba_28_ok(block, n_block)) {
799 			/* use LBA28 */
800 			tf->device |= (block >> 24) & 0xf;
801 		} else if (lba_48_ok(block, n_block)) {
802 			if (!(dev->flags & ATA_DFLAG_LBA48))
803 				return -ERANGE;
804 
805 			/* use LBA48 */
806 			tf->flags |= ATA_TFLAG_LBA48;
807 
808 			tf->hob_nsect = (n_block >> 8) & 0xff;
809 
810 			tf->hob_lbah = (block >> 40) & 0xff;
811 			tf->hob_lbam = (block >> 32) & 0xff;
812 			tf->hob_lbal = (block >> 24) & 0xff;
813 		} else
814 			/* request too large even for LBA48 */
815 			return -ERANGE;
816 
817 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
818 			return -EINVAL;
819 
820 		tf->nsect = n_block & 0xff;
821 
822 		tf->lbah = (block >> 16) & 0xff;
823 		tf->lbam = (block >> 8) & 0xff;
824 		tf->lbal = block & 0xff;
825 
826 		tf->device |= ATA_LBA;
827 	} else {
828 		/* CHS */
829 		u32 sect, head, cyl, track;
830 
831 		/* The request -may- be too large for CHS addressing. */
832 		if (!lba_28_ok(block, n_block))
833 			return -ERANGE;
834 
835 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
836 			return -EINVAL;
837 
838 		/* Convert LBA to CHS */
839 		track = (u32)block / dev->sectors;
840 		cyl   = track / dev->heads;
841 		head  = track % dev->heads;
842 		sect  = (u32)block % dev->sectors + 1;
843 
844 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
845 			(u32)block, track, cyl, head, sect);
846 
847 		/* Check whether the converted CHS can fit.
848 		   Cylinder: 0-65535
849 		   Head: 0-15
850 		   Sector: 1-255 */
851 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
852 			return -ERANGE;
853 
854 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
855 		tf->lbal = sect;
856 		tf->lbam = cyl;
857 		tf->lbah = cyl >> 8;
858 		tf->device |= head;
859 	}
860 
861 	return 0;
862 }
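
/*
 * Usage sketch (illustrative only): this is roughly how a translation
 * layer builds the taskfile for a read/write request; block, n_block and
 * tag are hypothetical values here:
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	if (ata_build_rw_tf(&tf, dev, block, n_block,
 *			    ATA_TFLAG_WRITE | ATA_TFLAG_FUA,
 *			    tag, IOPRIO_CLASS_NONE))
 *		return -EINVAL;
 *
 * The helper selects NCQ, LBA48, LBA28 or CHS addressing on its own from
 * the device flags and the requested block range.
 */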
863 
864 /**
865  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
866  *	@pio_mask: pio_mask
867  *	@mwdma_mask: mwdma_mask
868  *	@udma_mask: udma_mask
869  *
870  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
871  *	unsigned int xfer_mask.
872  *
873  *	LOCKING:
874  *	None.
875  *
876  *	RETURNS:
877  *	Packed xfer_mask.
878  */
879 unsigned long ata_pack_xfermask(unsigned long pio_mask,
880 				unsigned long mwdma_mask,
881 				unsigned long udma_mask)
882 {
883 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
884 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
885 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
886 }
887 
888 /**
889  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
890  *	@xfer_mask: xfer_mask to unpack
891  *	@pio_mask: resulting pio_mask
892  *	@mwdma_mask: resulting mwdma_mask
893  *	@udma_mask: resulting udma_mask
894  *
895  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
896  *	Any NULL destination masks will be ignored.
897  */
898 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
899 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
900 {
901 	if (pio_mask)
902 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
903 	if (mwdma_mask)
904 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
905 	if (udma_mask)
906 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
907 }
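
/*
 * Usage sketch (illustrative only): the pack/unpack pair lets the three
 * per-type mode masks travel as a single value:
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * ATA_PIO4, ATA_MWDMA2 and ATA_UDMA6 are the cumulative mode masks from
 * <linux/ata.h>; after the round trip pio/mwdma/udma hold those same
 * values again.
 */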
908 
909 static const struct ata_xfer_ent {
910 	int shift, bits;
911 	u8 base;
912 } ata_xfer_tbl[] = {
913 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
914 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
915 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
916 	{ -1, },
917 };
918 
919 /**
920  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
921  *	@xfer_mask: xfer_mask of interest
922  *
923  *	Return matching XFER_* value for @xfer_mask.  Only the highest
924  *	bit of @xfer_mask is considered.
925  *
926  *	LOCKING:
927  *	None.
928  *
929  *	RETURNS:
930  *	Matching XFER_* value, 0xff if no match found.
931  */
932 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
933 {
934 	int highbit = fls(xfer_mask) - 1;
935 	const struct ata_xfer_ent *ent;
936 
937 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
938 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
939 			return ent->base + highbit - ent->shift;
940 	return 0xff;
941 }
942 
943 /**
944  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
945  *	@xfer_mode: XFER_* of interest
946  *
947  *	Return matching xfer_mask for @xfer_mode.
948  *
949  *	LOCKING:
950  *	None.
951  *
952  *	RETURNS:
953  *	Matching xfer_mask, 0 if no match found.
954  */
955 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
956 {
957 	const struct ata_xfer_ent *ent;
958 
959 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
960 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
961 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
962 				& ~((1 << ent->shift) - 1);
963 	return 0;
964 }
965 
966 /**
967  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
968  *	@xfer_mode: XFER_* of interest
969  *
970  *	Return matching xfer_shift for @xfer_mode.
971  *
972  *	LOCKING:
973  *	None.
974  *
975  *	RETURNS:
976  *	Matching xfer_shift, -1 if no match found.
977  */
978 int ata_xfer_mode2shift(unsigned long xfer_mode)
979 {
980 	const struct ata_xfer_ent *ent;
981 
982 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
983 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
984 			return ent->shift;
985 	return -1;
986 }
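
/*
 * Usage sketch (illustrative only): the three helpers above translate
 * between packed xfer_masks and XFER_* mode numbers:
 *
 *	unsigned long xfer_mask = ata_pack_xfermask(0, 0, ATA_UDMA5);
 *	u8 mode = ata_xfer_mask2mode(xfer_mask);
 *	unsigned long mask = ata_xfer_mode2mask(mode);
 *	int shift = ata_xfer_mode2shift(mode);
 *
 * Here mode ends up as XFER_UDMA_5, mask as the packed UDMA0-UDMA5 mask,
 * and shift as ATA_SHIFT_UDMA.
 */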
987 
988 /**
989  *	ata_mode_string - convert xfer_mask to string
990  *	@xfer_mask: mask of bits supported; only highest bit counts.
991  *
992  *	Determine string which represents the highest speed
993  *	(highest bit in @xfer_mask).
994  *
995  *	LOCKING:
996  *	None.
997  *
998  *	RETURNS:
999  *	Constant C string representing highest speed listed in
1000  *	@xfer_mask, or the constant C string "<n/a>".
1001  */
1002 const char *ata_mode_string(unsigned long xfer_mask)
1003 {
1004 	static const char * const xfer_mode_str[] = {
1005 		"PIO0",
1006 		"PIO1",
1007 		"PIO2",
1008 		"PIO3",
1009 		"PIO4",
1010 		"PIO5",
1011 		"PIO6",
1012 		"MWDMA0",
1013 		"MWDMA1",
1014 		"MWDMA2",
1015 		"MWDMA3",
1016 		"MWDMA4",
1017 		"UDMA/16",
1018 		"UDMA/25",
1019 		"UDMA/33",
1020 		"UDMA/44",
1021 		"UDMA/66",
1022 		"UDMA/100",
1023 		"UDMA/133",
1024 		"UDMA7",
1025 	};
1026 	int highbit;
1027 
1028 	highbit = fls(xfer_mask) - 1;
1029 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1030 		return xfer_mode_str[highbit];
1031 	return "<n/a>";
1032 }
1033 
1034 const char *sata_spd_string(unsigned int spd)
1035 {
1036 	static const char * const spd_str[] = {
1037 		"1.5 Gbps",
1038 		"3.0 Gbps",
1039 		"6.0 Gbps",
1040 	};
1041 
1042 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1043 		return "<unknown>";
1044 	return spd_str[spd - 1];
1045 }
1046 
1047 /**
1048  *	ata_dev_classify - determine device type based on ATA-spec signature
1049  *	@tf: ATA taskfile register set for device to be identified
1050  *
1051  *	Determine from taskfile register contents whether a device is
1052  *	ATA or ATAPI, as per "Signature and persistence" section
1053  *	of ATA/PI spec (volume 1, sect 5.14).
1054  *
1055  *	LOCKING:
1056  *	None.
1057  *
1058  *	RETURNS:
1059  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1060  *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1061  */
1062 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1063 {
1064 	/* Apple's open source Darwin code hints that some devices only
1065 	 * put a proper signature into the LBA mid/high registers,
1066 	 * so we only check those.  It's sufficient for uniqueness.
1067 	 *
1068 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1069 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1070 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
1071 	 * spec never mentioned using different signatures
1072 	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
1073 	 * Multiplier specification began to use 0x69/0x96 to identify
1074 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1075 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1076 	 * 0x69/0x96 and described them as reserved for
1077 	 * SerialATA.
1078 	 *
1079 	 * We follow the current spec and consider that 0x69/0x96
1080 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1081 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1082 	 * SEMB signature.  This is worked around in
1083 	 * ata_dev_read_id().
1084 	 */
1085 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1086 		DPRINTK("found ATA device by sig\n");
1087 		return ATA_DEV_ATA;
1088 	}
1089 
1090 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1091 		DPRINTK("found ATAPI device by sig\n");
1092 		return ATA_DEV_ATAPI;
1093 	}
1094 
1095 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1096 		DPRINTK("found PMP device by sig\n");
1097 		return ATA_DEV_PMP;
1098 	}
1099 
1100 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1101 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1102 		return ATA_DEV_SEMB;
1103 	}
1104 
1105 	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1106 		DPRINTK("found ZAC device by sig\n");
1107 		return ATA_DEV_ZAC;
1108 	}
1109 
1110 	DPRINTK("unknown device\n");
1111 	return ATA_DEV_UNKNOWN;
1112 }
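
/*
 * Usage sketch (illustrative only): reset paths read the signature the
 * device left in the taskfile/shadow registers and feed it to
 * ata_dev_classify().  A purely synthetic example:
 *
 *	struct ata_taskfile tf = { };
 *
 *	tf.lbam = 0x14;
 *	tf.lbah = 0xeb;
 *	unsigned int class = ata_dev_classify(&tf);
 *
 * which yields ATA_DEV_ATAPI.  ata_sff_dev_classify() does this for
 * taskfile-based controllers, and AHCI derives the taskfile from its
 * PxSIG register before classifying.
 */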
1113 
1114 /**
1115  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1116  *	@id: IDENTIFY DEVICE results we will examine
1117  *	@s: string into which data is output
1118  *	@ofs: offset into identify device page
1119  *	@len: length of string to return. must be an even number.
1120  *
1121  *	The strings in the IDENTIFY DEVICE page are broken up into
1122  *	16-bit chunks.  Run through the string, and output each
1123  *	8-bit chunk linearly, regardless of platform.
1124  *
1125  *	LOCKING:
1126  *	caller.
1127  */
1128 
1129 void ata_id_string(const u16 *id, unsigned char *s,
1130 		   unsigned int ofs, unsigned int len)
1131 {
1132 	unsigned int c;
1133 
1134 	BUG_ON(len & 1);
1135 
1136 	while (len > 0) {
1137 		c = id[ofs] >> 8;
1138 		*s = c;
1139 		s++;
1140 
1141 		c = id[ofs] & 0xff;
1142 		*s = c;
1143 		s++;
1144 
1145 		ofs++;
1146 		len -= 2;
1147 	}
1148 }
1149 
1150 /**
1151  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1152  *	@id: IDENTIFY DEVICE results we will examine
1153  *	@s: string into which data is output
1154  *	@ofs: offset into identify device page
1155  *	@len: length of string to return. must be an odd number.
1156  *
1157  *	This function is identical to ata_id_string except that it
1158  *	trims trailing spaces and terminates the resulting string with
1159  *	null.  @len must be actual maximum length (even number) + 1.
1160  *
1161  *	LOCKING:
1162  *	caller.
1163  */
1164 void ata_id_c_string(const u16 *id, unsigned char *s,
1165 		     unsigned int ofs, unsigned int len)
1166 {
1167 	unsigned char *p;
1168 
1169 	ata_id_string(id, s, ofs, len - 1);
1170 
1171 	p = s + strnlen(s, len - 1);
1172 	while (p > s && p[-1] == ' ')
1173 		p--;
1174 	*p = '\0';
1175 }
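
/*
 * Usage sketch (illustrative only): pulling the model string out of an
 * IDENTIFY page with a NUL-terminated result:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * ATA_ID_PROD and ATA_ID_PROD_LEN come from <linux/ata.h>; sizeof(model)
 * is odd (maximum length + 1) as the helper requires.
 */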
1176 
1177 static u64 ata_id_n_sectors(const u16 *id)
1178 {
1179 	if (ata_id_has_lba(id)) {
1180 		if (ata_id_has_lba48(id))
1181 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1182 		else
1183 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1184 	} else {
1185 		if (ata_id_current_chs_valid(id))
1186 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1187 			       id[ATA_ID_CUR_SECTORS];
1188 		else
1189 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1190 			       id[ATA_ID_SECTORS];
1191 	}
1192 }
1193 
1194 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1195 {
1196 	u64 sectors = 0;
1197 
1198 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1199 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1200 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1201 	sectors |= (tf->lbah & 0xff) << 16;
1202 	sectors |= (tf->lbam & 0xff) << 8;
1203 	sectors |= (tf->lbal & 0xff);
1204 
1205 	return sectors;
1206 }
1207 
1208 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1209 {
1210 	u64 sectors = 0;
1211 
1212 	sectors |= (tf->device & 0x0f) << 24;
1213 	sectors |= (tf->lbah & 0xff) << 16;
1214 	sectors |= (tf->lbam & 0xff) << 8;
1215 	sectors |= (tf->lbal & 0xff);
1216 
1217 	return sectors;
1218 }
1219 
1220 /**
1221  *	ata_read_native_max_address - Read native max address
1222  *	@dev: target device
1223  *	@max_sectors: out parameter for the result native max address
1224  *
1225  *	Perform an LBA48 or LBA28 native size query upon the device in
1226  *	question.
1227  *
1228  *	RETURNS:
1229  *	0 on success, -EACCES if command is aborted by the drive.
1230  *	-EIO on other errors.
1231  */
1232 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1233 {
1234 	unsigned int err_mask;
1235 	struct ata_taskfile tf;
1236 	int lba48 = ata_id_has_lba48(dev->id);
1237 
1238 	ata_tf_init(dev, &tf);
1239 
1240 	/* always clear all address registers */
1241 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1242 
1243 	if (lba48) {
1244 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1245 		tf.flags |= ATA_TFLAG_LBA48;
1246 	} else
1247 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1248 
1249 	tf.protocol = ATA_PROT_NODATA;
1250 	tf.device |= ATA_LBA;
1251 
1252 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1253 	if (err_mask) {
1254 		ata_dev_warn(dev,
1255 			     "failed to read native max address (err_mask=0x%x)\n",
1256 			     err_mask);
1257 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1258 			return -EACCES;
1259 		return -EIO;
1260 	}
1261 
1262 	if (lba48)
1263 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1264 	else
1265 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1266 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1267 		(*max_sectors)--;
1268 	return 0;
1269 }
1270 
1271 /**
1272  *	ata_set_max_sectors - Set max sectors
1273  *	@dev: target device
1274  *	@new_sectors: new max sectors value to set for the device
1275  *
1276  *	Set max sectors of @dev to @new_sectors.
1277  *
1278  *	RETURNS:
1279  *	0 on success, -EACCES if command is aborted or denied (due to
1280  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1281  *	errors.
1282  */
1283 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1284 {
1285 	unsigned int err_mask;
1286 	struct ata_taskfile tf;
1287 	int lba48 = ata_id_has_lba48(dev->id);
1288 
1289 	new_sectors--;
1290 
1291 	ata_tf_init(dev, &tf);
1292 
1293 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1294 
1295 	if (lba48) {
1296 		tf.command = ATA_CMD_SET_MAX_EXT;
1297 		tf.flags |= ATA_TFLAG_LBA48;
1298 
1299 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1300 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1301 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1302 	} else {
1303 		tf.command = ATA_CMD_SET_MAX;
1304 
1305 		tf.device |= (new_sectors >> 24) & 0xf;
1306 	}
1307 
1308 	tf.protocol = ATA_PROT_NODATA;
1309 	tf.device |= ATA_LBA;
1310 
1311 	tf.lbal = (new_sectors >> 0) & 0xff;
1312 	tf.lbam = (new_sectors >> 8) & 0xff;
1313 	tf.lbah = (new_sectors >> 16) & 0xff;
1314 
1315 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1316 	if (err_mask) {
1317 		ata_dev_warn(dev,
1318 			     "failed to set max address (err_mask=0x%x)\n",
1319 			     err_mask);
1320 		if (err_mask == AC_ERR_DEV &&
1321 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1322 			return -EACCES;
1323 		return -EIO;
1324 	}
1325 
1326 	return 0;
1327 }
1328 
1329 /**
1330  *	ata_hpa_resize		-	Resize a device with an HPA set
1331  *	@dev: Device to resize
1332  *
1333  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1334  *	it if required to the full size of the media. The caller must check
1335  *	the drive has the HPA feature set enabled.
1336  *
1337  *	RETURNS:
1338  *	0 on success, -errno on failure.
1339  */
1340 static int ata_hpa_resize(struct ata_device *dev)
1341 {
1342 	struct ata_eh_context *ehc = &dev->link->eh_context;
1343 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1344 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1345 	u64 sectors = ata_id_n_sectors(dev->id);
1346 	u64 native_sectors;
1347 	int rc;
1348 
1349 	/* do we need to do it? */
1350 	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1351 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1352 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1353 		return 0;
1354 
1355 	/* read native max address */
1356 	rc = ata_read_native_max_address(dev, &native_sectors);
1357 	if (rc) {
1358 		/* If device aborted the command or HPA isn't going to
1359 		 * be unlocked, skip HPA resizing.
1360 		 */
1361 		if (rc == -EACCES || !unlock_hpa) {
1362 			ata_dev_warn(dev,
1363 				     "HPA support seems broken, skipping HPA handling\n");
1364 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1365 
1366 			/* we can continue if device aborted the command */
1367 			if (rc == -EACCES)
1368 				rc = 0;
1369 		}
1370 
1371 		return rc;
1372 	}
1373 	dev->n_native_sectors = native_sectors;
1374 
1375 	/* nothing to do? */
1376 	if (native_sectors <= sectors || !unlock_hpa) {
1377 		if (!print_info || native_sectors == sectors)
1378 			return 0;
1379 
1380 		if (native_sectors > sectors)
1381 			ata_dev_info(dev,
1382 				"HPA detected: current %llu, native %llu\n",
1383 				(unsigned long long)sectors,
1384 				(unsigned long long)native_sectors);
1385 		else if (native_sectors < sectors)
1386 			ata_dev_warn(dev,
1387 				"native sectors (%llu) is smaller than sectors (%llu)\n",
1388 				(unsigned long long)native_sectors,
1389 				(unsigned long long)sectors);
1390 		return 0;
1391 	}
1392 
1393 	/* let's unlock HPA */
1394 	rc = ata_set_max_sectors(dev, native_sectors);
1395 	if (rc == -EACCES) {
1396 		/* if device aborted the command, skip HPA resizing */
1397 		ata_dev_warn(dev,
1398 			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1399 			     (unsigned long long)sectors,
1400 			     (unsigned long long)native_sectors);
1401 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1402 		return 0;
1403 	} else if (rc)
1404 		return rc;
1405 
1406 	/* re-read IDENTIFY data */
1407 	rc = ata_dev_reread_id(dev, 0);
1408 	if (rc) {
1409 		ata_dev_err(dev,
1410 			    "failed to re-read IDENTIFY data after HPA resizing\n");
1411 		return rc;
1412 	}
1413 
1414 	if (print_info) {
1415 		u64 new_sectors = ata_id_n_sectors(dev->id);
1416 		ata_dev_info(dev,
1417 			"HPA unlocked: %llu -> %llu, native %llu\n",
1418 			(unsigned long long)sectors,
1419 			(unsigned long long)new_sectors,
1420 			(unsigned long long)native_sectors);
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 /**
1427  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1428  *	@id: IDENTIFY DEVICE page to dump
1429  *
1430  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1431  *	page.
1432  *
1433  *	LOCKING:
1434  *	caller.
1435  */
1436 
1437 static inline void ata_dump_id(const u16 *id)
1438 {
1439 	DPRINTK("49==0x%04x  "
1440 		"53==0x%04x  "
1441 		"63==0x%04x  "
1442 		"64==0x%04x  "
1443 		"75==0x%04x  \n",
1444 		id[49],
1445 		id[53],
1446 		id[63],
1447 		id[64],
1448 		id[75]);
1449 	DPRINTK("80==0x%04x  "
1450 		"81==0x%04x  "
1451 		"82==0x%04x  "
1452 		"83==0x%04x  "
1453 		"84==0x%04x  \n",
1454 		id[80],
1455 		id[81],
1456 		id[82],
1457 		id[83],
1458 		id[84]);
1459 	DPRINTK("88==0x%04x  "
1460 		"93==0x%04x\n",
1461 		id[88],
1462 		id[93]);
1463 }
1464 
1465 /**
1466  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1467  *	@id: IDENTIFY data to compute xfer mask from
1468  *
1469  *	Compute the xfermask for this device. This is not as trivial
1470  *	as it seems if we must consider early devices correctly.
1471  *
1472  *	FIXME: pre IDE drive timing (do we care ?).
1473  *
1474  *	LOCKING:
1475  *	None.
1476  *
1477  *	RETURNS:
1478  *	Computed xfermask
1479  */
1480 unsigned long ata_id_xfermask(const u16 *id)
1481 {
1482 	unsigned long pio_mask, mwdma_mask, udma_mask;
1483 
1484 	/* Usual case. Word 53 indicates word 64 is valid */
1485 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1486 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1487 		pio_mask <<= 3;
1488 		pio_mask |= 0x7;
1489 	} else {
1490 		/* If word 64 isn't valid then Word 51 high byte holds
1491 		 * the PIO timing number for the maximum. Turn it into
1492 		 * a mask.
1493 		 */
1494 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1495 		if (mode < 5)	/* Valid PIO range */
1496 			pio_mask = (2 << mode) - 1;
1497 		else
1498 			pio_mask = 1;
1499 
1500 		/* But wait.. there's more. Design your standards by
1501 		 * committee and you too can get a free iordy field to
1502 		 * process. However, it's the speeds, not the modes, that
1503 		 * are supported...  Note that drivers using the timing API
1504 		 * will get this right anyway.
1505 		 */
1506 	}
1507 
1508 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1509 
1510 	if (ata_id_is_cfa(id)) {
1511 		/*
1512 		 *	Process compact flash extended modes
1513 		 */
1514 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1515 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1516 
1517 		if (pio)
1518 			pio_mask |= (1 << 5);
1519 		if (pio > 1)
1520 			pio_mask |= (1 << 6);
1521 		if (dma)
1522 			mwdma_mask |= (1 << 3);
1523 		if (dma > 1)
1524 			mwdma_mask |= (1 << 4);
1525 	}
1526 
1527 	udma_mask = 0;
1528 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1529 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1530 
1531 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1532 }
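
/*
 * Usage sketch (illustrative only): combined with ata_mode_string(), the
 * computed mask is handy for reporting what a device claims to support:
 *
 *	unsigned long xfer_mask = ata_id_xfermask(dev->id);
 *
 *	ata_dev_info(dev, "max mode %s\n", ata_mode_string(xfer_mask));
 */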
1533 
1534 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1535 {
1536 	struct completion *waiting = qc->private_data;
1537 
1538 	complete(waiting);
1539 }
1540 
1541 /**
1542  *	ata_exec_internal_sg - execute libata internal command
1543  *	@dev: Device to which the command is sent
1544  *	@tf: Taskfile registers for the command and the result
1545  *	@cdb: CDB for packet command
1546  *	@dma_dir: Data transfer direction of the command
1547  *	@sgl: sg list for the data buffer of the command
1548  *	@n_elem: Number of sg entries
1549  *	@timeout: Timeout in msecs (0 for default)
1550  *
1551  *	Executes libata internal command with timeout.  @tf contains
1552  *	command on entry and result on return.  Timeout and error
1553  *	conditions are reported via return value.  No recovery action
1554  *	is taken after a command times out.  It's caller's duty to
1555  *	clean up after timeout.
1556  *
1557  *	LOCKING:
1558  *	None.  Should be called with kernel context, might sleep.
1559  *
1560  *	RETURNS:
1561  *	Zero on success, AC_ERR_* mask on failure
1562  */
1563 unsigned ata_exec_internal_sg(struct ata_device *dev,
1564 			      struct ata_taskfile *tf, const u8 *cdb,
1565 			      int dma_dir, struct scatterlist *sgl,
1566 			      unsigned int n_elem, unsigned long timeout)
1567 {
1568 	struct ata_link *link = dev->link;
1569 	struct ata_port *ap = link->ap;
1570 	u8 command = tf->command;
1571 	int auto_timeout = 0;
1572 	struct ata_queued_cmd *qc;
1573 	unsigned int preempted_tag;
1574 	u32 preempted_sactive;
1575 	u64 preempted_qc_active;
1576 	int preempted_nr_active_links;
1577 	DECLARE_COMPLETION_ONSTACK(wait);
1578 	unsigned long flags;
1579 	unsigned int err_mask;
1580 	int rc;
1581 
1582 	spin_lock_irqsave(ap->lock, flags);
1583 
1584 	/* no internal command while frozen */
1585 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1586 		spin_unlock_irqrestore(ap->lock, flags);
1587 		return AC_ERR_SYSTEM;
1588 	}
1589 
1590 	/* initialize internal qc */
1591 	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1592 
1593 	qc->tag = ATA_TAG_INTERNAL;
1594 	qc->hw_tag = 0;
1595 	qc->scsicmd = NULL;
1596 	qc->ap = ap;
1597 	qc->dev = dev;
1598 	ata_qc_reinit(qc);
1599 
1600 	preempted_tag = link->active_tag;
1601 	preempted_sactive = link->sactive;
1602 	preempted_qc_active = ap->qc_active;
1603 	preempted_nr_active_links = ap->nr_active_links;
1604 	link->active_tag = ATA_TAG_POISON;
1605 	link->sactive = 0;
1606 	ap->qc_active = 0;
1607 	ap->nr_active_links = 0;
1608 
1609 	/* prepare & issue qc */
1610 	qc->tf = *tf;
1611 	if (cdb)
1612 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1613 
1614 	/* some SATA bridges need us to indicate data xfer direction */
1615 	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1616 	    dma_dir == DMA_FROM_DEVICE)
1617 		qc->tf.feature |= ATAPI_DMADIR;
1618 
1619 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1620 	qc->dma_dir = dma_dir;
1621 	if (dma_dir != DMA_NONE) {
1622 		unsigned int i, buflen = 0;
1623 		struct scatterlist *sg;
1624 
1625 		for_each_sg(sgl, sg, n_elem, i)
1626 			buflen += sg->length;
1627 
1628 		ata_sg_init(qc, sgl, n_elem);
1629 		qc->nbytes = buflen;
1630 	}
1631 
1632 	qc->private_data = &wait;
1633 	qc->complete_fn = ata_qc_complete_internal;
1634 
1635 	ata_qc_issue(qc);
1636 
1637 	spin_unlock_irqrestore(ap->lock, flags);
1638 
1639 	if (!timeout) {
1640 		if (ata_probe_timeout)
1641 			timeout = ata_probe_timeout * 1000;
1642 		else {
1643 			timeout = ata_internal_cmd_timeout(dev, command);
1644 			auto_timeout = 1;
1645 		}
1646 	}
1647 
1648 	if (ap->ops->error_handler)
1649 		ata_eh_release(ap);
1650 
1651 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1652 
1653 	if (ap->ops->error_handler)
1654 		ata_eh_acquire(ap);
1655 
1656 	ata_sff_flush_pio_task(ap);
1657 
1658 	if (!rc) {
1659 		spin_lock_irqsave(ap->lock, flags);
1660 
1661 		/* We're racing with irq here.  If we lose, the
1662 		 * following test prevents us from completing the qc
1663 		 * twice.  If we win, the port is frozen and will be
1664 		 * cleaned up by ->post_internal_cmd().
1665 		 */
1666 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1667 			qc->err_mask |= AC_ERR_TIMEOUT;
1668 
1669 			if (ap->ops->error_handler)
1670 				ata_port_freeze(ap);
1671 			else
1672 				ata_qc_complete(qc);
1673 
1674 			if (ata_msg_warn(ap))
1675 				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1676 					     command);
1677 		}
1678 
1679 		spin_unlock_irqrestore(ap->lock, flags);
1680 	}
1681 
1682 	/* do post_internal_cmd */
1683 	if (ap->ops->post_internal_cmd)
1684 		ap->ops->post_internal_cmd(qc);
1685 
1686 	/* perform minimal error analysis */
1687 	if (qc->flags & ATA_QCFLAG_FAILED) {
1688 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1689 			qc->err_mask |= AC_ERR_DEV;
1690 
1691 		if (!qc->err_mask)
1692 			qc->err_mask |= AC_ERR_OTHER;
1693 
1694 		if (qc->err_mask & ~AC_ERR_OTHER)
1695 			qc->err_mask &= ~AC_ERR_OTHER;
1696 	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1697 		qc->result_tf.command |= ATA_SENSE;
1698 	}
1699 
1700 	/* finish up */
1701 	spin_lock_irqsave(ap->lock, flags);
1702 
1703 	*tf = qc->result_tf;
1704 	err_mask = qc->err_mask;
1705 
1706 	ata_qc_free(qc);
1707 	link->active_tag = preempted_tag;
1708 	link->sactive = preempted_sactive;
1709 	ap->qc_active = preempted_qc_active;
1710 	ap->nr_active_links = preempted_nr_active_links;
1711 
1712 	spin_unlock_irqrestore(ap->lock, flags);
1713 
1714 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1715 		ata_internal_cmd_timed_out(dev, command);
1716 
1717 	return err_mask;
1718 }
1719 
1720 /**
1721  *	ata_exec_internal - execute libata internal command
1722  *	@dev: Device to which the command is sent
1723  *	@tf: Taskfile registers for the command and the result
1724  *	@cdb: CDB for packet command
1725  *	@dma_dir: Data transfer direction of the command
1726  *	@buf: Data buffer of the command
1727  *	@buflen: Length of data buffer
1728  *	@timeout: Timeout in msecs (0 for default)
1729  *
1730  *	Wrapper around ata_exec_internal_sg() which takes simple
1731  *	buffer instead of sg list.
1732  *
1733  *	LOCKING:
1734  *	None.  Should be called with kernel context, might sleep.
1735  *
1736  *	RETURNS:
1737  *	Zero on success, AC_ERR_* mask on failure
1738  */
1739 unsigned ata_exec_internal(struct ata_device *dev,
1740 			   struct ata_taskfile *tf, const u8 *cdb,
1741 			   int dma_dir, void *buf, unsigned int buflen,
1742 			   unsigned long timeout)
1743 {
1744 	struct scatterlist *psg = NULL, sg;
1745 	unsigned int n_elem = 0;
1746 
1747 	if (dma_dir != DMA_NONE) {
1748 		WARN_ON(!buf);
1749 		sg_init_one(&sg, buf, buflen);
1750 		psg = &sg;
1751 		n_elem++;
1752 	}
1753 
1754 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1755 				    timeout);
1756 }
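
/*
 * Usage sketch (illustrative only): a simple no-data internal command,
 * mirroring the pattern used by ata_read_native_max_address() above:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_CHK_POWER;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 *
 * On success tf.nsect holds the reported power mode (the result taskfile
 * is copied back into tf).  Data transfers instead pass a buffer, its
 * length and DMA_FROM_DEVICE or DMA_TO_DEVICE.
 */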
1757 
1758 /**
1759  *	ata_pio_need_iordy	-	check if iordy needed
1760  *	@adev: ATA device
1761  *
1762  *	Check if the current speed of the device requires IORDY. Used
1763  *	by various controllers for chip configuration.
1764  */
1765 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1766 {
1767 	/* Don't set IORDY if we're preparing for reset.  IORDY may
1768 	 * lead to controller lock up on certain controllers if the
1769 	 * port is not occupied.  See bko#11703 for details.
1770 	 */
1771 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1772 		return 0;
1773 	/* Controller doesn't support IORDY.  Probably a pointless
1774 	 * check as the caller should know this.
1775 	 */
1776 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1777 		return 0;
1778 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1779 	if (ata_id_is_cfa(adev->id)
1780 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1781 		return 0;
1782 	/* PIO3 and higher it is mandatory */
1783 	if (adev->pio_mode > XFER_PIO_2)
1784 		return 1;
1785 	/* We turn it on when possible */
1786 	if (ata_id_has_iordy(adev->id))
1787 		return 1;
1788 	return 0;
1789 }
1790 
1791 /**
1792  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1793  *	@adev: ATA device
1794  *
1795  *	Compute the highest mode possible if we are not using iordy. Return
1796  *	-1 if no iordy mode is available.
1797  */
1798 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1799 {
1800 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1801 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1802 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1803 		/* Is the speed faster than the drive allows non IORDY ? */
1804 		if (pio) {
1805 			/* This is cycle times not frequency - watch the logic! */
1806 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1807 				return 3 << ATA_SHIFT_PIO;
1808 			return 7 << ATA_SHIFT_PIO;
1809 		}
1810 	}
1811 	return 3 << ATA_SHIFT_PIO;
1812 }
1813 
1814 /**
1815  *	ata_do_dev_read_id		-	default ID read method
1816  *	@dev: device
1817  *	@tf: proposed taskfile
1818  *	@id: data buffer
1819  *
1820  *	Issue the identify taskfile and hand back the buffer containing
1821  *	identify data. For some RAID controllers and for pre ATA devices
1822  *	this function is wrapped or replaced by the driver
1823  */
1824 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1825 					struct ata_taskfile *tf, u16 *id)
1826 {
1827 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1828 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1829 }
1830 
1831 /**
1832  *	ata_dev_read_id - Read ID data from the specified device
1833  *	@dev: target device
1834  *	@p_class: pointer to class of the target device (may be changed)
1835  *	@flags: ATA_READID_* flags
1836  *	@id: buffer to read IDENTIFY data into
1837  *
1838  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1839  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1840  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1841  *	for pre-ATA4 drives.
1842  *
1843  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1844  *	now we abort if we hit that case.
1845  *
1846  *	LOCKING:
1847  *	Kernel thread context (may sleep)
1848  *
1849  *	RETURNS:
1850  *	0 on success, -errno otherwise.
1851  */
1852 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1853 		    unsigned int flags, u16 *id)
1854 {
1855 	struct ata_port *ap = dev->link->ap;
1856 	unsigned int class = *p_class;
1857 	struct ata_taskfile tf;
1858 	unsigned int err_mask = 0;
1859 	const char *reason;
1860 	bool is_semb = class == ATA_DEV_SEMB;
1861 	int may_fallback = 1, tried_spinup = 0;
1862 	int rc;
1863 
1864 	if (ata_msg_ctl(ap))
1865 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1866 
1867 retry:
1868 	ata_tf_init(dev, &tf);
1869 
1870 	switch (class) {
1871 	case ATA_DEV_SEMB:
1872 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1873 		/* fall through */
1874 	case ATA_DEV_ATA:
1875 	case ATA_DEV_ZAC:
1876 		tf.command = ATA_CMD_ID_ATA;
1877 		break;
1878 	case ATA_DEV_ATAPI:
1879 		tf.command = ATA_CMD_ID_ATAPI;
1880 		break;
1881 	default:
1882 		rc = -ENODEV;
1883 		reason = "unsupported class";
1884 		goto err_out;
1885 	}
1886 
1887 	tf.protocol = ATA_PROT_PIO;
1888 
1889 	/* Some devices choke if TF registers contain garbage.  Make
1890 	 * sure those are properly initialized.
1891 	 */
1892 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1893 
1894 	/* Device presence detection is unreliable on some
1895 	 * controllers.  Always poll IDENTIFY if available.
1896 	 */
1897 	tf.flags |= ATA_TFLAG_POLLING;
1898 
1899 	if (ap->ops->read_id)
1900 		err_mask = ap->ops->read_id(dev, &tf, id);
1901 	else
1902 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1903 
1904 	if (err_mask) {
1905 		if (err_mask & AC_ERR_NODEV_HINT) {
1906 			ata_dev_dbg(dev, "NODEV after polling detection\n");
1907 			return -ENOENT;
1908 		}
1909 
1910 		if (is_semb) {
1911 			ata_dev_info(dev,
1912 		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1913 			/* SEMB is not supported yet */
1914 			*p_class = ATA_DEV_SEMB_UNSUP;
1915 			return 0;
1916 		}
1917 
1918 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1919 			/* Device or controller might have reported
1920 			 * the wrong device class.  Give a shot at the
1921 			 * other IDENTIFY if the current one is
1922 			 * aborted by the device.
1923 			 */
1924 			if (may_fallback) {
1925 				may_fallback = 0;
1926 
1927 				if (class == ATA_DEV_ATA)
1928 					class = ATA_DEV_ATAPI;
1929 				else
1930 					class = ATA_DEV_ATA;
1931 				goto retry;
1932 			}
1933 
1934 			/* Control reaches here iff the device aborted
1935 			 * both flavors of IDENTIFYs which happens
1936 			 * sometimes with phantom devices.
1937 			 */
1938 			ata_dev_dbg(dev,
1939 				    "both IDENTIFYs aborted, assuming NODEV\n");
1940 			return -ENOENT;
1941 		}
1942 
1943 		rc = -EIO;
1944 		reason = "I/O error";
1945 		goto err_out;
1946 	}
1947 
1948 	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1949 		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1950 			    "class=%d may_fallback=%d tried_spinup=%d\n",
1951 			    class, may_fallback, tried_spinup);
1952 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1953 			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1954 	}
1955 
1956 	/* Falling back doesn't make sense if ID data was read
1957 	 * successfully at least once.
1958 	 */
1959 	may_fallback = 0;
1960 
1961 	swap_buf_le16(id, ATA_ID_WORDS);
1962 
1963 	/* sanity check */
1964 	rc = -EINVAL;
1965 	reason = "device reports invalid type";
1966 
1967 	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1968 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1969 			goto err_out;
1970 		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1971 							ata_id_is_ata(id)) {
1972 			ata_dev_dbg(dev,
1973 				"host indicates ignore ATA devices, ignored\n");
1974 			return -ENOENT;
1975 		}
1976 	} else {
1977 		if (ata_id_is_ata(id))
1978 			goto err_out;
1979 	}
1980 
1981 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1982 		tried_spinup = 1;
1983 		/*
1984 		 * Drive powered-up in standby mode, and requires a specific
1985 		 * SET_FEATURES spin-up subcommand before it will accept
1986 		 * anything other than the original IDENTIFY command.
1987 		 */
1988 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1989 		if (err_mask && id[2] != 0x738c) {
1990 			rc = -EIO;
1991 			reason = "SPINUP failed";
1992 			goto err_out;
1993 		}
1994 		/*
1995 		 * If the drive initially returned incomplete IDENTIFY info,
1996 		 * we now must reissue the IDENTIFY command.
1997 		 */
1998 		if (id[2] == 0x37c8)
1999 			goto retry;
2000 	}
2001 
2002 	if ((flags & ATA_READID_POSTRESET) &&
2003 	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2004 		/*
2005 		 * The exact sequence expected by certain pre-ATA4 drives is:
2006 		 * SRST RESET
2007 		 * IDENTIFY (optional in early ATA)
2008 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2009 		 * anything else..
2010 		 * Some drives were very specific about that exact sequence.
2011 		 *
2012 		 * Note that ATA4 says lba is mandatory so the second check
2013 		 * should never trigger.
2014 		 */
2015 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2016 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2017 			if (err_mask) {
2018 				rc = -EIO;
2019 				reason = "INIT_DEV_PARAMS failed";
2020 				goto err_out;
2021 			}
2022 
2023 			/* current CHS translation info (id[53-58]) might be
2024 			 * changed. reread the identify device info.
2025 			 */
2026 			flags &= ~ATA_READID_POSTRESET;
2027 			goto retry;
2028 		}
2029 	}
2030 
2031 	*p_class = class;
2032 
2033 	return 0;
2034 
2035  err_out:
2036 	if (ata_msg_warn(ap))
2037 		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2038 			     reason, err_mask);
2039 	return rc;
2040 }
2041 
2042 /**
2043  *	ata_read_log_page - read a specific log page
2044  *	@dev: target device
2045  *	@log: log to read
2046  *	@page: page to read
2047  *	@buf: buffer to store read page
2048  *	@sectors: number of sectors to read
2049  *
2050  *	Read log page using READ_LOG_EXT command.
2051  *
2052  *	LOCKING:
2053  *	Kernel thread context (may sleep).
2054  *
2055  *	RETURNS:
2056  *	0 on success, AC_ERR_* mask otherwise.
2057  */
2058 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2059 			       u8 page, void *buf, unsigned int sectors)
2060 {
2061 	unsigned long ap_flags = dev->link->ap->flags;
2062 	struct ata_taskfile tf;
2063 	unsigned int err_mask;
2064 	bool dma = false;
2065 
2066 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
2067 
2068 	/*
2069 	 * Return error without actually issuing the command on controllers
2070 	 * which, e.g., lock up on a read log page.
2071 	 */
2072 	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2073 		return AC_ERR_DEV;
2074 
2075 retry:
2076 	ata_tf_init(dev, &tf);
2077 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
2078 	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2079 		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2080 		tf.protocol = ATA_PROT_DMA;
2081 		dma = true;
2082 	} else {
2083 		tf.command = ATA_CMD_READ_LOG_EXT;
2084 		tf.protocol = ATA_PROT_PIO;
2085 		dma = false;
2086 	}
2087 	tf.lbal = log;
2088 	tf.lbam = page;
2089 	tf.nsect = sectors;
2090 	tf.hob_nsect = sectors >> 8;
2091 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2092 
2093 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2094 				     buf, sectors * ATA_SECT_SIZE, 0);
2095 
2096 	if (err_mask && dma) {
2097 		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2098 		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
2099 		goto retry;
2100 	}
2101 
2102 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
2103 	return err_mask;
2104 }
2105 
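/*
 * Check the log directory (log page 0) to see whether @log is supported:
 * a non-zero "number of pages" entry for @log means the device implements
 * that log.
 */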
2106 static bool ata_log_supported(struct ata_device *dev, u8 log)
2107 {
2108 	struct ata_port *ap = dev->link->ap;
2109 
2110 	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2111 		return false;
2112 	return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2113 }
2114 
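/*
 * Check whether @page of the IDENTIFY DEVICE data log is supported by
 * walking the list of supported pages returned in page 0 of that log.
 */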
2115 static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2116 {
2117 	struct ata_port *ap = dev->link->ap;
2118 	unsigned int err, i;
2119 
2120 	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2121 		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2122 		return false;
2123 	}
2124 
2125 	/*
2126 	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2127 	 * supported.
2128 	 */
2129 	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2130 				1);
2131 	if (err) {
2132 		ata_dev_info(dev,
2133 			     "failed to get Device Identify Log Emask 0x%x\n",
2134 			     err);
2135 		return false;
2136 	}
2137 
2138 	for (i = 0; i < ap->sector_buf[8]; i++) {
2139 		if (ap->sector_buf[9 + i] == page)
2140 			return true;
2141 	}
2142 
2143 	return false;
2144 }
2145 
2146 static int ata_do_link_spd_horkage(struct ata_device *dev)
2147 {
2148 	struct ata_link *plink = ata_dev_phys_link(dev);
2149 	u32 target, target_limit;
2150 
2151 	if (!sata_scr_valid(plink))
2152 		return 0;
2153 
2154 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2155 		target = 1;
2156 	else
2157 		return 0;
2158 
2159 	target_limit = (1 << target) - 1;
2160 
2161 	/* if already on stricter limit, no need to push further */
2162 	if (plink->sata_spd_limit <= target_limit)
2163 		return 0;
2164 
2165 	plink->sata_spd_limit = target_limit;
2166 
2167 	/* Request another EH round by returning -EAGAIN if link is
2168 	 * going faster than the target speed.  Forward progress is
2169 	 * guaranteed by setting sata_spd_limit to target_limit above.
2170 	 */
2171 	if (plink->sata_spd > target) {
2172 		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2173 			     sata_spd_string(target));
2174 		return -EAGAIN;
2175 	}
2176 	return 0;
2177 }
2178 
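/*
 * A device is "knobbled" when the port reports a SATA cable but the
 * device's IDENTIFY data says it is not a native SATA device, i.e. a
 * PATA drive sitting behind a SATA bridge.  Such devices get the
 * conservative bridge limits applied by ata_dev_configure() unless they
 * carry the ATA_HORKAGE_BRIDGE_OK flag.
 */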
2179 static inline u8 ata_dev_knobble(struct ata_device *dev)
2180 {
2181 	struct ata_port *ap = dev->link->ap;
2182 
2183 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2184 		return 0;
2185 
2186 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2187 }
2188 
2189 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2190 {
2191 	struct ata_port *ap = dev->link->ap;
2192 	unsigned int err_mask;
2193 
2194 	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2195 		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2196 		return;
2197 	}
2198 	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2199 				     0, ap->sector_buf, 1);
2200 	if (err_mask) {
2201 		ata_dev_dbg(dev,
2202 			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2203 			    err_mask);
2204 	} else {
2205 		u8 *cmds = dev->ncq_send_recv_cmds;
2206 
2207 		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2208 		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2209 
2210 		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2211 			ata_dev_dbg(dev, "disabling queued TRIM support\n");
2212 			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2213 				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2214 		}
2215 	}
2216 }
2217 
2218 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2219 {
2220 	struct ata_port *ap = dev->link->ap;
2221 	unsigned int err_mask;
2222 
2223 	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2224 		ata_dev_warn(dev,
2225 			     "NCQ Non-Data Log not supported\n");
2226 		return;
2227 	}
2228 	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2229 				     0, ap->sector_buf, 1);
2230 	if (err_mask) {
2231 		ata_dev_dbg(dev,
2232 			    "failed to get NCQ Non-Data Log Emask 0x%x\n",
2233 			    err_mask);
2234 	} else {
2235 		u8 *cmds = dev->ncq_non_data_cmds;
2236 
2237 		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2238 	}
2239 }
2240 
2241 static void ata_dev_config_ncq_prio(struct ata_device *dev)
2242 {
2243 	struct ata_port *ap = dev->link->ap;
2244 	unsigned int err_mask;
2245 
2246 	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2247 		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2248 		return;
2249 	}
2250 
2251 	err_mask = ata_read_log_page(dev,
2252 				     ATA_LOG_IDENTIFY_DEVICE,
2253 				     ATA_LOG_SATA_SETTINGS,
2254 				     ap->sector_buf,
2255 				     1);
2256 	if (err_mask) {
2257 		ata_dev_dbg(dev,
2258 			    "failed to get Identify Device data, Emask 0x%x\n",
2259 			    err_mask);
2260 		return;
2261 	}
2262 
2263 	if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2264 		dev->flags |= ATA_DFLAG_NCQ_PRIO;
2265 	} else {
2266 		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2267 		ata_dev_dbg(dev, "SATA page does not support priority\n");
2268 	}
2269 
2270 }
2271 
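/*
 * Configure NCQ for @dev and build a human readable summary (e.g.
 * "NCQ (depth 31/32), AA") in @desc, which must be at least @desc_sz
 * bytes.  Returns 0 on success, -EIO if enabling FPDMA Auto-Activate
 * fails with anything other than a device error.
 */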
2272 static int ata_dev_config_ncq(struct ata_device *dev,
2273 			       char *desc, size_t desc_sz)
2274 {
2275 	struct ata_port *ap = dev->link->ap;
2276 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2277 	unsigned int err_mask;
2278 	char *aa_desc = "";
2279 
2280 	if (!ata_id_has_ncq(dev->id)) {
2281 		desc[0] = '\0';
2282 		return 0;
2283 	}
2284 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2285 		snprintf(desc, desc_sz, "NCQ (not used)");
2286 		return 0;
2287 	}
2288 	if (ap->flags & ATA_FLAG_NCQ) {
2289 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2290 		dev->flags |= ATA_DFLAG_NCQ;
2291 	}
2292 
2293 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2294 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2295 		ata_id_has_fpdma_aa(dev->id)) {
2296 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2297 			SATA_FPDMA_AA);
2298 		if (err_mask) {
2299 			ata_dev_err(dev,
2300 				    "failed to enable AA (error_mask=0x%x)\n",
2301 				    err_mask);
2302 			if (err_mask != AC_ERR_DEV) {
2303 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2304 				return -EIO;
2305 			}
2306 		} else
2307 			aa_desc = ", AA";
2308 	}
2309 
2310 	if (hdepth >= ddepth)
2311 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2312 	else
2313 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2314 			ddepth, aa_desc);
2315 
2316 	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2317 		if (ata_id_has_ncq_send_and_recv(dev->id))
2318 			ata_dev_config_ncq_send_recv(dev);
2319 		if (ata_id_has_ncq_non_data(dev->id))
2320 			ata_dev_config_ncq_non_data(dev);
2321 		if (ata_id_has_ncq_prio(dev->id))
2322 			ata_dev_config_ncq_prio(dev);
2323 	}
2324 
2325 	return 0;
2326 }
2327 
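/*
 * Enable sense data reporting via SET FEATURES (SETFEATURE_SENSE_DATA,
 * enable value 0x1) if the device advertises the capability and it is
 * not already enabled.
 */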
2328 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2329 {
2330 	unsigned int err_mask;
2331 
2332 	if (!ata_id_has_sense_reporting(dev->id))
2333 		return;
2334 
2335 	if (ata_id_sense_reporting_enabled(dev->id))
2336 		return;
2337 
2338 	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2339 	if (err_mask) {
2340 		ata_dev_dbg(dev,
2341 			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
2342 			    err_mask);
2343 	}
2344 }
2345 
2346 static void ata_dev_config_zac(struct ata_device *dev)
2347 {
2348 	struct ata_port *ap = dev->link->ap;
2349 	unsigned int err_mask;
2350 	u8 *identify_buf = ap->sector_buf;
2351 
2352 	dev->zac_zones_optimal_open = U32_MAX;
2353 	dev->zac_zones_optimal_nonseq = U32_MAX;
2354 	dev->zac_zones_max_open = U32_MAX;
2355 
2356 	/*
2357 	 * Always set the 'ZAC' flag for Host-managed devices.
2358 	 */
2359 	if (dev->class == ATA_DEV_ZAC)
2360 		dev->flags |= ATA_DFLAG_ZAC;
2361 	else if (ata_id_zoned_cap(dev->id) == 0x01)
2362 		/*
2363 		 * Check for host-aware devices.
2364 		 */
2365 		dev->flags |= ATA_DFLAG_ZAC;
2366 
2367 	if (!(dev->flags & ATA_DFLAG_ZAC))
2368 		return;
2369 
2370 	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2371 		ata_dev_warn(dev,
2372 			     "ATA Zoned Information Log not supported\n");
2373 		return;
2374 	}
2375 
2376 	/*
2377 	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2378 	 */
2379 	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2380 				     ATA_LOG_ZONED_INFORMATION,
2381 				     identify_buf, 1);
2382 	if (!err_mask) {
2383 		u64 zoned_cap, opt_open, opt_nonseq, max_open;
2384 
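		/*
		 * Bit 63 of each qword below flags the field as valid;
		 * only then do we consume the reported value.
		 */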
2385 		zoned_cap = get_unaligned_le64(&identify_buf[8]);
2386 		if ((zoned_cap >> 63))
2387 			dev->zac_zoned_cap = (zoned_cap & 1);
2388 		opt_open = get_unaligned_le64(&identify_buf[24]);
2389 		if ((opt_open >> 63))
2390 			dev->zac_zones_optimal_open = (u32)opt_open;
2391 		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2392 		if ((opt_nonseq >> 63))
2393 			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2394 		max_open = get_unaligned_le64(&identify_buf[40]);
2395 		if ((max_open >> 63))
2396 			dev->zac_zones_max_open = (u32)max_open;
2397 	}
2398 }
2399 
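/*
 * Set ATA_DFLAG_TRUSTED if the Security page of the IDENTIFY DEVICE
 * data log reports Trusted Computing capability (bit 0 of the capability
 * qword, which must be marked valid by bit 63).
 */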
2400 static void ata_dev_config_trusted(struct ata_device *dev)
2401 {
2402 	struct ata_port *ap = dev->link->ap;
2403 	u64 trusted_cap;
2404 	unsigned int err;
2405 
2406 	if (!ata_id_has_trusted(dev->id))
2407 		return;
2408 
2409 	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2410 		ata_dev_warn(dev,
2411 			     "Security Log not supported\n");
2412 		return;
2413 	}
2414 
2415 	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2416 			ap->sector_buf, 1);
2417 	if (err) {
2418 		ata_dev_dbg(dev,
2419 			    "failed to read Security Log, Emask 0x%x\n", err);
2420 		return;
2421 	}
2422 
2423 	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2424 	if (!(trusted_cap & (1ULL << 63))) {
2425 		ata_dev_dbg(dev,
2426 			    "Trusted Computing capability qword not valid!\n");
2427 		return;
2428 	}
2429 
2430 	if (trusted_cap & (1 << 0))
2431 		dev->flags |= ATA_DFLAG_TRUSTED;
2432 }
2433 
2434 /**
2435  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2436  *	@dev: Target device to configure
2437  *
2438  *	Configure @dev according to @dev->id.  Generic and low-level
2439  *	driver specific fixups are also applied.
2440  *
2441  *	LOCKING:
2442  *	Kernel thread context (may sleep)
2443  *
2444  *	RETURNS:
2445  *	0 on success, -errno otherwise
2446  */
2447 int ata_dev_configure(struct ata_device *dev)
2448 {
2449 	struct ata_port *ap = dev->link->ap;
2450 	struct ata_eh_context *ehc = &dev->link->eh_context;
2451 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2452 	const u16 *id = dev->id;
2453 	unsigned long xfer_mask;
2454 	unsigned int err_mask;
2455 	char revbuf[7];		/* XYZ-99\0 */
2456 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2457 	char modelbuf[ATA_ID_PROD_LEN+1];
2458 	int rc;
2459 
2460 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2461 		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2462 		return 0;
2463 	}
2464 
2465 	if (ata_msg_probe(ap))
2466 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2467 
2468 	/* set horkage */
2469 	dev->horkage |= ata_dev_blacklisted(dev);
2470 	ata_force_horkage(dev);
2471 
2472 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2473 		ata_dev_info(dev, "unsupported device, disabling\n");
2474 		ata_dev_disable(dev);
2475 		return 0;
2476 	}
2477 
2478 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2479 	    dev->class == ATA_DEV_ATAPI) {
2480 		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2481 			     atapi_enabled ? "not supported with this driver"
2482 			     : "disabled");
2483 		ata_dev_disable(dev);
2484 		return 0;
2485 	}
2486 
2487 	rc = ata_do_link_spd_horkage(dev);
2488 	if (rc)
2489 		return rc;
2490 
2491 	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2492 	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2493 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2494 		dev->horkage |= ATA_HORKAGE_NOLPM;
2495 
2496 	if (ap->flags & ATA_FLAG_NO_LPM)
2497 		dev->horkage |= ATA_HORKAGE_NOLPM;
2498 
2499 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2500 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2501 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2502 	}
2503 
2504 	/* let ACPI work its magic */
2505 	rc = ata_acpi_on_devcfg(dev);
2506 	if (rc)
2507 		return rc;
2508 
2509 	/* massage HPA, do it early as it might change IDENTIFY data */
2510 	rc = ata_hpa_resize(dev);
2511 	if (rc)
2512 		return rc;
2513 
2514 	/* print device capabilities */
2515 	if (ata_msg_probe(ap))
2516 		ata_dev_dbg(dev,
2517 			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2518 			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2519 			    __func__,
2520 			    id[49], id[82], id[83], id[84],
2521 			    id[85], id[86], id[87], id[88]);
2522 
2523 	/* initialize to-be-configured parameters */
2524 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2525 	dev->max_sectors = 0;
2526 	dev->cdb_len = 0;
2527 	dev->n_sectors = 0;
2528 	dev->cylinders = 0;
2529 	dev->heads = 0;
2530 	dev->sectors = 0;
2531 	dev->multi_count = 0;
2532 
2533 	/*
2534 	 * common ATA, ATAPI feature tests
2535 	 */
2536 
2537 	/* find max transfer mode; for printk only */
2538 	xfer_mask = ata_id_xfermask(id);
2539 
2540 	if (ata_msg_probe(ap))
2541 		ata_dump_id(id);
2542 
2543 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2544 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2545 			sizeof(fwrevbuf));
2546 
2547 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2548 			sizeof(modelbuf));
2549 
2550 	/* ATA-specific feature tests */
2551 	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2552 		if (ata_id_is_cfa(id)) {
2553 			/* CPRM may make this media unusable */
2554 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2555 				ata_dev_warn(dev,
2556 	"supports DRM functions and may not be fully accessible\n");
2557 			snprintf(revbuf, 7, "CFA");
2558 		} else {
2559 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2560 			/* Warn the user if the device has TPM extensions */
2561 			if (ata_id_has_tpm(id))
2562 				ata_dev_warn(dev,
2563 	"supports DRM functions and may not be fully accessible\n");
2564 		}
2565 
2566 		dev->n_sectors = ata_id_n_sectors(id);
2567 
2568 		/* get current R/W Multiple count setting */
2569 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2570 			unsigned int max = dev->id[47] & 0xff;
2571 			unsigned int cnt = dev->id[59] & 0xff;
2572 			/* only recognize/allow powers of two here */
2573 			if (is_power_of_2(max) && is_power_of_2(cnt))
2574 				if (cnt <= max)
2575 					dev->multi_count = cnt;
2576 		}
2577 
2578 		if (ata_id_has_lba(id)) {
2579 			const char *lba_desc;
2580 			char ncq_desc[24];
2581 
2582 			lba_desc = "LBA";
2583 			dev->flags |= ATA_DFLAG_LBA;
2584 			if (ata_id_has_lba48(id)) {
2585 				dev->flags |= ATA_DFLAG_LBA48;
2586 				lba_desc = "LBA48";
2587 
2588 				if (dev->n_sectors >= (1UL << 28) &&
2589 				    ata_id_has_flush_ext(id))
2590 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2591 			}
2592 
2593 			/* config NCQ */
2594 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2595 			if (rc)
2596 				return rc;
2597 
2598 			/* print device info to dmesg */
2599 			if (ata_msg_drv(ap) && print_info) {
2600 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2601 					     revbuf, modelbuf, fwrevbuf,
2602 					     ata_mode_string(xfer_mask));
2603 				ata_dev_info(dev,
2604 					     "%llu sectors, multi %u: %s %s\n",
2605 					(unsigned long long)dev->n_sectors,
2606 					dev->multi_count, lba_desc, ncq_desc);
2607 			}
2608 		} else {
2609 			/* CHS */
2610 
2611 			/* Default translation */
2612 			dev->cylinders	= id[1];
2613 			dev->heads	= id[3];
2614 			dev->sectors	= id[6];
2615 
2616 			if (ata_id_current_chs_valid(id)) {
2617 				/* Current CHS translation is valid. */
2618 				dev->cylinders = id[54];
2619 				dev->heads     = id[55];
2620 				dev->sectors   = id[56];
2621 			}
2622 
2623 			/* print device info to dmesg */
2624 			if (ata_msg_drv(ap) && print_info) {
2625 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2626 					     revbuf,	modelbuf, fwrevbuf,
2627 					     ata_mode_string(xfer_mask));
2628 				ata_dev_info(dev,
2629 					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2630 					     (unsigned long long)dev->n_sectors,
2631 					     dev->multi_count, dev->cylinders,
2632 					     dev->heads, dev->sectors);
2633 			}
2634 		}
2635 
2636 		/* Check and mark DevSlp capability. Get DevSlp timing variables
2637 		 * from SATA Settings page of Identify Device Data Log.
2638 		 */
2639 		if (ata_id_has_devslp(dev->id)) {
2640 			u8 *sata_setting = ap->sector_buf;
2641 			int i, j;
2642 
2643 			dev->flags |= ATA_DFLAG_DEVSLP;
2644 			err_mask = ata_read_log_page(dev,
2645 						     ATA_LOG_IDENTIFY_DEVICE,
2646 						     ATA_LOG_SATA_SETTINGS,
2647 						     sata_setting,
2648 						     1);
2649 			if (err_mask)
2650 				ata_dev_dbg(dev,
2651 					    "failed to get Identify Device Data, Emask 0x%x\n",
2652 					    err_mask);
2653 			else
2654 				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2655 					j = ATA_LOG_DEVSLP_OFFSET + i;
2656 					dev->devslp_timing[i] = sata_setting[j];
2657 				}
2658 		}
2659 		ata_dev_config_sense_reporting(dev);
2660 		ata_dev_config_zac(dev);
2661 		ata_dev_config_trusted(dev);
2662 		dev->cdb_len = 32;
2663 	}
2664 
2665 	/* ATAPI-specific feature tests */
2666 	else if (dev->class == ATA_DEV_ATAPI) {
2667 		const char *cdb_intr_string = "";
2668 		const char *atapi_an_string = "";
2669 		const char *dma_dir_string = "";
2670 		u32 sntf;
2671 
2672 		rc = atapi_cdb_len(id);
2673 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2674 			if (ata_msg_warn(ap))
2675 				ata_dev_warn(dev, "unsupported CDB len\n");
2676 			rc = -EINVAL;
2677 			goto err_out_nosup;
2678 		}
2679 		dev->cdb_len = (unsigned int) rc;
2680 
2681 		/* Enable ATAPI AN if both the host and device have
2682 		 * the support.  If PMP is attached, SNTF is required
2683 		 * to enable ATAPI AN to discern between PHY status
2684 		 * changed notifications and ATAPI ANs.
2685 		 */
2686 		if (atapi_an &&
2687 		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2688 		    (!sata_pmp_attached(ap) ||
2689 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2690 			/* issue SET feature command to turn this on */
2691 			err_mask = ata_dev_set_feature(dev,
2692 					SETFEATURES_SATA_ENABLE, SATA_AN);
2693 			if (err_mask)
2694 				ata_dev_err(dev,
2695 					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2696 					    err_mask);
2697 			else {
2698 				dev->flags |= ATA_DFLAG_AN;
2699 				atapi_an_string = ", ATAPI AN";
2700 			}
2701 		}
2702 
2703 		if (ata_id_cdb_intr(dev->id)) {
2704 			dev->flags |= ATA_DFLAG_CDB_INTR;
2705 			cdb_intr_string = ", CDB intr";
2706 		}
2707 
2708 		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2709 			dev->flags |= ATA_DFLAG_DMADIR;
2710 			dma_dir_string = ", DMADIR";
2711 		}
2712 
2713 		if (ata_id_has_da(dev->id)) {
2714 			dev->flags |= ATA_DFLAG_DA;
2715 			zpodd_init(dev);
2716 		}
2717 
2718 		/* print device info to dmesg */
2719 		if (ata_msg_drv(ap) && print_info)
2720 			ata_dev_info(dev,
2721 				     "ATAPI: %s, %s, max %s%s%s%s\n",
2722 				     modelbuf, fwrevbuf,
2723 				     ata_mode_string(xfer_mask),
2724 				     cdb_intr_string, atapi_an_string,
2725 				     dma_dir_string);
2726 	}
2727 
2728 	/* determine max_sectors */
2729 	dev->max_sectors = ATA_MAX_SECTORS;
2730 	if (dev->flags & ATA_DFLAG_LBA48)
2731 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2732 
2733 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2734 	   200 sectors */
2735 	if (ata_dev_knobble(dev)) {
2736 		if (ata_msg_drv(ap) && print_info)
2737 			ata_dev_info(dev, "applying bridge limits\n");
2738 		dev->udma_mask &= ATA_UDMA5;
2739 		dev->max_sectors = ATA_MAX_SECTORS;
2740 	}
2741 
2742 	if ((dev->class == ATA_DEV_ATAPI) &&
2743 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2744 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2745 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2746 	}
2747 
2748 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2749 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2750 					 dev->max_sectors);
2751 
2752 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2753 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2754 					 dev->max_sectors);
2755 
2756 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2757 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2758 
2759 	if (ap->ops->dev_config)
2760 		ap->ops->dev_config(dev);
2761 
2762 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2763 		/* Let the user know. We don't want to disallow opens for
2764 		   rescue purposes, or in case the vendor is just a blithering
2765 		   idiot. Do this after the dev_config call as some controllers
2766 		   with buggy firmware may want to avoid reporting false device
2767 		   bugs */
2768 
2769 		if (print_info) {
2770 			ata_dev_warn(dev,
2771 "Drive reports diagnostics failure. This may indicate a drive\n");
2772 			ata_dev_warn(dev,
2773 "fault or invalid emulation. Contact drive vendor for information.\n");
2774 		}
2775 	}
2776 
2777 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2778 		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2779 		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2780 	}
2781 
2782 	return 0;
2783 
2784 err_out_nosup:
2785 	if (ata_msg_probe(ap))
2786 		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2787 	return rc;
2788 }
2789 
2790 /**
2791  *	ata_cable_40wire	-	return 40 wire cable type
2792  *	@ap: port
2793  *
2794  *	Helper method for drivers which want to hardwire 40 wire cable
2795  *	detection.
2796  */
2797 
2798 int ata_cable_40wire(struct ata_port *ap)
2799 {
2800 	return ATA_CBL_PATA40;
2801 }
2802 
2803 /**
2804  *	ata_cable_80wire	-	return 80 wire cable type
2805  *	@ap: port
2806  *
2807  *	Helper method for drivers which want to hardwire 80 wire cable
2808  *	detection.
2809  */
2810 
2811 int ata_cable_80wire(struct ata_port *ap)
2812 {
2813 	return ATA_CBL_PATA80;
2814 }
2815 
2816 /**
2817  *	ata_cable_unknown	-	return unknown PATA cable.
2818  *	@ap: port
2819  *
2820  *	Helper method for drivers which have no PATA cable detection.
2821  */
2822 
2823 int ata_cable_unknown(struct ata_port *ap)
2824 {
2825 	return ATA_CBL_PATA_UNK;
2826 }
2827 
2828 /**
2829  *	ata_cable_ignore	-	return ignored PATA cable.
2830  *	@ap: port
2831  *
2832  *	Helper method for drivers which don't use cable type to limit
2833  *	transfer mode.
2834  */
2835 int ata_cable_ignore(struct ata_port *ap)
2836 {
2837 	return ATA_CBL_PATA_IGN;
2838 }
2839 
2840 /**
2841  *	ata_cable_sata	-	return SATA cable type
2842  *	@ap: port
2843  *
2844  *	Helper method for drivers which have SATA cables
2845  */
2846 
2847 int ata_cable_sata(struct ata_port *ap)
2848 {
2849 	return ATA_CBL_SATA;
2850 }
2851 
2852 /**
2853  *	ata_bus_probe - Reset and probe ATA bus
2854  *	@ap: Bus to probe
2855  *
2856  *	Master ATA bus probing function.  Initiates a hardware-dependent
2857  *	bus reset, then attempts to identify any devices found on
2858  *	the bus.
2859  *
2860  *	LOCKING:
2861  *	PCI/etc. bus probe sem.
2862  *
2863  *	RETURNS:
2864  *	Zero on success, negative errno otherwise.
2865  */
2866 
2867 int ata_bus_probe(struct ata_port *ap)
2868 {
2869 	unsigned int classes[ATA_MAX_DEVICES];
2870 	int tries[ATA_MAX_DEVICES];
2871 	int rc;
2872 	struct ata_device *dev;
2873 
2874 	ata_for_each_dev(dev, &ap->link, ALL)
2875 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2876 
2877  retry:
2878 	ata_for_each_dev(dev, &ap->link, ALL) {
2879 		/* If we issue an SRST then an ATA drive (not ATAPI)
2880 		 * may change configuration and be in PIO0 timing. If
2881 		 * we do a hard reset (or are coming from power on)
2882 		 * this is true for ATA or ATAPI. Until we've set a
2883 		 * suitable controller mode we should not touch the
2884 		 * bus as we may be talking too fast.
2885 		 */
2886 		dev->pio_mode = XFER_PIO_0;
2887 		dev->dma_mode = 0xff;
2888 
2889 		/* If the controller has a pio mode setup function
2890 		 * then use it to set the chipset to rights. Don't
2891 		 * touch the DMA setup as that will be dealt with when
2892 		 * configuring devices.
2893 		 */
2894 		if (ap->ops->set_piomode)
2895 			ap->ops->set_piomode(ap, dev);
2896 	}
2897 
2898 	/* reset and determine device classes */
2899 	ap->ops->phy_reset(ap);
2900 
2901 	ata_for_each_dev(dev, &ap->link, ALL) {
2902 		if (dev->class != ATA_DEV_UNKNOWN)
2903 			classes[dev->devno] = dev->class;
2904 		else
2905 			classes[dev->devno] = ATA_DEV_NONE;
2906 
2907 		dev->class = ATA_DEV_UNKNOWN;
2908 	}
2909 
2910 	/* read IDENTIFY page and configure devices. We have to do the identify
2911 	   specific sequence bass-ackwards so that PDIAG- is released by
2912 	   the slave device */
2913 
2914 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2915 		if (tries[dev->devno])
2916 			dev->class = classes[dev->devno];
2917 
2918 		if (!ata_dev_enabled(dev))
2919 			continue;
2920 
2921 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2922 				     dev->id);
2923 		if (rc)
2924 			goto fail;
2925 	}
2926 
2927 	/* Now ask for the cable type as PDIAG- should have been released */
2928 	if (ap->ops->cable_detect)
2929 		ap->cbl = ap->ops->cable_detect(ap);
2930 
2931 	/* We may have SATA bridge glue hiding here irrespective of
2932 	 * the reported cable types and sensed types.  When SATA
2933 	 * drives indicate we have a bridge, we don't know which end
2934 	 * of the link the bridge is on, which is a problem.
2935 	 */
2936 	ata_for_each_dev(dev, &ap->link, ENABLED)
2937 		if (ata_id_is_sata(dev->id))
2938 			ap->cbl = ATA_CBL_SATA;
2939 
2940 	/* After the identify sequence we can now set up the devices. We do
2941 	   this in the normal order so that the user doesn't get confused */
2942 
2943 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2944 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2945 		rc = ata_dev_configure(dev);
2946 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2947 		if (rc)
2948 			goto fail;
2949 	}
2950 
2951 	/* configure transfer mode */
2952 	rc = ata_set_mode(&ap->link, &dev);
2953 	if (rc)
2954 		goto fail;
2955 
2956 	ata_for_each_dev(dev, &ap->link, ENABLED)
2957 		return 0;
2958 
2959 	return -ENODEV;
2960 
2961  fail:
2962 	tries[dev->devno]--;
2963 
2964 	switch (rc) {
2965 	case -EINVAL:
2966 		/* eeek, something went very wrong, give up */
2967 		tries[dev->devno] = 0;
2968 		break;
2969 
2970 	case -ENODEV:
2971 		/* give it just one more chance */
2972 		tries[dev->devno] = min(tries[dev->devno], 1);
2973 		/* fall through */
2974 	case -EIO:
2975 		if (tries[dev->devno] == 1) {
2976 			/* This is the last chance, better to slow
2977 			 * down than lose it.
2978 			 */
2979 			sata_down_spd_limit(&ap->link, 0);
2980 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2981 		}
2982 	}
2983 
2984 	if (!tries[dev->devno])
2985 		ata_dev_disable(dev);
2986 
2987 	goto retry;
2988 }
2989 
2990 /**
2991  *	sata_print_link_status - Print SATA link status
2992  *	@link: SATA link to printk link status about
2993  *
2994  *	This function prints link speed and status of a SATA link.
2995  *
2996  *	LOCKING:
2997  *	None.
2998  */
2999 static void sata_print_link_status(struct ata_link *link)
3000 {
3001 	u32 sstatus, scontrol, tmp;
3002 
3003 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
3004 		return;
3005 	sata_scr_read(link, SCR_CONTROL, &scontrol);
3006 
3007 	if (ata_phys_link_online(link)) {
3008 		tmp = (sstatus >> 4) & 0xf;
3009 		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3010 			      sata_spd_string(tmp), sstatus, scontrol);
3011 	} else {
3012 		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3013 			      sstatus, scontrol);
3014 	}
3015 }
3016 
3017 /**
3018  *	ata_dev_pair		-	return other device on cable
3019  *	@adev: device
3020  *
3021  *	Obtain the other device on the same cable, or if none is
3022  *	present NULL is returned
3023  */
3024 
3025 struct ata_device *ata_dev_pair(struct ata_device *adev)
3026 {
3027 	struct ata_link *link = adev->link;
3028 	struct ata_device *pair = &link->device[1 - adev->devno];
3029 	if (!ata_dev_enabled(pair))
3030 		return NULL;
3031 	return pair;
3032 }
3033 
3034 /**
3035  *	sata_down_spd_limit - adjust SATA spd limit downward
3036  *	@link: Link to adjust SATA spd limit for
3037  *	@spd_limit: Additional limit
3038  *
3039  *	Adjust SATA spd limit of @link downward.  Note that this
3040  *	function only adjusts the limit.  The change must be applied
3041  *	using sata_set_spd().
3042  *
3043  *	If @spd_limit is non-zero, the speed is limited to @spd_limit or
3044  *	lower, if such a speed is supported.  If
3045  *	@spd_limit is slower than any supported speed, only the lowest
3046  *	supported speed is allowed.
3047  *
3048  *	LOCKING:
3049  *	Inherited from caller.
3050  *
3051  *	RETURNS:
3052  *	0 on success, negative errno on failure
3053  */
3054 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3055 {
3056 	u32 sstatus, spd, mask;
3057 	int rc, bit;
3058 
3059 	if (!sata_scr_valid(link))
3060 		return -EOPNOTSUPP;
3061 
3062 	/* If SCR can be read, use it to determine the current SPD.
3063 	 * If not, use cached value in link->sata_spd.
3064 	 */
3065 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3066 	if (rc == 0 && ata_sstatus_online(sstatus))
3067 		spd = (sstatus >> 4) & 0xf;
3068 	else
3069 		spd = link->sata_spd;
3070 
3071 	mask = link->sata_spd_limit;
3072 	if (mask <= 1)
3073 		return -EINVAL;
3074 
3075 	/* unconditionally mask off the highest bit */
3076 	bit = fls(mask) - 1;
3077 	mask &= ~(1 << bit);
3078 
3079 	/*
3080 	 * Mask off all speeds higher than or equal to the current one.  At
3081 	 * this point, if current SPD is not available and we previously
3082 	 * recorded the link speed from SStatus, the driver has already
3083 	 * masked off the highest bit so mask should already be 1 or 0.
3084 	 * Otherwise, we should not force 1.5Gbps on a link where we have
3085 	 * not previously recorded speed from SStatus.  Just return in this
3086 	 * case.
3087 	 */
3088 	if (spd > 1)
3089 		mask &= (1 << (spd - 1)) - 1;
3090 	else
3091 		return -EINVAL;
3092 
3093 	/* were we already at the bottom? */
3094 	if (!mask)
3095 		return -EINVAL;
3096 
3097 	if (spd_limit) {
3098 		if (mask & ((1 << spd_limit) - 1))
3099 			mask &= (1 << spd_limit) - 1;
3100 		else {
3101 			bit = ffs(mask) - 1;
3102 			mask = 1 << bit;
3103 		}
3104 	}
3105 
3106 	link->sata_spd_limit = mask;
3107 
3108 	ata_link_warn(link, "limiting SATA link speed to %s\n",
3109 		      sata_spd_string(fls(mask)));
3110 
3111 	return 0;
3112 }
3113 
3114 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3115 {
3116 	struct ata_link *host_link = &link->ap->link;
3117 	u32 limit, target, spd;
3118 
3119 	limit = link->sata_spd_limit;
3120 
3121 	/* Don't configure downstream link faster than upstream link.
3122 	 * It doesn't speed up anything and some PMPs choke on such
3123 	 * configuration.
3124 	 */
3125 	if (!ata_is_host_link(link) && host_link->sata_spd)
3126 		limit &= (1 << host_link->sata_spd) - 1;
3127 
3128 	if (limit == UINT_MAX)
3129 		target = 0;
3130 	else
3131 		target = fls(limit);
3132 
3133 	spd = (*scontrol >> 4) & 0xf;
3134 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3135 
3136 	return spd != target;
3137 }
3138 
3139 /**
3140  *	sata_set_spd_needed - is SATA spd configuration needed
3141  *	@link: Link in question
3142  *
3143  *	Test whether the spd limit in SControl matches
3144  *	@link->sata_spd_limit.  This function is used to determine
3145  *	whether hardreset is necessary to apply SATA spd
3146  *	configuration.
3147  *
3148  *	LOCKING:
3149  *	Inherited from caller.
3150  *
3151  *	RETURNS:
3152  *	1 if SATA spd configuration is needed, 0 otherwise.
3153  */
3154 static int sata_set_spd_needed(struct ata_link *link)
3155 {
3156 	u32 scontrol;
3157 
3158 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3159 		return 1;
3160 
3161 	return __sata_set_spd_needed(link, &scontrol);
3162 }
3163 
3164 /**
3165  *	sata_set_spd - set SATA spd according to spd limit
3166  *	@link: Link to set SATA spd for
3167  *
3168  *	Set SATA spd of @link according to sata_spd_limit.
3169  *
3170  *	LOCKING:
3171  *	Inherited from caller.
3172  *
3173  *	RETURNS:
3174  *	0 if spd doesn't need to be changed, 1 if spd has been
3175  *	changed.  Negative errno if SCR registers are inaccessible.
3176  */
3177 int sata_set_spd(struct ata_link *link)
3178 {
3179 	u32 scontrol;
3180 	int rc;
3181 
3182 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3183 		return rc;
3184 
3185 	if (!__sata_set_spd_needed(link, &scontrol))
3186 		return 0;
3187 
3188 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3189 		return rc;
3190 
3191 	return 1;
3192 }
3193 
3194 /*
3195  * This mode timing computation functionality is ported over from
3196  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3197  */
3198 /*
3199  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3200  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3201  * for UDMA6, which is currently supported only by Maxtor drives.
3202  *
3203  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3204  */
3205 
3206 static const struct ata_timing ata_timing[] = {
3207 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
3208 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
3209 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
3210 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
3211 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
3212 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
3213 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
3214 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
3215 
3216 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
3217 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
3218 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
3219 
3220 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
3221 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
3222 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
3223 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
3224 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
3225 
3226 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
3227 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
3228 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
3229 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
3230 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
3231 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
3232 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
3233 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
3234 
3235 	{ 0xFF }
3236 };
3237 
3238 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3239 #define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)
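/*
 * ENOUGH() is ceiling division: ENOUGH(70, 30) = (70 - 1)/30 + 1 = 3,
 * i.e. DIV_ROUND_UP(70, 30).  EZ() additionally scales @v by 1000 first
 * and maps 0 to 0 so that unused fields stay zero.  The timing table
 * entries are in nanoseconds while T/UT are typically the bus clock
 * periods in picoseconds (callers tend to pass 1000000000 / clock_kHz),
 * so the quantized result is a count of whole clock cycles.
 */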
3240 
3241 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3242 {
3243 	q->setup	= EZ(t->setup,       T);
3244 	q->act8b	= EZ(t->act8b,       T);
3245 	q->rec8b	= EZ(t->rec8b,       T);
3246 	q->cyc8b	= EZ(t->cyc8b,       T);
3247 	q->active	= EZ(t->active,      T);
3248 	q->recover	= EZ(t->recover,     T);
3249 	q->dmack_hold	= EZ(t->dmack_hold,  T);
3250 	q->cycle	= EZ(t->cycle,       T);
3251 	q->udma		= EZ(t->udma,       UT);
3252 }
3253 
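/*
 * Merge two sets of timings by taking the slower (larger) value of each
 * field selected in @what, so the result satisfies both constraints.
 */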
3254 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3255 		      struct ata_timing *m, unsigned int what)
3256 {
3257 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3258 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3259 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3260 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3261 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3262 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3263 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3264 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3265 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3266 }
3267 
3268 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3269 {
3270 	const struct ata_timing *t = ata_timing;
3271 
3272 	while (xfer_mode > t->mode)
3273 		t++;
3274 
3275 	if (xfer_mode == t->mode)
3276 		return t;
3277 
3278 	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3279 			__func__, xfer_mode);
3280 
3281 	return NULL;
3282 }
3283 
3284 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3285 		       struct ata_timing *t, int T, int UT)
3286 {
3287 	const u16 *id = adev->id;
3288 	const struct ata_timing *s;
3289 	struct ata_timing p;
3290 
3291 	/*
3292 	 * Find the mode.
3293 	 */
3294 
3295 	if (!(s = ata_timing_find_mode(speed)))
3296 		return -EINVAL;
3297 
3298 	memcpy(t, s, sizeof(*s));
3299 
3300 	/*
3301 	 * If the drive is an EIDE drive, it can tell us it needs extended
3302 	 * PIO/MW_DMA cycle timing.
3303 	 */
3304 
3305 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3306 		memset(&p, 0, sizeof(p));
3307 
3308 		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3309 			if (speed <= XFER_PIO_2)
3310 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3311 			else if ((speed <= XFER_PIO_4) ||
3312 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3313 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3314 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3315 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3316 
3317 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3318 	}
3319 
3320 	/*
3321 	 * Convert the timing to bus clock counts.
3322 	 */
3323 
3324 	ata_timing_quantize(t, t, T, UT);
3325 
3326 	/*
3327 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3328 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3329 	 * DMA cycle timing is slower/equal than the fastest PIO timing.
3330 	 */
3331 
3332 	if (speed > XFER_PIO_6) {
3333 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3334 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3335 	}
3336 
3337 	/*
3338 	 * Lengthen active & recovery time so that cycle time is correct.
3339 	 */
3340 
3341 	if (t->act8b + t->rec8b < t->cyc8b) {
3342 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3343 		t->rec8b = t->cyc8b - t->act8b;
3344 	}
3345 
3346 	if (t->active + t->recover < t->cycle) {
3347 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3348 		t->recover = t->cycle - t->active;
3349 	}
3350 
3351 	/* In a few cases quantisation may produce enough errors to
3352 	   leave t->cycle too low for the sum of active and recovery;
3353 	   if so we must correct this */
3354 	if (t->active + t->recover > t->cycle)
3355 		t->cycle = t->active + t->recover;
3356 
3357 	return 0;
3358 }
3359 
3360 /**
3361  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3362  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3363  *	@cycle: cycle duration in ns
3364  *
3365  *	Return matching xfer mode for @cycle.  The returned mode is of
3366  *	the transfer type specified by @xfer_shift.  If @cycle is too
3367  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3368  *	than the fastest known mode, the fastest mode is returned.
3369  *
3370  *	LOCKING:
3371  *	None.
3372  *
3373  *	RETURNS:
3374  *	Matching xfer_mode, 0xff if no match found.
3375  */
3376 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3377 {
3378 	u8 base_mode = 0xff, last_mode = 0xff;
3379 	const struct ata_xfer_ent *ent;
3380 	const struct ata_timing *t;
3381 
3382 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3383 		if (ent->shift == xfer_shift)
3384 			base_mode = ent->base;
3385 
3386 	for (t = ata_timing_find_mode(base_mode);
3387 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3388 		unsigned short this_cycle;
3389 
3390 		switch (xfer_shift) {
3391 		case ATA_SHIFT_PIO:
3392 		case ATA_SHIFT_MWDMA:
3393 			this_cycle = t->cycle;
3394 			break;
3395 		case ATA_SHIFT_UDMA:
3396 			this_cycle = t->udma;
3397 			break;
3398 		default:
3399 			return 0xff;
3400 		}
3401 
3402 		if (cycle > this_cycle)
3403 			break;
3404 
3405 		last_mode = t->mode;
3406 	}
3407 
3408 	return last_mode;
3409 }
3410 
3411 /**
3412  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3413  *	@dev: Device to adjust xfer masks
3414  *	@sel: ATA_DNXFER_* selector
3415  *
3416  *	Adjust xfer masks of @dev downward.  Note that this function
3417  *	does not apply the change.  Invoking ata_set_mode() afterwards
3418  *	will apply the limit.
3419  *
3420  *	LOCKING:
3421  *	Inherited from caller.
3422  *
3423  *	RETURNS:
3424  *	0 on success, negative errno on failure
3425  */
3426 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3427 {
3428 	char buf[32];
3429 	unsigned long orig_mask, xfer_mask;
3430 	unsigned long pio_mask, mwdma_mask, udma_mask;
3431 	int quiet, highbit;
3432 
3433 	quiet = !!(sel & ATA_DNXFER_QUIET);
3434 	sel &= ~ATA_DNXFER_QUIET;
3435 
3436 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3437 						  dev->mwdma_mask,
3438 						  dev->udma_mask);
3439 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3440 
3441 	switch (sel) {
3442 	case ATA_DNXFER_PIO:
3443 		highbit = fls(pio_mask) - 1;
3444 		pio_mask &= ~(1 << highbit);
3445 		break;
3446 
3447 	case ATA_DNXFER_DMA:
3448 		if (udma_mask) {
3449 			highbit = fls(udma_mask) - 1;
3450 			udma_mask &= ~(1 << highbit);
3451 			if (!udma_mask)
3452 				return -ENOENT;
3453 		} else if (mwdma_mask) {
3454 			highbit = fls(mwdma_mask) - 1;
3455 			mwdma_mask &= ~(1 << highbit);
3456 			if (!mwdma_mask)
3457 				return -ENOENT;
3458 		}
3459 		break;
3460 
3461 	case ATA_DNXFER_40C:
3462 		udma_mask &= ATA_UDMA_MASK_40C;
3463 		break;
3464 
3465 	case ATA_DNXFER_FORCE_PIO0:
3466 		pio_mask &= 1;
3467 		/* fall through */
3468 	case ATA_DNXFER_FORCE_PIO:
3469 		mwdma_mask = 0;
3470 		udma_mask = 0;
3471 		break;
3472 
3473 	default:
3474 		BUG();
3475 	}
3476 
3477 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3478 
3479 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3480 		return -ENOENT;
3481 
3482 	if (!quiet) {
3483 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3484 			snprintf(buf, sizeof(buf), "%s:%s",
3485 				 ata_mode_string(xfer_mask),
3486 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3487 		else
3488 			snprintf(buf, sizeof(buf), "%s",
3489 				 ata_mode_string(xfer_mask));
3490 
3491 		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3492 	}
3493 
3494 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3495 			    &dev->udma_mask);
3496 
3497 	return 0;
3498 }
3499 
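/*
 * Apply dev->xfer_mode to the device: issue SET FEATURES - XFER MODE
 * (skipped for SATA devices with the NOSETXFER horkage), then
 * revalidate.  Device errors from various legacy or broken devices are
 * tolerated; see the checks below.
 */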
3500 static int ata_dev_set_mode(struct ata_device *dev)
3501 {
3502 	struct ata_port *ap = dev->link->ap;
3503 	struct ata_eh_context *ehc = &dev->link->eh_context;
3504 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3505 	const char *dev_err_whine = "";
3506 	int ign_dev_err = 0;
3507 	unsigned int err_mask = 0;
3508 	int rc;
3509 
3510 	dev->flags &= ~ATA_DFLAG_PIO;
3511 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3512 		dev->flags |= ATA_DFLAG_PIO;
3513 
3514 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3515 		dev_err_whine = " (SET_XFERMODE skipped)";
3516 	else {
3517 		if (nosetxfer)
3518 			ata_dev_warn(dev,
3519 				     "NOSETXFER but PATA detected - can't "
3520 				     "skip SETXFER, might malfunction\n");
3521 		err_mask = ata_dev_set_xfermode(dev);
3522 	}
3523 
3524 	if (err_mask & ~AC_ERR_DEV)
3525 		goto fail;
3526 
3527 	/* revalidate */
3528 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3529 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3530 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3531 	if (rc)
3532 		return rc;
3533 
3534 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3535 		/* Old CFA may refuse this command, which is just fine */
3536 		if (ata_id_is_cfa(dev->id))
3537 			ign_dev_err = 1;
3538 		/* Catch several broken garbage emulations plus some pre
3539 		   ATA devices */
3540 		if (ata_id_major_version(dev->id) == 0 &&
3541 					dev->pio_mode <= XFER_PIO_2)
3542 			ign_dev_err = 1;
3543 		/* Some very old devices and some bad newer ones fail
3544 		   any kind of SET_XFERMODE request but support PIO0-2
3545 		   timings and no IORDY */
3546 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3547 			ign_dev_err = 1;
3548 	}
3549 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3550 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3551 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3552 	    dev->dma_mode == XFER_MW_DMA_0 &&
3553 	    (dev->id[63] >> 8) & 1)
3554 		ign_dev_err = 1;
3555 
3556 	/* if the device is actually configured correctly, ignore dev err */
3557 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3558 		ign_dev_err = 1;
3559 
3560 	if (err_mask & AC_ERR_DEV) {
3561 		if (!ign_dev_err)
3562 			goto fail;
3563 		else
3564 			dev_err_whine = " (device error ignored)";
3565 	}
3566 
3567 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3568 		dev->xfer_shift, (int)dev->xfer_mode);
3569 
3570 	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3571 	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
3572 		ata_dev_info(dev, "configured for %s%s\n",
3573 			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3574 			     dev_err_whine);
3575 
3576 	return 0;
3577 
3578  fail:
3579 	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3580 	return -EIO;
3581 }
3582 
3583 /**
3584  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3585  *	@link: link on which timings will be programmed
3586  *	@r_failed_dev: out parameter for failed device
3587  *
3588  *	Standard implementation of the function used to tune and set
3589  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3590  *	ata_dev_set_mode() fails, pointer to the failing device is
3591  *	returned in @r_failed_dev.
3592  *
3593  *	LOCKING:
3594  *	PCI/etc. bus probe sem.
3595  *
3596  *	RETURNS:
3597  *	0 on success, negative errno otherwise
3598  */
3599 
3600 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3601 {
3602 	struct ata_port *ap = link->ap;
3603 	struct ata_device *dev;
3604 	int rc = 0, used_dma = 0, found = 0;
3605 
3606 	/* step 1: calculate xfer_mask */
3607 	ata_for_each_dev(dev, link, ENABLED) {
3608 		unsigned long pio_mask, dma_mask;
3609 		unsigned int mode_mask;
3610 
3611 		mode_mask = ATA_DMA_MASK_ATA;
3612 		if (dev->class == ATA_DEV_ATAPI)
3613 			mode_mask = ATA_DMA_MASK_ATAPI;
3614 		else if (ata_id_is_cfa(dev->id))
3615 			mode_mask = ATA_DMA_MASK_CFA;
3616 
3617 		ata_dev_xfermask(dev);
3618 		ata_force_xfermask(dev);
3619 
3620 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3621 
3622 		if (libata_dma_mask & mode_mask)
3623 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3624 						     dev->udma_mask);
3625 		else
3626 			dma_mask = 0;
3627 
3628 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3629 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3630 
3631 		found = 1;
3632 		if (ata_dma_enabled(dev))
3633 			used_dma = 1;
3634 	}
3635 	if (!found)
3636 		goto out;
3637 
3638 	/* step 2: always set host PIO timings */
3639 	ata_for_each_dev(dev, link, ENABLED) {
3640 		if (dev->pio_mode == 0xff) {
3641 			ata_dev_warn(dev, "no PIO support\n");
3642 			rc = -EINVAL;
3643 			goto out;
3644 		}
3645 
3646 		dev->xfer_mode = dev->pio_mode;
3647 		dev->xfer_shift = ATA_SHIFT_PIO;
3648 		if (ap->ops->set_piomode)
3649 			ap->ops->set_piomode(ap, dev);
3650 	}
3651 
3652 	/* step 3: set host DMA timings */
3653 	ata_for_each_dev(dev, link, ENABLED) {
3654 		if (!ata_dma_enabled(dev))
3655 			continue;
3656 
3657 		dev->xfer_mode = dev->dma_mode;
3658 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3659 		if (ap->ops->set_dmamode)
3660 			ap->ops->set_dmamode(ap, dev);
3661 	}
3662 
3663 	/* step 4: update devices' xfer mode */
3664 	ata_for_each_dev(dev, link, ENABLED) {
3665 		rc = ata_dev_set_mode(dev);
3666 		if (rc)
3667 			goto out;
3668 	}
3669 
3670 	/* Record simplex status. If we selected DMA then the other
3671 	 * host channels are not permitted to do so.
3672 	 */
3673 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3674 		ap->host->simplex_claimed = ap;
3675 
3676  out:
3677 	if (rc)
3678 		*r_failed_dev = dev;
3679 	return rc;
3680 }
3681 
3682 /**
3683  *	ata_wait_ready - wait for link to become ready
3684  *	@link: link to be waited on
3685  *	@deadline: deadline jiffies for the operation
3686  *	@check_ready: callback to check link readiness
3687  *
3688  *	Wait for @link to become ready.  @check_ready should return
3689  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3690  *	link doesn't seem to be occupied, other errno for other error
3691  *	conditions.
3692  *
3693  *	Transient -ENODEV conditions are allowed for
3694  *	ATA_TMOUT_FF_WAIT.
3695  *
3696  *	LOCKING:
3697  *	EH context.
3698  *
3699  *	RETURNS:
3700  *	0 if @link is ready before @deadline; otherwise, -errno.
3701  */
3702 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3703 		   int (*check_ready)(struct ata_link *link))
3704 {
3705 	unsigned long start = jiffies;
3706 	unsigned long nodev_deadline;
3707 	int warned = 0;
3708 
3709 	/* choose which 0xff timeout to use, read comment in libata.h */
3710 	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3711 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3712 	else
3713 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3714 
3715 	/* Slave readiness can't be tested separately from master.  On
3716 	 * M/S emulation configuration, this function should be called
3717 	 * only on the master and it will handle both master and slave.
3718 	 */
3719 	WARN_ON(link == link->ap->slave_link);
3720 
3721 	if (time_after(nodev_deadline, deadline))
3722 		nodev_deadline = deadline;
3723 
3724 	while (1) {
3725 		unsigned long now = jiffies;
3726 		int ready, tmp;
3727 
3728 		ready = tmp = check_ready(link);
3729 		if (ready > 0)
3730 			return 0;
3731 
3732 		/*
3733 		 * -ENODEV could be transient.  Ignore -ENODEV if link
3734 		 * is online.  Also, some SATA devices take a long
3735 		 * time to clear 0xff after reset.  Wait for
3736 		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3737 		 * offline.
3738 		 *
3739 		 * Note that some PATA controllers (pata_ali) explode
3740 		 * if status register is read more than once when
3741 		 * there's no device attached.
3742 		 */
3743 		if (ready == -ENODEV) {
3744 			if (ata_link_online(link))
3745 				ready = 0;
3746 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3747 				 !ata_link_offline(link) &&
3748 				 time_before(now, nodev_deadline))
3749 				ready = 0;
3750 		}
3751 
3752 		if (ready)
3753 			return ready;
3754 		if (time_after(now, deadline))
3755 			return -EBUSY;
3756 
3757 		if (!warned && time_after(now, start + 5 * HZ) &&
3758 		    (deadline - now > 3 * HZ)) {
3759 			ata_link_warn(link,
3760 				"link is slow to respond, please be patient "
3761 				"(ready=%d)\n", tmp);
3762 			warned = 1;
3763 		}
3764 
3765 		ata_msleep(link->ap, 50);
3766 	}
3767 }
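
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * @check_ready callback samples the taskfile Status register and feeds it
 * to ata_check_ready(), which maps BSY/0xff into the 1/0/-ENODEV
 * convention documented above.  my_read_status() is a hypothetical LLD
 * helper standing in for the controller specific register read.
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		u8 status = my_read_status(link->ap);
 *
 *		return ata_check_ready(status);
 *	}
 *
 * The callback is then passed as the @check_ready argument of
 * ata_wait_ready() or ata_wait_after_reset() from the LLD's reset path.
 */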
3768 
3769 /**
3770  *	ata_wait_after_reset - wait for link to become ready after reset
3771  *	@link: link to be waited on
3772  *	@deadline: deadline jiffies for the operation
3773  *	@check_ready: callback to check link readiness
3774  *
3775  *	Wait for @link to become ready after reset.
3776  *
3777  *	LOCKING:
3778  *	EH context.
3779  *
3780  *	RETURNS:
3781  *	0 if @link is ready before @deadline; otherwise, -errno.
3782  */
3783 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3784 				int (*check_ready)(struct ata_link *link))
3785 {
3786 	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3787 
3788 	return ata_wait_ready(link, deadline, check_ready);
3789 }
3790 
3791 /**
3792  *	sata_link_debounce - debounce SATA phy status
3793  *	@link: ATA link to debounce SATA phy status for
3794  *	@params: timing parameters { interval, duration, timeout } in msec
3795  *	@deadline: deadline jiffies for the operation
3796  *
3797  *	Make sure SStatus of @link reaches stable state, determined by
3798  *	holding the same value where DET is not 1 for @duration polled
3799  *	every @interval, before @timeout.  The timeout constrains the
3800  *	beginning of the stable state.  Because DET gets stuck at 1 on
3801  *	some controllers after hot unplugging, this function waits
3802  *	until the timeout then returns 0 if DET is stable at 1.
3803  *
3804  *	@timeout is further limited by @deadline.  The sooner of the
3805  *	two is used.
3806  *
3807  *	LOCKING:
3808  *	Kernel thread context (may sleep)
3809  *
3810  *	RETURNS:
3811  *	0 on success, -errno on failure.
3812  */
3813 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3814 		       unsigned long deadline)
3815 {
3816 	unsigned long interval = params[0];
3817 	unsigned long duration = params[1];
3818 	unsigned long last_jiffies, t;
3819 	u32 last, cur;
3820 	int rc;
3821 
3822 	t = ata_deadline(jiffies, params[2]);
3823 	if (time_before(t, deadline))
3824 		deadline = t;
3825 
3826 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3827 		return rc;
3828 	cur &= 0xf;
3829 
3830 	last = cur;
3831 	last_jiffies = jiffies;
3832 
3833 	while (1) {
3834 		ata_msleep(link->ap, interval);
3835 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3836 			return rc;
3837 		cur &= 0xf;
3838 
3839 		/* DET stable? */
3840 		if (cur == last) {
3841 			if (cur == 1 && time_before(jiffies, deadline))
3842 				continue;
3843 			if (time_after(jiffies,
3844 				       ata_deadline(last_jiffies, duration)))
3845 				return 0;
3846 			continue;
3847 		}
3848 
3849 		/* unstable, start over */
3850 		last = cur;
3851 		last_jiffies = jiffies;
3852 
3853 		/* Check deadline.  If debouncing failed, return
3854 		 * -EPIPE to tell upper layer to lower link speed.
3855 		 */
3856 		if (time_after(jiffies, deadline))
3857 			return -EPIPE;
3858 	}
3859 }
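
/*
 * Usage sketch (illustrative): callers normally pass one of the exported
 * sata_deb_timing_* tables together with whatever deadline the surrounding
 * EH code is already working against, e.g.
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_hotplug, deadline);
 *	if (rc)
 *		return rc;
 *
 * The choice of table and deadline here is only an example.
 */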
3860 
3861 /**
3862  *	sata_link_resume - resume SATA link
3863  *	@link: ATA link to resume SATA
3864  *	@params: timing parameters { interval, duration, timeout } in msec
3865  *	@deadline: deadline jiffies for the operation
3866  *
3867  *	Resume SATA phy @link and debounce it.
3868  *
3869  *	LOCKING:
3870  *	Kernel thread context (may sleep)
3871  *
3872  *	RETURNS:
3873  *	0 on success, -errno on failure.
3874  */
3875 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3876 		     unsigned long deadline)
3877 {
3878 	int tries = ATA_LINK_RESUME_TRIES;
3879 	u32 scontrol, serror;
3880 	int rc;
3881 
3882 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3883 		return rc;
3884 
3885 	/*
3886 	 * Writes to SControl sometimes get ignored by certain
3887 	 * controllers (ata_piix SIDPR).  Make sure DET is actually
3888 	 * cleared.
3889 	 */
3890 	do {
3891 		scontrol = (scontrol & 0x0f0) | 0x300;
3892 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3893 			return rc;
3894 		/*
3895 		 * Some PHYs react badly if SStatus is pounded
3896 		 * immediately after resuming.  Delay 200ms before
3897 		 * debouncing.
3898 		 */
3899 		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3900 			ata_msleep(link->ap, 200);
3901 
3902 		/* is SControl restored correctly? */
3903 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3904 			return rc;
3905 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3906 
3907 	if ((scontrol & 0xf0f) != 0x300) {
3908 		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3909 			     scontrol);
3910 		return 0;
3911 	}
3912 
3913 	if (tries < ATA_LINK_RESUME_TRIES)
3914 		ata_link_warn(link, "link resume succeeded after %d retries\n",
3915 			      ATA_LINK_RESUME_TRIES - tries);
3916 
3917 	if ((rc = sata_link_debounce(link, params, deadline)))
3918 		return rc;
3919 
3920 	/* clear SError, some PHYs require this even for SRST to work */
3921 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3922 		rc = sata_scr_write(link, SCR_ERROR, serror);
3923 
3924 	return rc != -EINVAL ? rc : 0;
3925 }
3926 
3927 /**
3928  *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3929  *	@link: ATA link to manipulate SControl for
3930  *	@policy: LPM policy to configure
3931  *	@spm_wakeup: initiate LPM transition to active state
3932  *
3933  *	Manipulate the IPM field of the SControl register of @link
3934  *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3935  *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3936  *	the link.  This function also clears PHYRDY_CHG before
3937  *	returning.
3938  *
3939  *	LOCKING:
3940  *	EH context.
3941  *
3942  *	RETURNS:
3943  *	0 on success, -errno otherwise.
3944  */
3945 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3946 		      bool spm_wakeup)
3947 {
3948 	struct ata_eh_context *ehc = &link->eh_context;
3949 	bool woken_up = false;
3950 	u32 scontrol;
3951 	int rc;
3952 
3953 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3954 	if (rc)
3955 		return rc;
3956 
3957 	switch (policy) {
3958 	case ATA_LPM_MAX_POWER:
3959 		/* disable all LPM transitions */
3960 		scontrol |= (0x7 << 8);
3961 		/* initiate transition to active state */
3962 		if (spm_wakeup) {
3963 			scontrol |= (0x4 << 12);
3964 			woken_up = true;
3965 		}
3966 		break;
3967 	case ATA_LPM_MED_POWER:
3968 		/* allow LPM to PARTIAL */
3969 		scontrol &= ~(0x1 << 8);
3970 		scontrol |= (0x6 << 8);
3971 		break;
3972 	case ATA_LPM_MED_POWER_WITH_DIPM:
3973 	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
3974 	case ATA_LPM_MIN_POWER:
3975 		if (ata_link_nr_enabled(link) > 0)
3976 			/* no restrictions on LPM transitions */
3977 			scontrol &= ~(0x7 << 8);
3978 		else {
3979 			/* empty port, power off */
3980 			scontrol &= ~0xf;
3981 			scontrol |= (0x1 << 2);
3982 		}
3983 		break;
3984 	default:
3985 		WARN_ON(1);
3986 	}
3987 
3988 	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3989 	if (rc)
3990 		return rc;
3991 
3992 	/* give the link time to transit out of LPM state */
3993 	if (woken_up)
3994 		msleep(10);
3995 
3996 	/* clear PHYRDY_CHG from SError */
3997 	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3998 	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3999 }
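
/*
 * Usage sketch (illustrative): a driver's ->set_lpm hook may delegate the
 * SControl bit manipulation to this helper.  my_set_lpm() below is a
 * hypothetical example; real drivers typically wrap the call with
 * controller specific enable/disable steps.
 *
 *	static int my_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 *			      unsigned hints)
 *	{
 *		return sata_link_scr_lpm(link, policy, true);
 *	}
 */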
4000 
4001 /**
4002  *	ata_std_prereset - prepare for reset
4003  *	@link: ATA link to be reset
4004  *	@deadline: deadline jiffies for the operation
4005  *
4006  *	@link is about to be reset.  Initialize it.  Failure from
4007  *	prereset makes libata abort the whole reset sequence and give
4008  *	up on that port, so prereset should be best-effort.  It does its
4009  *	best to prepare for the reset sequence but if things go wrong, it
4010  *	should just whine, not fail.
4011  *
4012  *	LOCKING:
4013  *	Kernel thread context (may sleep)
4014  *
4015  *	RETURNS:
4016  *	0 on success, -errno otherwise.
4017  */
4018 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
4019 {
4020 	struct ata_port *ap = link->ap;
4021 	struct ata_eh_context *ehc = &link->eh_context;
4022 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
4023 	int rc;
4024 
4025 	/* if we're about to do hardreset, nothing more to do */
4026 	if (ehc->i.action & ATA_EH_HARDRESET)
4027 		return 0;
4028 
4029 	/* if SATA, resume link */
4030 	if (ap->flags & ATA_FLAG_SATA) {
4031 		rc = sata_link_resume(link, timing, deadline);
4032 		/* whine about phy resume failure but proceed */
4033 		if (rc && rc != -EOPNOTSUPP)
4034 			ata_link_warn(link,
4035 				      "failed to resume link for reset (errno=%d)\n",
4036 				      rc);
4037 	}
4038 
4039 	/* no point in trying softreset on offline link */
4040 	if (ata_phys_link_offline(link))
4041 		ehc->i.action &= ~ATA_EH_SOFTRESET;
4042 
4043 	return 0;
4044 }
4045 
4046 /**
4047  *	sata_link_hardreset - reset link via SATA phy reset
4048  *	@link: link to reset
4049  *	@timing: timing parameters { interval, duration, timeout } in msec
4050  *	@deadline: deadline jiffies for the operation
4051  *	@online: optional out parameter indicating link onlineness
4052  *	@check_ready: optional callback to check link readiness
4053  *
4054  *	SATA phy-reset @link using DET bits of SControl register.
4055  *	After hardreset, link readiness is waited upon using
4056  *	ata_wait_ready() if @check_ready is specified.  LLDs are
4057  *	allowed to not specify @check_ready and perform the wait
4058  *	themselves after this function returns.  Device classification
4059  *	is the LLD's responsibility.
4060  *
4061  *	*@online is set to %true iff reset succeeded and @link is online
4062  *	after reset.
4063  *
4064  *	LOCKING:
4065  *	Kernel thread context (may sleep)
4066  *
4067  *	RETURNS:
4068  *	0 on success, -errno otherwise.
4069  */
4070 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
4071 			unsigned long deadline,
4072 			bool *online, int (*check_ready)(struct ata_link *))
4073 {
4074 	u32 scontrol;
4075 	int rc;
4076 
4077 	DPRINTK("ENTER\n");
4078 
4079 	if (online)
4080 		*online = false;
4081 
4082 	if (sata_set_spd_needed(link)) {
4083 		/* SATA spec says nothing about how to reconfigure
4084 		 * spd.  To be on the safe side, turn off phy during
4085 		 * reconfiguration.  This works for at least ICH7 AHCI
4086 		 * and Sil3124.
4087 		 */
4088 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4089 			goto out;
4090 
4091 		scontrol = (scontrol & 0x0f0) | 0x304;
4092 
4093 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4094 			goto out;
4095 
4096 		sata_set_spd(link);
4097 	}
4098 
4099 	/* issue phy wake/reset */
4100 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4101 		goto out;
4102 
4103 	scontrol = (scontrol & 0x0f0) | 0x301;
4104 
4105 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4106 		goto out;
4107 
4108 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4109 	 * 10.4.2 says at least 1 ms.
4110 	 */
4111 	ata_msleep(link->ap, 1);
4112 
4113 	/* bring link back */
4114 	rc = sata_link_resume(link, timing, deadline);
4115 	if (rc)
4116 		goto out;
4117 	/* if link is offline nothing more to do */
4118 	if (ata_phys_link_offline(link))
4119 		goto out;
4120 
4121 	/* Link is online.  From this point, -ENODEV too is an error. */
4122 	if (online)
4123 		*online = true;
4124 
4125 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
4126 		/* If PMP is supported, we have to do follow-up SRST.
4127 		 * Some PMPs don't send D2H Reg FIS after hardreset if
4128 		 * the first port is empty.  Wait only for
4129 		 * ATA_TMOUT_PMP_SRST_WAIT.
4130 		 */
4131 		if (check_ready) {
4132 			unsigned long pmp_deadline;
4133 
4134 			pmp_deadline = ata_deadline(jiffies,
4135 						    ATA_TMOUT_PMP_SRST_WAIT);
4136 			if (time_after(pmp_deadline, deadline))
4137 				pmp_deadline = deadline;
4138 			ata_wait_ready(link, pmp_deadline, check_ready);
4139 		}
4140 		rc = -EAGAIN;
4141 		goto out;
4142 	}
4143 
4144 	rc = 0;
4145 	if (check_ready)
4146 		rc = ata_wait_ready(link, deadline, check_ready);
4147  out:
4148 	if (rc && rc != -EAGAIN) {
4149 		/* online is set iff link is online && reset succeeded */
4150 		if (online)
4151 			*online = false;
4152 		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
4153 	}
4154 	DPRINTK("EXIT, rc=%d\n", rc);
4155 	return rc;
4156 }
4157 
4158 /**
4159  *	sata_std_hardreset - COMRESET w/o waiting or classification
4160  *	@link: link to reset
4161  *	@class: resulting class of attached device
4162  *	@deadline: deadline jiffies for the operation
4163  *
4164  *	Standard SATA COMRESET w/o waiting or classification.
4165  *
4166  *	LOCKING:
4167  *	Kernel thread context (may sleep)
4168  *
4169  *	RETURNS:
4170  *	0 if link offline, -EAGAIN if link online, -errno on errors.
4171  */
4172 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4173 		       unsigned long deadline)
4174 {
4175 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4176 	bool online;
4177 	int rc;
4178 
4179 	/* do hardreset */
4180 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4181 	return online ? -EAGAIN : rc;
4182 }
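
/*
 * Usage sketch (illustrative): drivers that want this standard COMRESET
 * behaviour typically point their port operations at it, e.g.
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= sata_std_hardreset,
 *	};
 *
 * my_ops is a hypothetical driver structure, shown only to illustrate how
 * the helper is wired up.
 */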
4183 
4184 /**
4185  *	ata_std_postreset - standard postreset callback
4186  *	@link: the target ata_link
4187  *	@classes: classes of attached devices
4188  *
4189  *	This function is invoked after a successful reset.  Note that
4190  *	the device might have been reset more than once using
4191  *	different reset methods before postreset is invoked.
4192  *
4193  *	LOCKING:
4194  *	Kernel thread context (may sleep)
4195  */
4196 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4197 {
4198 	u32 serror;
4199 
4200 	DPRINTK("ENTER\n");
4201 
4202 	/* reset complete, clear SError */
4203 	if (!sata_scr_read(link, SCR_ERROR, &serror))
4204 		sata_scr_write(link, SCR_ERROR, serror);
4205 
4206 	/* print link status */
4207 	sata_print_link_status(link);
4208 
4209 	DPRINTK("EXIT\n");
4210 }
4211 
4212 /**
4213  *	ata_dev_same_device - Determine whether new ID matches configured device
4214  *	@dev: device to compare against
4215  *	@new_class: class of the new device
4216  *	@new_id: IDENTIFY page of the new device
4217  *
4218  *	Compare @new_class and @new_id against @dev and determine
4219  *	whether @dev is the device indicated by @new_class and
4220  *	@new_id.
4221  *
4222  *	LOCKING:
4223  *	None.
4224  *
4225  *	RETURNS:
4226  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
4227  */
4228 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4229 			       const u16 *new_id)
4230 {
4231 	const u16 *old_id = dev->id;
4232 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
4233 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4234 
4235 	if (dev->class != new_class) {
4236 		ata_dev_info(dev, "class mismatch %d != %d\n",
4237 			     dev->class, new_class);
4238 		return 0;
4239 	}
4240 
4241 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4242 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4243 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4244 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4245 
4246 	if (strcmp(model[0], model[1])) {
4247 		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4248 			     model[0], model[1]);
4249 		return 0;
4250 	}
4251 
4252 	if (strcmp(serial[0], serial[1])) {
4253 		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4254 			     serial[0], serial[1]);
4255 		return 0;
4256 	}
4257 
4258 	return 1;
4259 }
4260 
4261 /**
4262  *	ata_dev_reread_id - Re-read IDENTIFY data
4263  *	@dev: target ATA device
4264  *	@readid_flags: read ID flags
4265  *
4266  *	Re-read IDENTIFY page and make sure @dev is still attached to
4267  *	the port.
4268  *
4269  *	LOCKING:
4270  *	Kernel thread context (may sleep)
4271  *
4272  *	RETURNS:
4273  *	0 on success, negative errno otherwise
4274  */
4275 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4276 {
4277 	unsigned int class = dev->class;
4278 	u16 *id = (void *)dev->link->ap->sector_buf;
4279 	int rc;
4280 
4281 	/* read ID data */
4282 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4283 	if (rc)
4284 		return rc;
4285 
4286 	/* is the device still there? */
4287 	if (!ata_dev_same_device(dev, class, id))
4288 		return -ENODEV;
4289 
4290 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4291 	return 0;
4292 }
4293 
4294 /**
4295  *	ata_dev_revalidate - Revalidate ATA device
4296  *	@dev: device to revalidate
4297  *	@new_class: new class code
4298  *	@readid_flags: read ID flags
4299  *
4300  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4301  *	port and reconfigure it according to the new IDENTIFY page.
4302  *
4303  *	LOCKING:
4304  *	Kernel thread context (may sleep)
4305  *
4306  *	RETURNS:
4307  *	0 on success, negative errno otherwise
4308  */
4309 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4310 		       unsigned int readid_flags)
4311 {
4312 	u64 n_sectors = dev->n_sectors;
4313 	u64 n_native_sectors = dev->n_native_sectors;
4314 	int rc;
4315 
4316 	if (!ata_dev_enabled(dev))
4317 		return -ENODEV;
4318 
4319 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4320 	if (ata_class_enabled(new_class) &&
4321 	    new_class != ATA_DEV_ATA &&
4322 	    new_class != ATA_DEV_ATAPI &&
4323 	    new_class != ATA_DEV_ZAC &&
4324 	    new_class != ATA_DEV_SEMB) {
4325 		ata_dev_info(dev, "class mismatch %u != %u\n",
4326 			     dev->class, new_class);
4327 		rc = -ENODEV;
4328 		goto fail;
4329 	}
4330 
4331 	/* re-read ID */
4332 	rc = ata_dev_reread_id(dev, readid_flags);
4333 	if (rc)
4334 		goto fail;
4335 
4336 	/* configure device according to the new ID */
4337 	rc = ata_dev_configure(dev);
4338 	if (rc)
4339 		goto fail;
4340 
4341 	/* verify n_sectors hasn't changed */
4342 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4343 	    dev->n_sectors == n_sectors)
4344 		return 0;
4345 
4346 	/* n_sectors has changed */
4347 	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4348 		     (unsigned long long)n_sectors,
4349 		     (unsigned long long)dev->n_sectors);
4350 
4351 	/*
4352 	 * Something could have caused HPA to be unlocked
4353 	 * involuntarily.  If n_native_sectors hasn't changed and the
4354 	 * new size matches it, keep the device.
4355 	 */
4356 	if (dev->n_native_sectors == n_native_sectors &&
4357 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4358 		ata_dev_warn(dev,
4359 			     "new n_sectors matches native, probably "
4360 			     "late HPA unlock, n_sectors updated\n");
4361 		/* use the larger n_sectors */
4362 		return 0;
4363 	}
4364 
4365 	/*
4366 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4367 	 * unlocking HPA in those cases.
4368 	 *
4369 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4370 	 */
4371 	if (dev->n_native_sectors == n_native_sectors &&
4372 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4373 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4374 		ata_dev_warn(dev,
4375 			     "old n_sectors matches native, probably "
4376 			     "late HPA lock, will try to unlock HPA\n");
4377 		/* try unlocking HPA */
4378 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4379 		rc = -EIO;
4380 	} else
4381 		rc = -ENODEV;
4382 
4383 	/* restore original n_[native_]sectors and fail */
4384 	dev->n_native_sectors = n_native_sectors;
4385 	dev->n_sectors = n_sectors;
4386  fail:
4387 	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4388 	return rc;
4389 }
4390 
4391 struct ata_blacklist_entry {
4392 	const char *model_num;
4393 	const char *model_rev;
4394 	unsigned long horkage;
4395 };
4396 
4397 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4398 	/* Devices with DMA related problems under Linux */
4399 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4400 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4401 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4402 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4403 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4404 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4405 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4406 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4407 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4408 	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4409 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4410 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4411 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4412 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4413 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4414 	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4415 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4416 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4417 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4418 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4419 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4420 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4421 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4422 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4423 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4424 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4425 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4426 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4427 	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4428 	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
4429 	/* Odd clown on sil3726/4726 PMPs */
4430 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4431 
4432 	/* Weird ATAPI devices */
4433 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4434 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4435 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4436 	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4437 
4438 	/*
4439 	 * Causes silent data corruption with higher max sects.
4440 	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4441 	 */
4442 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
4443 
4444 	/*
4445 	 * These devices time out with higher max sects.
4446 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4447 	 */
4448 	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4449 	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4450 
4451 	/* Devices we expect to fail diagnostics */
4452 
4453 	/* Devices where NCQ should be avoided */
4454 	/* NCQ is slow */
4455 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4456 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4457 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4458 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4459 	/* NCQ is broken */
4460 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4461 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4462 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4463 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4464 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4465 
4466 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4467 	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4468 						ATA_HORKAGE_FIRMWARE_WARN },
4469 
4470 	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4471 						ATA_HORKAGE_FIRMWARE_WARN },
4472 
4473 	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4474 						ATA_HORKAGE_FIRMWARE_WARN },
4475 
4476 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4477 						ATA_HORKAGE_FIRMWARE_WARN },
4478 
4479 	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4480 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4481 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4482 	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4483 
4484 	/* Blacklist entries taken from Silicon Image 3124/3132
4485 	   Windows driver .inf file - also several Linux problem reports */
4486 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4487 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4488 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4489 
4490 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4491 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4492 
4493 	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
4494 	   SD7SN6S256G and SD8SN8U256G */
4495 	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
4496 
4497 	/* devices which puke on READ_NATIVE_MAX */
4498 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4499 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4500 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4501 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4502 
4503 	/* this one allows HPA unlocking but fails IOs on the area */
4504 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4505 
4506 	/* Devices which report 1 sector over size HPA */
4507 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4508 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4509 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4510 
4511 	/* Devices which get the IVB wrong */
4512 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4513 	/* Maybe we should just blacklist TSSTcorp... */
4514 	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4515 
4516 	/* Devices that do not need bridging limits applied */
4517 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4518 	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4519 
4520 	/* Devices which aren't very happy with higher link speeds */
4521 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4522 	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4523 
4524 	/*
4525 	 * Devices which choke on SETXFER.  Applies only if both the
4526 	 * device and controller are SATA.
4527 	 */
4528 	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4529 	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4530 	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4531 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4532 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4533 
4534 	/* Crucial BX100 SSD 500GB has broken LPM support */
4535 	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
4536 
4537 	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4538 	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4539 						ATA_HORKAGE_ZERO_AFTER_TRIM |
4540 						ATA_HORKAGE_NOLPM, },
4541 	/* 512GB MX100 with newer firmware has only LPM issues */
4542 	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
4543 						ATA_HORKAGE_NOLPM, },
4544 
4545 	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4546 	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4547 						ATA_HORKAGE_ZERO_AFTER_TRIM |
4548 						ATA_HORKAGE_NOLPM, },
4549 	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4550 						ATA_HORKAGE_ZERO_AFTER_TRIM |
4551 						ATA_HORKAGE_NOLPM, },
4552 
4553 	/* These specific Samsung models/firmware-revs do not handle LPM well */
4554 	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
4555 	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
4556 	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
4557 	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
4558 
4559 	/* devices that don't properly handle queued TRIM commands */
4560 	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4561 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4562 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4563 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4564 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4565 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4566 	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4567 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4568 	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4569 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4570 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4571 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4572 	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4573 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4574 	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4575 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4576 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4577 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4578 
4579 	/* devices that don't properly handle TRIM commands */
4580 	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
4581 
4582 	/*
4583 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4584 	 * (Return Zero After Trim) flags in the ATA Command Set are
4585 	 * unreliable in the sense that they only define what happens if
4586 	 * the device successfully executed the DSM TRIM command. TRIM
4587 	 * is only advisory, however, and the device is free to silently
4588 	 * ignore all or parts of the request.
4589 	 *
4590 	 * Whitelist drives that are known to reliably return zeroes
4591 	 * after TRIM.
4592 	 */
4593 
4594 	/*
4595 	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4596 	 * that model before whitelisting all other intel SSDs.
4597 	 */
4598 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
4599 
4600 	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4601 	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4602 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4603 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4604 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4605 	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4606 	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4607 	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4608 
4609 	/*
4610 	 * Some WD SATA-I drives spin up and down erratically when the link
4611 	 * is put into the slumber mode.  We don't have a full list of the
4612 	 * affected devices.  Disable LPM if the device matches one of the
4613 	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4614 	 * lost too.
4615 	 *
4616 	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4617 	 */
4618 	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4619 	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4620 	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4621 	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4622 	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4623 	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4624 	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4625 
4626 	/* End Marker */
4627 	{ }
4628 };
4629 
4630 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4631 {
4632 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4633 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4634 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4635 
4636 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4637 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4638 
4639 	while (ad->model_num) {
4640 		if (glob_match(ad->model_num, model_num)) {
4641 			if (ad->model_rev == NULL)
4642 				return ad->horkage;
4643 			if (glob_match(ad->model_rev, model_rev))
4644 				return ad->horkage;
4645 		}
4646 		ad++;
4647 	}
4648 	return 0;
4649 }
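
/*
 * Example (illustrative): a drive reporting model "ST3320613AS" with
 * firmware "SD17" matches the "ST3320[68]13AS" / "SD1[5-9]" entry above
 * via glob_match(), so ata_dev_blacklisted() returns
 * ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN for it.  Entries with a
 * NULL model_rev match any firmware revision.
 */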
4650 
4651 static int ata_dma_blacklisted(const struct ata_device *dev)
4652 {
4653 	/* We don't support polling DMA.
4654 	 * Blacklist DMA for ATAPI devices with CDB-intr (use PIO instead)
4655 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4656 	 */
4657 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4658 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4659 		return 1;
4660 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4661 }
4662 
4663 /**
4664  *	ata_is_40wire		-	check drive side detection
4665  *	@dev: device
4666  *
4667  *	Perform drive side detection decoding, allowing for device vendors
4668  *	who can't follow the documentation.
4669  */
4670 
4671 static int ata_is_40wire(struct ata_device *dev)
4672 {
4673 	if (dev->horkage & ATA_HORKAGE_IVB)
4674 		return ata_drive_40wire_relaxed(dev->id);
4675 	return ata_drive_40wire(dev->id);
4676 }
4677 
4678 /**
4679  *	cable_is_40wire		-	40/80/SATA decider
4680  *	@ap: port to consider
4681  *
4682  *	This function encapsulates the policy for speed management
4683  *	in one place. At the moment we don't cache the result but
4684  *	there is a good case for setting ap->cbl to the result when
4685  *	we are called with unknown cables (and figuring out if it
4686  *	impacts hotplug at all).
4687  *
4688  *	Return 1 if the cable appears to be 40 wire.
4689  */
4690 
4691 static int cable_is_40wire(struct ata_port *ap)
4692 {
4693 	struct ata_link *link;
4694 	struct ata_device *dev;
4695 
4696 	/* If the controller thinks we are 40 wire, we are. */
4697 	if (ap->cbl == ATA_CBL_PATA40)
4698 		return 1;
4699 
4700 	/* If the controller thinks we are 80 wire, we are. */
4701 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4702 		return 0;
4703 
4704 	/* If the system is known to use a short 40 wire cable (e.g.
4705 	 * a laptop), then we allow 80 wire modes even if the drive
4706 	 * isn't sure.
4707 	 */
4708 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4709 		return 0;
4710 
4711 	/* If the controller doesn't know, we scan.
4712 	 *
4713 	 * Note: We look for all 40 wire detects at this point.  Any
4714 	 *       80 wire detect is taken to be 80 wire cable because
4715 	 * - in many setups only the one drive (slave if present) will
4716 	 *   give a valid detect
4717 	 * - if you have a non detect capable drive you don't want it
4718 	 *   to colour the choice
4719 	 */
4720 	ata_for_each_link(link, ap, EDGE) {
4721 		ata_for_each_dev(dev, link, ENABLED) {
4722 			if (!ata_is_40wire(dev))
4723 				return 0;
4724 		}
4725 	}
4726 	return 1;
4727 }
4728 
4729 /**
4730  *	ata_dev_xfermask - Compute supported xfermask of the given device
4731  *	@dev: Device to compute xfermask for
4732  *
4733  *	Compute supported xfermask of @dev and store it in
4734  *	dev->*_mask.  This function is responsible for applying all
4735  *	known limits including host controller limits, device
4736  *	blacklist, etc...
4737  *
4738  *	LOCKING:
4739  *	None.
4740  */
4741 static void ata_dev_xfermask(struct ata_device *dev)
4742 {
4743 	struct ata_link *link = dev->link;
4744 	struct ata_port *ap = link->ap;
4745 	struct ata_host *host = ap->host;
4746 	unsigned long xfer_mask;
4747 
4748 	/* controller modes available */
4749 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4750 				      ap->mwdma_mask, ap->udma_mask);
4751 
4752 	/* drive modes available */
4753 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4754 				       dev->mwdma_mask, dev->udma_mask);
4755 	xfer_mask &= ata_id_xfermask(dev->id);
4756 
4757 	/*
4758 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4759 	 *	cable
4760 	 */
4761 	if (ata_dev_pair(dev)) {
4762 		/* No PIO5 or PIO6 */
4763 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4764 		/* No MWDMA3 or MWDMA4 */
4765 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4766 	}
4767 
4768 	if (ata_dma_blacklisted(dev)) {
4769 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4770 		ata_dev_warn(dev,
4771 			     "device is on DMA blacklist, disabling DMA\n");
4772 	}
4773 
4774 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4775 	    host->simplex_claimed && host->simplex_claimed != ap) {
4776 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4777 		ata_dev_warn(dev,
4778 			     "simplex DMA is claimed by other device, disabling DMA\n");
4779 	}
4780 
4781 	if (ap->flags & ATA_FLAG_NO_IORDY)
4782 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4783 
4784 	if (ap->ops->mode_filter)
4785 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4786 
4787 	/* Apply cable rule here.  Don't apply it early because when
4788 	 * we handle hot plug the cable type can itself change.
4789 	 * Check this last so that we know if the transfer rate was
4790 	 * solely limited by the cable.
4791 	 * Unknown or 80 wire cables reported host side are checked
4792 	 * drive side as well. Cases where we know a 40 wire cable
4793 	 * is safely used for 80 wire modes are not checked here.
4794 	 */
4795 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4796 		/* UDMA/44 or higher would be available */
4797 		if (cable_is_40wire(ap)) {
4798 			ata_dev_warn(dev,
4799 				     "limited to UDMA/33 due to 40-wire cable\n");
4800 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4801 		}
4802 
4803 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4804 			    &dev->mwdma_mask, &dev->udma_mask);
4805 }
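
/*
 * Illustrative note: ata_pack_xfermask() simply shifts the per-class masks
 * into one word, roughly
 *
 *	xfer_mask = (pio_mask << ATA_SHIFT_PIO) |
 *		    (mwdma_mask << ATA_SHIFT_MWDMA) |
 *		    (udma_mask << ATA_SHIFT_UDMA);
 *
 * which is why the cable check above can test (0xF8 << ATA_SHIFT_UDMA) to
 * mean "UDMA/44 or faster" before clamping the mask to UDMA/33.
 */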
4806 
4807 /**
4808  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4809  *	@dev: Device to which command will be sent
4810  *
4811  *	Issue SET FEATURES - XFER MODE command to device @dev
4812  *	on the port it is attached to.
4813  *
4814  *	LOCKING:
4815  *	PCI/etc. bus probe sem.
4816  *
4817  *	RETURNS:
4818  *	0 on success, AC_ERR_* mask otherwise.
4819  */
4820 
4821 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4822 {
4823 	struct ata_taskfile tf;
4824 	unsigned int err_mask;
4825 
4826 	/* set up set-features taskfile */
4827 	DPRINTK("set features - xfer mode\n");
4828 
4829 	/* Some controllers and ATAPI devices show flaky interrupt
4830 	 * behavior after setting xfer mode.  Use polling instead.
4831 	 */
4832 	ata_tf_init(dev, &tf);
4833 	tf.command = ATA_CMD_SET_FEATURES;
4834 	tf.feature = SETFEATURES_XFER;
4835 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4836 	tf.protocol = ATA_PROT_NODATA;
4837 	/* If we are using IORDY we must send the mode setting command */
4838 	if (ata_pio_need_iordy(dev))
4839 		tf.nsect = dev->xfer_mode;
4840 	/* If the device has IORDY and the controller does not - turn it off */
4841 	else if (ata_id_has_iordy(dev->id))
4842 		tf.nsect = 0x01;
4843 	else /* In the ancient relic department - skip all of this */
4844 		return 0;
4845 
4846 	/* On some disks, this command causes spin-up, so we need longer timeout */
4847 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4848 
4849 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4850 	return err_mask;
4851 }
4852 
4853 /**
4854  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4855  *	@dev: Device to which command will be sent
4856  *	@enable: Whether to enable or disable the feature
4857  *	@feature: The feature code, passed in the sector count field
4858  *
4859  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4860  *	with the sector count set to @feature.
4861  *
4862  *	LOCKING:
4863  *	PCI/etc. bus probe sem.
4864  *
4865  *	RETURNS:
4866  *	0 on success, AC_ERR_* mask otherwise.
4867  */
4868 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4869 {
4870 	struct ata_taskfile tf;
4871 	unsigned int err_mask;
4872 	unsigned long timeout = 0;
4873 
4874 	/* set up set-features taskfile */
4875 	DPRINTK("set features - SATA features\n");
4876 
4877 	ata_tf_init(dev, &tf);
4878 	tf.command = ATA_CMD_SET_FEATURES;
4879 	tf.feature = enable;
4880 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4881 	tf.protocol = ATA_PROT_NODATA;
4882 	tf.nsect = feature;
4883 
4884 	if (enable == SETFEATURES_SPINUP)
4885 		timeout = ata_probe_timeout ?
4886 			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4887 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4888 
4889 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4890 	return err_mask;
4891 }
4892 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
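
/*
 * Usage sketch (illustrative): enabling a SATA feature such as DIPM is a
 * matter of passing the enable sub-command and the feature code, e.g.
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *
 * and disabling it again with SETFEATURES_SATA_DISABLE.  These constants
 * come from <linux/ata.h>.
 */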
4893 
4894 /**
4895  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4896  *	@dev: Device to which command will be sent
4897  *	@heads: Number of heads (taskfile parameter)
4898  *	@sectors: Number of sectors (taskfile parameter)
4899  *
4900  *	LOCKING:
4901  *	Kernel thread context (may sleep)
4902  *
4903  *	RETURNS:
4904  *	0 on success, AC_ERR_* mask otherwise.
4905  */
4906 static unsigned int ata_dev_init_params(struct ata_device *dev,
4907 					u16 heads, u16 sectors)
4908 {
4909 	struct ata_taskfile tf;
4910 	unsigned int err_mask;
4911 
4912 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4913 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4914 		return AC_ERR_INVALID;
4915 
4916 	/* set up init dev params taskfile */
4917 	DPRINTK("init dev params\n");
4918 
4919 	ata_tf_init(dev, &tf);
4920 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4921 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4922 	tf.protocol = ATA_PROT_NODATA;
4923 	tf.nsect = sectors;
4924 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4925 
4926 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4927 	/* A clean abort indicates an original or just out-of-spec drive
4928 	   and we should continue as we issue the setup based on the
4929 	   drive-reported working geometry */
4930 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4931 		err_mask = 0;
4932 
4933 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4934 	return err_mask;
4935 }
4936 
4937 /**
4938  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4939  *	@qc: Metadata associated with taskfile to check
4940  *
4941  *	Allow low-level driver to filter ATA PACKET commands, returning
4942  *	a status indicating whether or not it is OK to use DMA for the
4943  *	supplied PACKET command.
4944  *
4945  *	LOCKING:
4946  *	spin_lock_irqsave(host lock)
4947  *
4948  *	RETURNS: 0 when ATAPI DMA can be used
4949  *               nonzero otherwise
4950  */
4951 int atapi_check_dma(struct ata_queued_cmd *qc)
4952 {
4953 	struct ata_port *ap = qc->ap;
4954 
4955 	/* Don't allow DMA if the transfer isn't a multiple of 16 bytes.
4956 	 * Quite a few ATAPI devices choke on such DMA requests.
4957 	 */
4958 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4959 	    unlikely(qc->nbytes & 15))
4960 		return 1;
4961 
4962 	if (ap->ops->check_atapi_dma)
4963 		return ap->ops->check_atapi_dma(qc);
4964 
4965 	return 0;
4966 }
4967 
4968 /**
4969  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4970  *	@qc: ATA command in question
4971  *
4972  *	Non-NCQ commands cannot run with any other command, NCQ or
4973  *	not.  As the upper layer only knows the queue depth, we are
4974  *	responsible for maintaining exclusion.  This function checks
4975  *	whether a new command @qc can be issued.
4976  *
4977  *	LOCKING:
4978  *	spin_lock_irqsave(host lock)
4979  *
4980  *	RETURNS:
4981  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4982  */
4983 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4984 {
4985 	struct ata_link *link = qc->dev->link;
4986 
4987 	if (ata_is_ncq(qc->tf.protocol)) {
4988 		if (!ata_tag_valid(link->active_tag))
4989 			return 0;
4990 	} else {
4991 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4992 			return 0;
4993 	}
4994 
4995 	return ATA_DEFER_LINK;
4996 }
4997 
4998 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4999 
5000 /**
5001  *	ata_sg_init - Associate command with scatter-gather table.
5002  *	@qc: Command to be associated
5003  *	@sg: Scatter-gather table.
5004  *	@n_elem: Number of elements in s/g table.
5005  *
5006  *	Initialize the data-related elements of queued_cmd @qc
5007  *	to point to a scatter-gather table @sg, containing @n_elem
5008  *	elements.
5009  *
5010  *	LOCKING:
5011  *	spin_lock_irqsave(host lock)
5012  */
5013 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
5014 		 unsigned int n_elem)
5015 {
5016 	qc->sg = sg;
5017 	qc->n_elem = n_elem;
5018 	qc->cursg = qc->sg;
5019 }
5020 
5021 #ifdef CONFIG_HAS_DMA
5022 
5023 /**
5024  *	ata_sg_clean - Unmap DMA memory associated with command
5025  *	@qc: Command containing DMA memory to be released
5026  *
5027  *	Unmap all mapped DMA memory associated with this command.
5028  *
5029  *	LOCKING:
5030  *	spin_lock_irqsave(host lock)
5031  */
5032 static void ata_sg_clean(struct ata_queued_cmd *qc)
5033 {
5034 	struct ata_port *ap = qc->ap;
5035 	struct scatterlist *sg = qc->sg;
5036 	int dir = qc->dma_dir;
5037 
5038 	WARN_ON_ONCE(sg == NULL);
5039 
5040 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
5041 
5042 	if (qc->n_elem)
5043 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
5044 
5045 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5046 	qc->sg = NULL;
5047 }
5048 
5049 /**
5050  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
5051  *	@qc: Command with scatter-gather table to be mapped.
5052  *
5053  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
5054  *
5055  *	LOCKING:
5056  *	spin_lock_irqsave(host lock)
5057  *
5058  *	RETURNS:
5059  *	Zero on success, negative on error.
5060  *
5061  */
5062 static int ata_sg_setup(struct ata_queued_cmd *qc)
5063 {
5064 	struct ata_port *ap = qc->ap;
5065 	unsigned int n_elem;
5066 
5067 	VPRINTK("ENTER, ata%u\n", ap->print_id);
5068 
5069 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5070 	if (n_elem < 1)
5071 		return -1;
5072 
5073 	VPRINTK("%d sg elements mapped\n", n_elem);
5074 	qc->orig_n_elem = qc->n_elem;
5075 	qc->n_elem = n_elem;
5076 	qc->flags |= ATA_QCFLAG_DMAMAP;
5077 
5078 	return 0;
5079 }
5080 
5081 #else /* !CONFIG_HAS_DMA */
5082 
5083 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
5084 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
5085 
5086 #endif /* !CONFIG_HAS_DMA */
5087 
5088 /**
5089  *	swap_buf_le16 - swap halves of 16-bit words in place
5090  *	@buf:  Buffer to swap
5091  *	@buf_words:  Number of 16-bit words in buffer.
5092  *
5093  *	Swap halves of 16-bit words if needed to convert from
5094  *	little-endian byte order to native cpu byte order, or
5095  *	vice-versa.
5096  *
5097  *	LOCKING:
5098  *	Inherited from caller.
5099  */
5100 void swap_buf_le16(u16 *buf, unsigned int buf_words)
5101 {
5102 #ifdef __BIG_ENDIAN
5103 	unsigned int i;
5104 
5105 	for (i = 0; i < buf_words; i++)
5106 		buf[i] = le16_to_cpu(buf[i]);
5107 #endif /* __BIG_ENDIAN */
5108 }
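
/*
 * Example (illustrative): IDENTIFY DEVICE data arrives as 256 little-endian
 * 16-bit words, so PIO paths typically call
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * which is a no-op on little-endian CPUs and byte-swaps every word on
 * big-endian ones.
 */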
5109 
5110 /**
5111  *	ata_qc_new_init - Request an available ATA command, and initialize it
5112  *	@dev: Device from which we request an available command structure
5113  *	@tag: tag
5114  *
5115  *	LOCKING:
5116  *	None.
5117  */
5118 
5119 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
5120 {
5121 	struct ata_port *ap = dev->link->ap;
5122 	struct ata_queued_cmd *qc;
5123 
5124 	/* no command while frozen */
5125 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5126 		return NULL;
5127 
5128 	/* libsas case */
5129 	if (ap->flags & ATA_FLAG_SAS_HOST) {
5130 		tag = ata_sas_allocate_tag(ap);
5131 		if (tag < 0)
5132 			return NULL;
5133 	}
5134 
5135 	qc = __ata_qc_from_tag(ap, tag);
5136 	qc->tag = qc->hw_tag = tag;
5137 	qc->scsicmd = NULL;
5138 	qc->ap = ap;
5139 	qc->dev = dev;
5140 
5141 	ata_qc_reinit(qc);
5142 
5143 	return qc;
5144 }
5145 
5146 /**
5147  *	ata_qc_free - free unused ata_queued_cmd
5148  *	@qc: Command to complete
5149  *
5150  *	Designed to free an unused ata_queued_cmd object
5151  *	in case something prevents it from being used.
5152  *
5153  *	LOCKING:
5154  *	spin_lock_irqsave(host lock)
5155  */
5156 void ata_qc_free(struct ata_queued_cmd *qc)
5157 {
5158 	struct ata_port *ap;
5159 	unsigned int tag;
5160 
5161 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5162 	ap = qc->ap;
5163 
5164 	qc->flags = 0;
5165 	tag = qc->tag;
5166 	if (ata_tag_valid(tag)) {
5167 		qc->tag = ATA_TAG_POISON;
5168 		if (ap->flags & ATA_FLAG_SAS_HOST)
5169 			ata_sas_free_tag(tag, ap);
5170 	}
5171 }
5172 
5173 void __ata_qc_complete(struct ata_queued_cmd *qc)
5174 {
5175 	struct ata_port *ap;
5176 	struct ata_link *link;
5177 
5178 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5179 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
5180 	ap = qc->ap;
5181 	link = qc->dev->link;
5182 
5183 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5184 		ata_sg_clean(qc);
5185 
5186 	/* command should be marked inactive atomically with qc completion */
5187 	if (ata_is_ncq(qc->tf.protocol)) {
5188 		link->sactive &= ~(1 << qc->hw_tag);
5189 		if (!link->sactive)
5190 			ap->nr_active_links--;
5191 	} else {
5192 		link->active_tag = ATA_TAG_POISON;
5193 		ap->nr_active_links--;
5194 	}
5195 
5196 	/* clear exclusive status */
5197 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5198 		     ap->excl_link == link))
5199 		ap->excl_link = NULL;
5200 
5201 	/* atapi: mark qc as inactive to prevent the interrupt handler
5202 	 * from completing the command a second time later, before the error
5203 	 * handler is called (when rc != 0 and ATAPI request sense is needed)
5204 	 */
5205 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5206 	ap->qc_active &= ~(1ULL << qc->tag);
5207 
5208 	/* call completion callback */
5209 	qc->complete_fn(qc);
5210 }
5211 
5212 static void fill_result_tf(struct ata_queued_cmd *qc)
5213 {
5214 	struct ata_port *ap = qc->ap;
5215 
5216 	qc->result_tf.flags = qc->tf.flags;
5217 	ap->ops->qc_fill_rtf(qc);
5218 }
5219 
5220 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5221 {
5222 	struct ata_device *dev = qc->dev;
5223 
5224 	if (!ata_is_data(qc->tf.protocol))
5225 		return;
5226 
5227 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5228 		return;
5229 
5230 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5231 }
5232 
5233 /**
5234  *	ata_qc_complete - Complete an active ATA command
5235  *	@qc: Command to complete
5236  *
5237  *	Indicate to the mid and upper layers that an ATA command has
5238  *	completed, with either an ok or not-ok status.
5239  *
5240  *	Refrain from calling this function multiple times when
5241  *	successfully completing multiple NCQ commands.
5242  *	ata_qc_complete_multiple() should be used instead, which will
5243  *	properly update IRQ expect state.
5244  *
5245  *	LOCKING:
5246  *	spin_lock_irqsave(host lock)
5247  */
5248 void ata_qc_complete(struct ata_queued_cmd *qc)
5249 {
5250 	struct ata_port *ap = qc->ap;
5251 
5252 	/* Trigger the LED (if available) */
5253 	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
5254 
5255 	/* XXX: New EH and old EH use different mechanisms to
5256 	 * synchronize EH with regular execution path.
5257 	 *
5258 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5259 	 * Normal execution path is responsible for not accessing a
5260 	 * failed qc.  libata core enforces the rule by returning NULL
5261 	 * from ata_qc_from_tag() for failed qcs.
5262 	 *
5263 	 * Old EH depends on ata_qc_complete() nullifying completion
5264 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5265 	 * not synchronize with interrupt handler.  Only PIO task is
5266 	 * taken care of.
5267 	 */
5268 	if (ap->ops->error_handler) {
5269 		struct ata_device *dev = qc->dev;
5270 		struct ata_eh_info *ehi = &dev->link->eh_info;
5271 
5272 		if (unlikely(qc->err_mask))
5273 			qc->flags |= ATA_QCFLAG_FAILED;
5274 
5275 		/*
5276 		 * Finish internal commands without any further processing
5277 		 * and always with the result TF filled.
5278 		 */
5279 		if (unlikely(ata_tag_internal(qc->tag))) {
5280 			fill_result_tf(qc);
5281 			trace_ata_qc_complete_internal(qc);
5282 			__ata_qc_complete(qc);
5283 			return;
5284 		}
5285 
5286 		/*
5287 		 * Non-internal qc has failed.  Fill the result TF and
5288 		 * summon EH.
5289 		 */
5290 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5291 			fill_result_tf(qc);
5292 			trace_ata_qc_complete_failed(qc);
5293 			ata_qc_schedule_eh(qc);
5294 			return;
5295 		}
5296 
5297 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5298 
5299 		/* read result TF if requested */
5300 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5301 			fill_result_tf(qc);
5302 
5303 		trace_ata_qc_complete_done(qc);
5304 		/* Some commands need post-processing after successful
5305 		 * completion.
5306 		 */
5307 		switch (qc->tf.command) {
5308 		case ATA_CMD_SET_FEATURES:
5309 			if (qc->tf.feature != SETFEATURES_WC_ON &&
5310 			    qc->tf.feature != SETFEATURES_WC_OFF &&
5311 			    qc->tf.feature != SETFEATURES_RA_ON &&
5312 			    qc->tf.feature != SETFEATURES_RA_OFF)
5313 				break;
5314 			/* fall through */
5315 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5316 		case ATA_CMD_SET_MULTI: /* multi_count changed */
5317 			/* revalidate device */
5318 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5319 			ata_port_schedule_eh(ap);
5320 			break;
5321 
5322 		case ATA_CMD_SLEEP:
5323 			dev->flags |= ATA_DFLAG_SLEEPING;
5324 			break;
5325 		}
5326 
5327 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5328 			ata_verify_xfer(qc);
5329 
5330 		__ata_qc_complete(qc);
5331 	} else {
5332 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5333 			return;
5334 
5335 		/* read result TF if failed or requested */
5336 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5337 			fill_result_tf(qc);
5338 
5339 		__ata_qc_complete(qc);
5340 	}
5341 }
5342 
5343 /**
5344  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5345  *	@ap: port in question
5346  *	@qc_active: new qc_active mask
5347  *
5348  *	Complete in-flight commands.  This function is meant to be
5349  *	called from the low-level driver's interrupt routine to complete
5350  *	requests normally.  ap->qc_active and @qc_active are compared
5351  *	and commands are completed accordingly.
5352  *
5353  *	Always use this function when completing multiple NCQ commands
5354  *	from IRQ handlers instead of calling ata_qc_complete()
5355  *	multiple times to keep IRQ expect status properly in sync.
5356  *
5357  *	LOCKING:
5358  *	spin_lock_irqsave(host lock)
5359  *
5360  *	RETURNS:
5361  *	Number of completed commands on success, -errno otherwise.
5362  */
5363 int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
5364 {
5365 	u64 done_mask, ap_qc_active = ap->qc_active;
5366 	int nr_done = 0;
5367 
5368 	/*
5369 	 * If the internal tag is set on ap->qc_active, then we care about
5370 	 * bit0 on the passed in qc_active mask. Move that bit up to match
5371 	 * the internal tag.
5372 	 */
5373 	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
5374 		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
5375 		qc_active ^= qc_active & 0x01;
5376 	}
5377 
5378 	done_mask = ap_qc_active ^ qc_active;
5379 
5380 	if (unlikely(done_mask & qc_active)) {
5381 		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
5382 			     ap->qc_active, qc_active);
5383 		return -EINVAL;
5384 	}
5385 
5386 	while (done_mask) {
5387 		struct ata_queued_cmd *qc;
5388 		unsigned int tag = __ffs64(done_mask);
5389 
5390 		qc = ata_qc_from_tag(ap, tag);
5391 		if (qc) {
5392 			ata_qc_complete(qc);
5393 			nr_done++;
5394 		}
5395 		done_mask &= ~(1ULL << tag);
5396 	}
5397 
5398 	return nr_done;
5399 }
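
/*
 * Usage sketch (illustrative): an NCQ-capable LLD's interrupt handler
 * typically reads the controller's "commands still outstanding" bitmap and
 * hands it straight to this helper, e.g.
 *
 *	u32 qc_active = my_read_active_tags(ap);   (hypothetical register read)
 *
 *	rc = ata_qc_complete_multiple(ap, qc_active);
 *
 * Every tag that is set in ap->qc_active but clear in @qc_active is then
 * completed in one pass.
 */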
5400 
5401 /**
5402  *	ata_qc_issue - issue taskfile to device
5403  *	@qc: command to issue to device
5404  *
5405  *	Prepare an ATA command for submission to the device.
5406  *	This includes mapping the data into a DMA-able
5407  *	area, filling in the S/G table, and finally
5408  *	writing the taskfile to hardware, starting the command.
5409  *
5410  *	LOCKING:
5411  *	spin_lock_irqsave(host lock)
5412  */
5413 void ata_qc_issue(struct ata_queued_cmd *qc)
5414 {
5415 	struct ata_port *ap = qc->ap;
5416 	struct ata_link *link = qc->dev->link;
5417 	u8 prot = qc->tf.protocol;
5418 
5419 	/* Make sure only one non-NCQ command is outstanding.  The
5420 	 * check is skipped for old EH because it reuses active qc to
5421 	 * request ATAPI sense.
5422 	 */
5423 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5424 
5425 	if (ata_is_ncq(prot)) {
5426 		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
5427 
5428 		if (!link->sactive)
5429 			ap->nr_active_links++;
5430 		link->sactive |= 1 << qc->hw_tag;
5431 	} else {
5432 		WARN_ON_ONCE(link->sactive);
5433 
5434 		ap->nr_active_links++;
5435 		link->active_tag = qc->tag;
5436 	}
5437 
5438 	qc->flags |= ATA_QCFLAG_ACTIVE;
5439 	ap->qc_active |= 1ULL << qc->tag;
5440 
5441 	/*
5442 	 * We guarantee to LLDs that they will have at least one
5443 	 * non-zero sg if the command is a data command.
5444 	 */
5445 	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5446 		goto sys_err;
5447 
5448 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5449 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5450 		if (ata_sg_setup(qc))
5451 			goto sys_err;
5452 
5453 	/* if device is sleeping, schedule reset and abort the link */
5454 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5455 		link->eh_info.action |= ATA_EH_RESET;
5456 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5457 		ata_link_abort(link);
5458 		return;
5459 	}
5460 
5461 	ap->ops->qc_prep(qc);
5462 	trace_ata_qc_issue(qc);
5463 	qc->err_mask |= ap->ops->qc_issue(qc);
5464 	if (unlikely(qc->err_mask))
5465 		goto err;
5466 	return;
5467 
5468 sys_err:
5469 	qc->err_mask |= AC_ERR_SYSTEM;
5470 err:
5471 	ata_qc_complete(qc);
5472 }
5473 
5474 /**
5475  *	sata_scr_valid - test whether SCRs are accessible
5476  *	@link: ATA link to test SCR accessibility for
5477  *
5478  *	Test whether SCRs are accessible for @link.
5479  *
5480  *	LOCKING:
5481  *	None.
5482  *
5483  *	RETURNS:
5484  *	1 if SCRs are accessible, 0 otherwise.
5485  */
5486 int sata_scr_valid(struct ata_link *link)
5487 {
5488 	struct ata_port *ap = link->ap;
5489 
5490 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5491 }
5492 
5493 /**
5494  *	sata_scr_read - read SCR register of the specified port
5495  *	@link: ATA link to read SCR for
5496  *	@reg: SCR to read
5497  *	@val: Place to store read value
5498  *
5499  *	Read SCR register @reg of @link into *@val.  This function is
5500  *	guaranteed to succeed if @link is ap->link, the cable type of
5501  *	the port is SATA and the port implements ->scr_read.
5502  *
5503  *	LOCKING:
5504  *	None if @link is ap->link.  Kernel thread context otherwise.
5505  *
5506  *	RETURNS:
5507  *	0 on success, negative errno on failure.
5508  */
5509 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5510 {
5511 	if (ata_is_host_link(link)) {
5512 		if (sata_scr_valid(link))
5513 			return link->ap->ops->scr_read(link, reg, val);
5514 		return -EOPNOTSUPP;
5515 	}
5516 
5517 	return sata_pmp_scr_read(link, reg, val);
5518 }
5519 
5520 /**
5521  *	sata_scr_write - write SCR register of the specified port
5522  *	@link: ATA link to write SCR for
5523  *	@reg: SCR to write
5524  *	@val: value to write
5525  *
5526  *	Write @val to SCR register @reg of @link.  This function is
5527  *	guaranteed to succeed if @link is ap->link, the cable type of
5528  *	the port is SATA and the port implements ->scr_write.
5529  *
5530  *	LOCKING:
5531  *	None if @link is ap->link.  Kernel thread context otherwise.
5532  *
5533  *	RETURNS:
5534  *	0 on success, negative errno on failure.
5535  */
5536 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5537 {
5538 	if (ata_is_host_link(link)) {
5539 		if (sata_scr_valid(link))
5540 			return link->ap->ops->scr_write(link, reg, val);
5541 		return -EOPNOTSUPP;
5542 	}
5543 
5544 	return sata_pmp_scr_write(link, reg, val);
5545 }
5546 
5547 /**
5548  *	sata_scr_write_flush - write SCR register of the specified port and flush
5549  *	@link: ATA link to write SCR for
5550  *	@reg: SCR to write
5551  *	@val: value to write
5552  *
5553  *	This function is identical to sata_scr_write() except that this
5554  *	function performs a flush after writing to the register.
5555  *
5556  *	LOCKING:
5557  *	None if @link is ap->link.  Kernel thread context otherwise.
5558  *
5559  *	RETURNS:
5560  *	0 on success, negative errno on failure.
5561  */
5562 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5563 {
5564 	if (ata_is_host_link(link)) {
5565 		int rc;
5566 
5567 		if (sata_scr_valid(link)) {
5568 			rc = link->ap->ops->scr_write(link, reg, val);
5569 			if (rc == 0)
5570 				rc = link->ap->ops->scr_read(link, reg, &val);
5571 			return rc;
5572 		}
5573 		return -EOPNOTSUPP;
5574 	}
5575 
5576 	return sata_pmp_scr_write(link, reg, val);
5577 }
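/*
 * Usage sketch for the SCR helpers above, assuming kernel thread context
 * and a host link with accessible SCRs; error handling is abbreviated and
 * the SControl manipulation is only illustrative:
 *
 *	u32 sstatus, scontrol;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus))
 *		return;					// SCRs not accessible
 *	sata_scr_read(link, SCR_CONTROL, &scontrol);
 *	scontrol = (scontrol & ~0xf) | 0x1;		// DET = 1, request COMRESET
 *	sata_scr_write_flush(link, SCR_CONTROL, scontrol);
 */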
5578 
5579 /**
5580  *	ata_phys_link_online - test whether the given link is online
5581  *	@link: ATA link to test
5582  *
5583  *	Test whether @link is online.  Note that this function returns
5584  *	false if the online status of @link cannot be obtained, so
5585  *	ata_link_online(link) != !ata_link_offline(link).
5586  *
5587  *	LOCKING:
5588  *	None.
5589  *
5590  *	RETURNS:
5591  *	True if the port online status is available and online.
5592  */
5593 bool ata_phys_link_online(struct ata_link *link)
5594 {
5595 	u32 sstatus;
5596 
5597 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5598 	    ata_sstatus_online(sstatus))
5599 		return true;
5600 	return false;
5601 }
5602 
5603 /**
5604  *	ata_phys_link_offline - test whether the given link is offline
5605  *	@link: ATA link to test
5606  *
5607  *	Test whether @link is offline.  Note that this function
5608  *	returns false if the offline status of @link cannot be obtained, so
5609  *	ata_link_online(link) != !ata_link_offline(link).
5610  *
5611  *	LOCKING:
5612  *	None.
5613  *
5614  *	RETURNS:
5615  *	True if the port offline status is available and offline.
5616  */
5617 bool ata_phys_link_offline(struct ata_link *link)
5618 {
5619 	u32 sstatus;
5620 
5621 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5622 	    !ata_sstatus_online(sstatus))
5623 		return true;
5624 	return false;
5625 }
5626 
5627 /**
5628  *	ata_link_online - test whether the given link is online
5629  *	@link: ATA link to test
5630  *
5631  *	Test whether @link is online.  This is identical to
5632  *	ata_phys_link_online() when there's no slave link.  When
5633  *	there's a slave link, this function should only be called on
5634  *	the master link and will return true if any of M/S links is
5635  *	online.
5636  *
5637  *	LOCKING:
5638  *	None.
5639  *
5640  *	RETURNS:
5641  *	True if the port online status is available and online.
5642  */
5643 bool ata_link_online(struct ata_link *link)
5644 {
5645 	struct ata_link *slave = link->ap->slave_link;
5646 
5647 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5648 
5649 	return ata_phys_link_online(link) ||
5650 		(slave && ata_phys_link_online(slave));
5651 }
5652 
5653 /**
5654  *	ata_link_offline - test whether the given link is offline
5655  *	@link: ATA link to test
5656  *
5657  *	Test whether @link is offline.  This is identical to
5658  *	ata_phys_link_offline() when there's no slave link.  When
5659  *	there's a slave link, this function should only be called on
5660  *	the master link and will return true if both M/S links are
5661  *	offline.
5662  *
5663  *	LOCKING:
5664  *	None.
5665  *
5666  *	RETURNS:
5667  *	True if the port offline status is available and offline.
5668  */
5669 bool ata_link_offline(struct ata_link *link)
5670 {
5671 	struct ata_link *slave = link->ap->slave_link;
5672 
5673 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5674 
5675 	return ata_phys_link_offline(link) &&
5676 		(!slave || ata_phys_link_offline(slave));
5677 }
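/*
 * Note that the two helpers above are not complements: both return false
 * when the link status cannot be read at all.  A sketch of how a caller
 * might distinguish the three cases:
 *
 *	if (ata_link_online(link))
 *		;	// link is known to be up
 *	else if (ata_link_offline(link))
 *		;	// link is known to be down
 *	else
 *		;	// status unavailable, neither helper is authoritative
 */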
5678 
5679 #ifdef CONFIG_PM
5680 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5681 				unsigned int action, unsigned int ehi_flags,
5682 				bool async)
5683 {
5684 	struct ata_link *link;
5685 	unsigned long flags;
5686 
5687 	/* Previous resume operation might still be in
5688 	 * progress.  Wait for PM_PENDING to clear.
5689 	 */
5690 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5691 		ata_port_wait_eh(ap);
5692 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5693 	}
5694 
5695 	/* request PM ops to EH */
5696 	spin_lock_irqsave(ap->lock, flags);
5697 
5698 	ap->pm_mesg = mesg;
5699 	ap->pflags |= ATA_PFLAG_PM_PENDING;
5700 	ata_for_each_link(link, ap, HOST_FIRST) {
5701 		link->eh_info.action |= action;
5702 		link->eh_info.flags |= ehi_flags;
5703 	}
5704 
5705 	ata_port_schedule_eh(ap);
5706 
5707 	spin_unlock_irqrestore(ap->lock, flags);
5708 
5709 	if (!async) {
5710 		ata_port_wait_eh(ap);
5711 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5712 	}
5713 }
5714 
5715 /*
5716  * On some hardware, the device fails to respond after being spun down for suspend.  As
5717  * the device won't be used before being resumed, we don't need to touch the
5718  * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
5719  *
5720  * http://thread.gmane.org/gmane.linux.ide/46764
5721  */
5722 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5723 						 | ATA_EHI_NO_AUTOPSY
5724 						 | ATA_EHI_NO_RECOVERY;
5725 
5726 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5727 {
5728 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5729 }
5730 
5731 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5732 {
5733 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5734 }
5735 
5736 static int ata_port_pm_suspend(struct device *dev)
5737 {
5738 	struct ata_port *ap = to_ata_port(dev);
5739 
5740 	if (pm_runtime_suspended(dev))
5741 		return 0;
5742 
5743 	ata_port_suspend(ap, PMSG_SUSPEND);
5744 	return 0;
5745 }
5746 
5747 static int ata_port_pm_freeze(struct device *dev)
5748 {
5749 	struct ata_port *ap = to_ata_port(dev);
5750 
5751 	if (pm_runtime_suspended(dev))
5752 		return 0;
5753 
5754 	ata_port_suspend(ap, PMSG_FREEZE);
5755 	return 0;
5756 }
5757 
5758 static int ata_port_pm_poweroff(struct device *dev)
5759 {
5760 	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5761 	return 0;
5762 }
5763 
5764 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5765 						| ATA_EHI_QUIET;
5766 
5767 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5768 {
5769 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5770 }
5771 
5772 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5773 {
5774 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5775 }
5776 
5777 static int ata_port_pm_resume(struct device *dev)
5778 {
5779 	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5780 	pm_runtime_disable(dev);
5781 	pm_runtime_set_active(dev);
5782 	pm_runtime_enable(dev);
5783 	return 0;
5784 }
5785 
5786 /*
5787  * For ODDs, the upper layer will poll for media change every few seconds,
5788  * which makes the device enter and leave the suspend state every few
5789  * seconds.  As each suspend causes a hard/soft reset, the gain from runtime
5790  * suspend is very small and the ODD may malfunction after constantly being
5791  * reset.  So the idle callback here will not proceed to suspend if a
5792  * non-ZPODD capable ODD is attached to the port.
5793  */
5794 static int ata_port_runtime_idle(struct device *dev)
5795 {
5796 	struct ata_port *ap = to_ata_port(dev);
5797 	struct ata_link *link;
5798 	struct ata_device *adev;
5799 
5800 	ata_for_each_link(link, ap, HOST_FIRST) {
5801 		ata_for_each_dev(adev, link, ENABLED)
5802 			if (adev->class == ATA_DEV_ATAPI &&
5803 			    !zpodd_dev_enabled(adev))
5804 				return -EBUSY;
5805 	}
5806 
5807 	return 0;
5808 }
5809 
5810 static int ata_port_runtime_suspend(struct device *dev)
5811 {
5812 	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5813 	return 0;
5814 }
5815 
5816 static int ata_port_runtime_resume(struct device *dev)
5817 {
5818 	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5819 	return 0;
5820 }
5821 
5822 static const struct dev_pm_ops ata_port_pm_ops = {
5823 	.suspend = ata_port_pm_suspend,
5824 	.resume = ata_port_pm_resume,
5825 	.freeze = ata_port_pm_freeze,
5826 	.thaw = ata_port_pm_resume,
5827 	.poweroff = ata_port_pm_poweroff,
5828 	.restore = ata_port_pm_resume,
5829 
5830 	.runtime_suspend = ata_port_runtime_suspend,
5831 	.runtime_resume = ata_port_runtime_resume,
5832 	.runtime_idle = ata_port_runtime_idle,
5833 };
5834 
5835 /* sas ports don't participate in pm runtime management of ata_ports,
5836  * and need to resume ata devices at the domain level, not the per-port
5837  * level. sas suspend/resume is async to allow parallel port recovery
5838  * since sas has multiple ata_port instances per Scsi_Host.
5839  */
5840 void ata_sas_port_suspend(struct ata_port *ap)
5841 {
5842 	ata_port_suspend_async(ap, PMSG_SUSPEND);
5843 }
5844 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5845 
5846 void ata_sas_port_resume(struct ata_port *ap)
5847 {
5848 	ata_port_resume_async(ap, PMSG_RESUME);
5849 }
5850 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5851 
5852 /**
5853  *	ata_host_suspend - suspend host
5854  *	@host: host to suspend
5855  *	@mesg: PM message
5856  *
5857  *	Suspend @host.  Actual operation is performed by port suspend.
5858  */
5859 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5860 {
5861 	host->dev->power.power_state = mesg;
5862 	return 0;
5863 }
5864 
5865 /**
5866  *	ata_host_resume - resume host
5867  *	@host: host to resume
5868  *
5869  *	Resume @host.  Actual operation is performed by port resume.
5870  */
5871 void ata_host_resume(struct ata_host *host)
5872 {
5873 	host->dev->power.power_state = PMSG_ON;
5874 }
5875 #endif
5876 
5877 const struct device_type ata_port_type = {
5878 	.name = "ata_port",
5879 #ifdef CONFIG_PM
5880 	.pm = &ata_port_pm_ops,
5881 #endif
5882 };
5883 
5884 /**
5885  *	ata_dev_init - Initialize an ata_device structure
5886  *	@dev: Device structure to initialize
5887  *
5888  *	Initialize @dev in preparation for probing.
5889  *
5890  *	LOCKING:
5891  *	Inherited from caller.
5892  */
5893 void ata_dev_init(struct ata_device *dev)
5894 {
5895 	struct ata_link *link = ata_dev_phys_link(dev);
5896 	struct ata_port *ap = link->ap;
5897 	unsigned long flags;
5898 
5899 	/* SATA spd limit is bound to the attached device, reset together */
5900 	link->sata_spd_limit = link->hw_sata_spd_limit;
5901 	link->sata_spd = 0;
5902 
5903 	/* High bits of dev->flags are used to record warm plug
5904 	 * requests which occur asynchronously.  Synchronize using
5905 	 * host lock.
5906 	 */
5907 	spin_lock_irqsave(ap->lock, flags);
5908 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5909 	dev->horkage = 0;
5910 	spin_unlock_irqrestore(ap->lock, flags);
5911 
5912 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5913 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5914 	dev->pio_mask = UINT_MAX;
5915 	dev->mwdma_mask = UINT_MAX;
5916 	dev->udma_mask = UINT_MAX;
5917 }
5918 
5919 /**
5920  *	ata_link_init - Initialize an ata_link structure
5921  *	@ap: ATA port link is attached to
5922  *	@link: Link structure to initialize
5923  *	@pmp: Port multiplier port number
5924  *
5925  *	Initialize @link.
5926  *
5927  *	LOCKING:
5928  *	Kernel thread context (may sleep)
5929  */
5930 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5931 {
5932 	int i;
5933 
5934 	/* clear everything except for devices */
5935 	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5936 	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5937 
5938 	link->ap = ap;
5939 	link->pmp = pmp;
5940 	link->active_tag = ATA_TAG_POISON;
5941 	link->hw_sata_spd_limit = UINT_MAX;
5942 
5943 	/* can't use iterator, ap isn't initialized yet */
5944 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5945 		struct ata_device *dev = &link->device[i];
5946 
5947 		dev->link = link;
5948 		dev->devno = dev - link->device;
5949 #ifdef CONFIG_ATA_ACPI
5950 		dev->gtf_filter = ata_acpi_gtf_filter;
5951 #endif
5952 		ata_dev_init(dev);
5953 	}
5954 }
5955 
5956 /**
5957  *	sata_link_init_spd - Initialize link->sata_spd_limit
5958  *	@link: Link to configure sata_spd_limit for
5959  *
5960  *	Initialize @link->[hw_]sata_spd_limit to the currently
5961  *	configured value.
5962  *
5963  *	LOCKING:
5964  *	Kernel thread context (may sleep).
5965  *
5966  *	RETURNS:
5967  *	0 on success, -errno on failure.
5968  */
5969 int sata_link_init_spd(struct ata_link *link)
5970 {
5971 	u8 spd;
5972 	int rc;
5973 
5974 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5975 	if (rc)
5976 		return rc;
5977 
5978 	spd = (link->saved_scontrol >> 4) & 0xf;
5979 	if (spd)
5980 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5981 
5982 	ata_force_link_limits(link);
5983 
5984 	link->sata_spd_limit = link->hw_sata_spd_limit;
5985 
5986 	return 0;
5987 }
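/*
 * Worked example of the limit computation above (the SControl value is
 * illustrative): if SCR_CONTROL reads 0x0320, the SPD field
 * ((0x0320 >> 4) & 0xf) is 2, so hw_sata_spd_limit is masked with
 * (1 << 2) - 1 = 0x3 and only gen1 (1.5 Gbps) and gen2 (3.0 Gbps)
 * remain allowed.
 */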
5988 
5989 /**
5990  *	ata_port_alloc - allocate and initialize basic ATA port resources
5991  *	@host: ATA host this allocated port belongs to
5992  *
5993  *	Allocate and initialize basic ATA port resources.
5994  *
5995  *	RETURNS:
5996  *	Allocate ATA port on success, NULL on failure.
5997  *	Allocated ATA port on success, NULL on failure.
5998  *	LOCKING:
5999  *	Inherited from calling layer (may sleep).
6000  */
6001 struct ata_port *ata_port_alloc(struct ata_host *host)
6002 {
6003 	struct ata_port *ap;
6004 
6005 	DPRINTK("ENTER\n");
6006 
6007 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6008 	if (!ap)
6009 		return NULL;
6010 
6011 	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
6012 	ap->lock = &host->lock;
6013 	ap->print_id = -1;
6014 	ap->local_port_no = -1;
6015 	ap->host = host;
6016 	ap->dev = host->dev;
6017 
6018 #if defined(ATA_VERBOSE_DEBUG)
6019 	/* turn on all debugging levels */
6020 	ap->msg_enable = 0x00FF;
6021 #elif defined(ATA_DEBUG)
6022 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6023 #else
6024 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6025 #endif
6026 
6027 	mutex_init(&ap->scsi_scan_mutex);
6028 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6029 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6030 	INIT_LIST_HEAD(&ap->eh_done_q);
6031 	init_waitqueue_head(&ap->eh_wait_q);
6032 	init_completion(&ap->park_req_pending);
6033 	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
6034 		    TIMER_DEFERRABLE);
6035 
6036 	ap->cbl = ATA_CBL_NONE;
6037 
6038 	ata_link_init(ap, &ap->link, 0);
6039 
6040 #ifdef ATA_IRQ_TRAP
6041 	ap->stats.unhandled_irq = 1;
6042 	ap->stats.idle_irq = 1;
6043 #endif
6044 	ata_sff_port_init(ap);
6045 
6046 	return ap;
6047 }
6048 
6049 static void ata_devres_release(struct device *gendev, void *res)
6050 {
6051 	struct ata_host *host = dev_get_drvdata(gendev);
6052 	int i;
6053 
6054 	for (i = 0; i < host->n_ports; i++) {
6055 		struct ata_port *ap = host->ports[i];
6056 
6057 		if (!ap)
6058 			continue;
6059 
6060 		if (ap->scsi_host)
6061 			scsi_host_put(ap->scsi_host);
6062 
6063 	}
6064 
6065 	dev_set_drvdata(gendev, NULL);
6066 	ata_host_put(host);
6067 }
6068 
6069 static void ata_host_release(struct kref *kref)
6070 {
6071 	struct ata_host *host = container_of(kref, struct ata_host, kref);
6072 	int i;
6073 
6074 	for (i = 0; i < host->n_ports; i++) {
6075 		struct ata_port *ap = host->ports[i];
6076 
6077 		kfree(ap->pmp_link);
6078 		kfree(ap->slave_link);
6079 		kfree(ap);
6080 		host->ports[i] = NULL;
6081 	}
6082 	kfree(host);
6083 }
6084 
6085 void ata_host_get(struct ata_host *host)
6086 {
6087 	kref_get(&host->kref);
6088 }
6089 
6090 void ata_host_put(struct ata_host *host)
6091 {
6092 	kref_put(&host->kref, ata_host_release);
6093 }
6094 
6095 /**
6096  *	ata_host_alloc - allocate and init basic ATA host resources
6097  *	@dev: generic device this host is associated with
6098  *	@max_ports: maximum number of ATA ports associated with this host
6099  *
6100  *	Allocate and initialize basic ATA host resources.  An LLD calls
6101  *	this function to allocate a host, fully initializes it and then
6102  *	attaches it using ata_host_register().
6103  *
6104  *	@max_ports ports are allocated and host->n_ports is
6105  *	initialized to @max_ports.  The caller is allowed to decrease
6106  *	host->n_ports before calling ata_host_register().  The unused
6107  *	ports will be automatically freed on registration.
6108  *
6109  *	RETURNS:
6110  *	Allocated ATA host on success, NULL on failure.
6111  *
6112  *	LOCKING:
6113  *	Inherited from calling layer (may sleep).
6114  */
6115 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6116 {
6117 	struct ata_host *host;
6118 	size_t sz;
6119 	int i;
6120 	void *dr;
6121 
6122 	DPRINTK("ENTER\n");
6123 
6124 	/* alloc a container for our list of ATA ports (buses) */
6125 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6126 	host = kzalloc(sz, GFP_KERNEL);
6127 	if (!host)
6128 		return NULL;
6129 
6130 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6131 		goto err_free;
6132 
6133 	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
6134 	if (!dr)
6135 		goto err_out;
6136 
6137 	devres_add(dev, dr);
6138 	dev_set_drvdata(dev, host);
6139 
6140 	spin_lock_init(&host->lock);
6141 	mutex_init(&host->eh_mutex);
6142 	host->dev = dev;
6143 	host->n_ports = max_ports;
6144 	kref_init(&host->kref);
6145 
6146 	/* allocate ports bound to this host */
6147 	for (i = 0; i < max_ports; i++) {
6148 		struct ata_port *ap;
6149 
6150 		ap = ata_port_alloc(host);
6151 		if (!ap)
6152 			goto err_out;
6153 
6154 		ap->port_no = i;
6155 		host->ports[i] = ap;
6156 	}
6157 
6158 	devres_remove_group(dev, NULL);
6159 	return host;
6160 
6161  err_out:
6162 	devres_release_group(dev, NULL);
6163  err_free:
6164 	kfree(host);
6165 	return NULL;
6166 }
6167 
6168 /**
6169  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6170  *	@dev: generic device this host is associated with
6171  *	@ppi: array of ATA port_info to initialize host with
6172  *	@n_ports: number of ATA ports attached to this host
6173  *
6174  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6175  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6176  *	last entry will be used for the remaining ports.
6177  *
6178  *	RETURNS:
6179  *	Allocated ATA host on success, NULL on failure.
6180  *
6181  *	LOCKING:
6182  *	Inherited from calling layer (may sleep).
6183  */
6184 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6185 				      const struct ata_port_info * const * ppi,
6186 				      int n_ports)
6187 {
6188 	const struct ata_port_info *pi;
6189 	struct ata_host *host;
6190 	int i, j;
6191 
6192 	host = ata_host_alloc(dev, n_ports);
6193 	if (!host)
6194 		return NULL;
6195 
6196 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6197 		struct ata_port *ap = host->ports[i];
6198 
6199 		if (ppi[j])
6200 			pi = ppi[j++];
6201 
6202 		ap->pio_mask = pi->pio_mask;
6203 		ap->mwdma_mask = pi->mwdma_mask;
6204 		ap->udma_mask = pi->udma_mask;
6205 		ap->flags |= pi->flags;
6206 		ap->link.flags |= pi->link_flags;
6207 		ap->ops = pi->port_ops;
6208 
6209 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6210 			host->ops = pi->port_ops;
6211 	}
6212 
6213 	return host;
6214 }
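/*
 * Usage sketch with a driver-local port_info (my_port_info and the masks
 * chosen here are illustrative); because @ppi is NULL terminated and
 * shorter than @n_ports, the single entry is reused for both ports:
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &sata_port_ops,	// placeholder ops
 *	};
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 */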
6215 
6216 /**
6217  *	ata_slave_link_init - initialize slave link
6218  *	@ap: port to initialize slave link for
6219  *
6220  *	Create and initialize slave link for @ap.  This enables slave
6221  *	link handling on the port.
6222  *
6223  *	In libata, a port contains links and a link contains devices.
6224  *	There is a single host link but if a PMP is attached to it,
6225  *	there can be multiple fan-out links.  On SATA, there's usually
6226  *	a single device connected to a link but PATA and SATA
6227  *	controllers emulating a TF based interface can have two - master
6228  *	and slave.
6229  *
6230  *	However, there are a few controllers which don't fit into this
6231  *	abstraction too well - SATA controllers which emulate TF
6232  *	interface with both master and slave devices but also have
6233  *	separate SCR register sets for each device.  These controllers
6234  *	need separate links for physical link handling
6235  *	(e.g. onlineness, link speed) but should be treated like a
6236  *	traditional M/S controller for everything else (e.g. command
6237  *	issue, softreset).
6238  *
6239  *	slave_link is libata's way of handling this class of
6240  *	controllers without impacting core layer too much.  For
6241  *	anything other than physical link handling, the default host
6242  *	link is used for both master and slave.  For physical link
6243  *	handling, separate @ap->slave_link is used.  All dirty details
6244  *	are implemented inside libata core layer.  From LLD's POV, the
6245  *	only difference is that prereset, hardreset and postreset are
6246  *	called once more for the slave link, so the reset sequence
6247  *	looks like the following.
6248  *
6249  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6250  *	softreset(M) -> postreset(M) -> postreset(S)
6251  *
6252  *	Note that softreset is called only for the master.  Softreset
6253  *	resets both M/S by definition, so SRST on master should handle
6254  *	both (the standard method will work just fine).
6255  *
6256  *	LOCKING:
6257  *	Should be called before host is registered.
6258  *
6259  *	RETURNS:
6260  *	0 on success, -errno on failure.
6261  */
6262 int ata_slave_link_init(struct ata_port *ap)
6263 {
6264 	struct ata_link *link;
6265 
6266 	WARN_ON(ap->slave_link);
6267 	WARN_ON(ap->flags & ATA_FLAG_PMP);
6268 
6269 	link = kzalloc(sizeof(*link), GFP_KERNEL);
6270 	if (!link)
6271 		return -ENOMEM;
6272 
6273 	ata_link_init(ap, link, 1);
6274 	ap->slave_link = link;
6275 	return 0;
6276 }
6277 
6278 static void ata_host_stop(struct device *gendev, void *res)
6279 {
6280 	struct ata_host *host = dev_get_drvdata(gendev);
6281 	int i;
6282 
6283 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
6284 
6285 	for (i = 0; i < host->n_ports; i++) {
6286 		struct ata_port *ap = host->ports[i];
6287 
6288 		if (ap->ops->port_stop)
6289 			ap->ops->port_stop(ap);
6290 	}
6291 
6292 	if (host->ops->host_stop)
6293 		host->ops->host_stop(host);
6294 }
6295 
6296 /**
6297  *	ata_finalize_port_ops - finalize ata_port_operations
6298  *	@ops: ata_port_operations to finalize
6299  *
6300  *	An ata_port_operations can inherit from another ops and that
6301  *	ops can again inherit from another.  This can go on as many
6302  *	times as necessary as long as there is no loop in the
6303  *	inheritance chain.
6304  *
6305  *	Ops tables are finalized when the host is started.  NULL or
6306  *	unspecified entries are inherited from the closest ancestor
6307  *	which has the method and the entry is populated with it.
6308  *	After finalization, the ops table directly points to all the
6309  *	methods and ->inherits is no longer necessary and cleared.
6310  *
6311  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6312  *
6313  *	LOCKING:
6314  *	None.
6315  */
6316 static void ata_finalize_port_ops(struct ata_port_operations *ops)
6317 {
6318 	static DEFINE_SPINLOCK(lock);
6319 	const struct ata_port_operations *cur;
6320 	void **begin = (void **)ops;
6321 	void **end = (void **)&ops->inherits;
6322 	void **pp;
6323 
6324 	if (!ops || !ops->inherits)
6325 		return;
6326 
6327 	spin_lock(&lock);
6328 
6329 	for (cur = ops->inherits; cur; cur = cur->inherits) {
6330 		void **inherit = (void **)cur;
6331 
6332 		for (pp = begin; pp < end; pp++, inherit++)
6333 			if (!*pp)
6334 				*pp = *inherit;
6335 	}
6336 
6337 	for (pp = begin; pp < end; pp++)
6338 		if (IS_ERR(*pp))
6339 			*pp = NULL;
6340 
6341 	ops->inherits = NULL;
6342 
6343 	spin_unlock(&lock);
6344 }
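/*
 * Illustrative ops table for the inheritance described above (my_port_ops
 * and my_qc_issue are made-up names): unspecified methods are filled in
 * from .inherits when the host is started, and ATA_OP_NULL forces a slot
 * to remain NULL after finalization.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.qc_issue	= my_qc_issue,		// hypothetical
 *		.softreset	= ATA_OP_NULL,		// explicitly disabled
 *	};
 */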
6345 
6346 /**
6347  *	ata_host_start - start and freeze ports of an ATA host
6348  *	@host: ATA host to start ports for
6349  *
6350  *	Start and then freeze ports of @host.  Started status is
6351  *	recorded in host->flags, so this function can be called
6352  *	multiple times.  Ports are guaranteed to get started only
6353  *	once.  If host->ops isn't initialized yet, it is set to the
6354  *	first non-dummy port ops.
6355  *
6356  *	LOCKING:
6357  *	Inherited from calling layer (may sleep).
6358  *
6359  *	RETURNS:
6360  *	0 if all ports are started successfully, -errno otherwise.
6361  */
6362 int ata_host_start(struct ata_host *host)
6363 {
6364 	int have_stop = 0;
6365 	void *start_dr = NULL;
6366 	int i, rc;
6367 
6368 	if (host->flags & ATA_HOST_STARTED)
6369 		return 0;
6370 
6371 	ata_finalize_port_ops(host->ops);
6372 
6373 	for (i = 0; i < host->n_ports; i++) {
6374 		struct ata_port *ap = host->ports[i];
6375 
6376 		ata_finalize_port_ops(ap->ops);
6377 
6378 		if (!host->ops && !ata_port_is_dummy(ap))
6379 			host->ops = ap->ops;
6380 
6381 		if (ap->ops->port_stop)
6382 			have_stop = 1;
6383 	}
6384 
6385 	if (host->ops->host_stop)
6386 		have_stop = 1;
6387 
6388 	if (have_stop) {
6389 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6390 		if (!start_dr)
6391 			return -ENOMEM;
6392 	}
6393 
6394 	for (i = 0; i < host->n_ports; i++) {
6395 		struct ata_port *ap = host->ports[i];
6396 
6397 		if (ap->ops->port_start) {
6398 			rc = ap->ops->port_start(ap);
6399 			if (rc) {
6400 				if (rc != -ENODEV)
6401 					dev_err(host->dev,
6402 						"failed to start port %d (errno=%d)\n",
6403 						i, rc);
6404 				goto err_out;
6405 			}
6406 		}
6407 		ata_eh_freeze_port(ap);
6408 	}
6409 
6410 	if (start_dr)
6411 		devres_add(host->dev, start_dr);
6412 	host->flags |= ATA_HOST_STARTED;
6413 	return 0;
6414 
6415  err_out:
6416 	while (--i >= 0) {
6417 		struct ata_port *ap = host->ports[i];
6418 
6419 		if (ap->ops->port_stop)
6420 			ap->ops->port_stop(ap);
6421 	}
6422 	devres_free(start_dr);
6423 	return rc;
6424 }
6425 
6426 /**
6427  *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
6428  *	@host:	host to initialize
6429  *	@dev:	device host is attached to
6430  *	@ops:	port_ops
6431  *
6432  */
6433 void ata_host_init(struct ata_host *host, struct device *dev,
6434 		   struct ata_port_operations *ops)
6435 {
6436 	spin_lock_init(&host->lock);
6437 	mutex_init(&host->eh_mutex);
6438 	host->n_tags = ATA_MAX_QUEUE;
6439 	host->dev = dev;
6440 	host->ops = ops;
6441 	kref_init(&host->kref);
6442 }
6443 
6444 void __ata_port_probe(struct ata_port *ap)
6445 {
6446 	struct ata_eh_info *ehi = &ap->link.eh_info;
6447 	unsigned long flags;
6448 
6449 	/* kick EH for boot probing */
6450 	spin_lock_irqsave(ap->lock, flags);
6451 
6452 	ehi->probe_mask |= ATA_ALL_DEVICES;
6453 	ehi->action |= ATA_EH_RESET;
6454 	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6455 
6456 	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6457 	ap->pflags |= ATA_PFLAG_LOADING;
6458 	ata_port_schedule_eh(ap);
6459 
6460 	spin_unlock_irqrestore(ap->lock, flags);
6461 }
6462 
6463 int ata_port_probe(struct ata_port *ap)
6464 {
6465 	int rc = 0;
6466 
6467 	if (ap->ops->error_handler) {
6468 		__ata_port_probe(ap);
6469 		ata_port_wait_eh(ap);
6470 	} else {
6471 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6472 		rc = ata_bus_probe(ap);
6473 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6474 	}
6475 	return rc;
6476 }
6477 
6478 
6479 static void async_port_probe(void *data, async_cookie_t cookie)
6480 {
6481 	struct ata_port *ap = data;
6482 
6483 	/*
6484 	 * If we're not allowed to scan this host in parallel,
6485 	 * we need to wait until all previous scans have completed
6486 	 * before going further.
6487 	 * Jeff Garzik says this is only within a controller, so we
6488 	 * don't need to wait for port 0, only for later ports.
6489 	 */
6490 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6491 		async_synchronize_cookie(cookie);
6492 
6493 	(void)ata_port_probe(ap);
6494 
6495 	/* in order to keep device order, we need to synchronize at this point */
6496 	async_synchronize_cookie(cookie);
6497 
6498 	ata_scsi_scan_host(ap, 1);
6499 }
6500 
6501 /**
6502  *	ata_host_register - register initialized ATA host
6503  *	@host: ATA host to register
6504  *	@sht: template for SCSI host
6505  *
6506  *	Register initialized ATA host.  @host is allocated using
6507  *	ata_host_alloc() and fully initialized by LLD.  This function
6508  *	ata_host_alloc() and fully initialized by the LLD.  This function
6509  *	starts ports, registers @host with the ATA and SCSI layers and
6510  *	probes attached devices.
6511  *	LOCKING:
6512  *	Inherited from calling layer (may sleep).
6513  *
6514  *	RETURNS:
6515  *	0 on success, -errno otherwise.
6516  */
6517 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6518 {
6519 	int i, rc;
6520 
6521 	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
6522 
6523 	/* host must have been started */
6524 	if (!(host->flags & ATA_HOST_STARTED)) {
6525 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6526 		WARN_ON(1);
6527 		return -EINVAL;
6528 	}
6529 
6530 	/* Blow away unused ports.  This happens when LLD can't
6531 	 * determine the exact number of ports to allocate at
6532 	 * allocation time.
6533 	 */
6534 	for (i = host->n_ports; host->ports[i]; i++)
6535 		kfree(host->ports[i]);
6536 
6537 	/* give ports names and add SCSI hosts */
6538 	for (i = 0; i < host->n_ports; i++) {
6539 		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6540 		host->ports[i]->local_port_no = i + 1;
6541 	}
6542 
6543 	/* Create associated sysfs transport objects  */
6544 	for (i = 0; i < host->n_ports; i++) {
6545 		rc = ata_tport_add(host->dev, host->ports[i]);
6546 		if (rc) {
6547 			goto err_tadd;
6548 		}
6549 	}
6550 
6551 	rc = ata_scsi_add_hosts(host, sht);
6552 	if (rc)
6553 		goto err_tadd;
6554 
6555 	/* set cable, sata_spd_limit and report */
6556 	for (i = 0; i < host->n_ports; i++) {
6557 		struct ata_port *ap = host->ports[i];
6558 		unsigned long xfer_mask;
6559 
6560 		/* set SATA cable type if still unset */
6561 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6562 			ap->cbl = ATA_CBL_SATA;
6563 
6564 		/* init sata_spd_limit to the current value */
6565 		sata_link_init_spd(&ap->link);
6566 		if (ap->slave_link)
6567 			sata_link_init_spd(ap->slave_link);
6568 
6569 		/* print per-port info to dmesg */
6570 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6571 					      ap->udma_mask);
6572 
6573 		if (!ata_port_is_dummy(ap)) {
6574 			ata_port_info(ap, "%cATA max %s %s\n",
6575 				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6576 				      ata_mode_string(xfer_mask),
6577 				      ap->link.eh_info.desc);
6578 			ata_ehi_clear_desc(&ap->link.eh_info);
6579 		} else
6580 			ata_port_info(ap, "DUMMY\n");
6581 	}
6582 
6583 	/* perform each probe asynchronously */
6584 	for (i = 0; i < host->n_ports; i++) {
6585 		struct ata_port *ap = host->ports[i];
6586 		async_schedule(async_port_probe, ap);
6587 	}
6588 
6589 	return 0;
6590 
6591  err_tadd:
6592 	while (--i >= 0) {
6593 		ata_tport_delete(host->ports[i]);
6594 	}
6595 	return rc;
6596 
6597 }
6598 
6599 /**
6600  *	ata_host_activate - start host, request IRQ and register it
6601  *	@host: target ATA host
6602  *	@irq: IRQ to request
6603  *	@irq_handler: irq_handler used when requesting IRQ
6604  *	@irq_flags: irq_flags used when requesting IRQ
6605  *	@sht: scsi_host_template to use when registering the host
6606  *
6607  *	After allocating an ATA host and initializing it, most libata
6608  *	LLDs perform three steps to activate the host - start host,
6609  *	request IRQ and register it.  This helper takes necessary
6610  *	arguments and performs the three steps in one go.
6611  *
6612  *	An invalid IRQ skips the IRQ registration and expects the host to
6613  *	have set polling mode on the port. In this case, @irq_handler
6614  *	should be NULL.
6615  *
6616  *	LOCKING:
6617  *	Inherited from calling layer (may sleep).
6618  *
6619  *	RETURNS:
6620  *	0 on success, -errno otherwise.
6621  */
6622 int ata_host_activate(struct ata_host *host, int irq,
6623 		      irq_handler_t irq_handler, unsigned long irq_flags,
6624 		      struct scsi_host_template *sht)
6625 {
6626 	int i, rc;
6627 	char *irq_desc;
6628 
6629 	rc = ata_host_start(host);
6630 	if (rc)
6631 		return rc;
6632 
6633 	/* Special case for polling mode */
6634 	if (!irq) {
6635 		WARN_ON(irq_handler);
6636 		return ata_host_register(host, sht);
6637 	}
6638 
6639 	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6640 				  dev_driver_string(host->dev),
6641 				  dev_name(host->dev));
6642 	if (!irq_desc)
6643 		return -ENOMEM;
6644 
6645 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6646 			      irq_desc, host);
6647 	if (rc)
6648 		return rc;
6649 
6650 	for (i = 0; i < host->n_ports; i++)
6651 		ata_port_desc(host->ports[i], "irq %d", irq);
6652 
6653 	rc = ata_host_register(host, sht);
6654 	/* if failed, just free the IRQ and leave ports alone */
6655 	if (rc)
6656 		devm_free_irq(host->dev, irq, host);
6657 
6658 	return rc;
6659 }
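/*
 * Condensed probe-path sketch under typical assumptions (devm-managed
 * resources; ppi, my_irq_handler and my_sht are driver-defined and only
 * the libata calls are shown):
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	// ... ioremap registers, set up host->iomap and port addresses ...
 *	return ata_host_activate(host, irq, my_irq_handler, IRQF_SHARED,
 *				 &my_sht);
 */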
6660 
6661 /**
6662  *	ata_port_detach - Detach ATA port in preparation of device removal
6663  *	@ap: ATA port to be detached
6664  *
6665  *	Detach all ATA devices and the associated SCSI devices of @ap;
6666  *	then, remove the associated SCSI host.  @ap is guaranteed to
6667  *	be quiescent on return from this function.
6668  *
6669  *	LOCKING:
6670  *	Kernel thread context (may sleep).
6671  */
6672 static void ata_port_detach(struct ata_port *ap)
6673 {
6674 	unsigned long flags;
6675 	struct ata_link *link;
6676 	struct ata_device *dev;
6677 
6678 	if (!ap->ops->error_handler)
6679 		goto skip_eh;
6680 
6681 	/* tell EH we're leaving & flush EH */
6682 	spin_lock_irqsave(ap->lock, flags);
6683 	ap->pflags |= ATA_PFLAG_UNLOADING;
6684 	ata_port_schedule_eh(ap);
6685 	spin_unlock_irqrestore(ap->lock, flags);
6686 
6687 	/* wait till EH commits suicide */
6688 	ata_port_wait_eh(ap);
6689 
6690 	/* it better be dead now */
6691 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6692 
6693 	cancel_delayed_work_sync(&ap->hotplug_task);
6694 
6695  skip_eh:
6696 	/* clean up zpodd on port removal */
6697 	ata_for_each_link(link, ap, HOST_FIRST) {
6698 		ata_for_each_dev(dev, link, ALL) {
6699 			if (zpodd_dev_enabled(dev))
6700 				zpodd_exit(dev);
6701 		}
6702 	}
6703 	if (ap->pmp_link) {
6704 		int i;
6705 		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6706 			ata_tlink_delete(&ap->pmp_link[i]);
6707 	}
6708 	/* remove the associated SCSI host */
6709 	scsi_remove_host(ap->scsi_host);
6710 	ata_tport_delete(ap);
6711 }
6712 
6713 /**
6714  *	ata_host_detach - Detach all ports of an ATA host
6715  *	@host: Host to detach
6716  *
6717  *	Detach all ports of @host.
6718  *
6719  *	LOCKING:
6720  *	Kernel thread context (may sleep).
6721  */
6722 void ata_host_detach(struct ata_host *host)
6723 {
6724 	int i;
6725 
6726 	for (i = 0; i < host->n_ports; i++)
6727 		ata_port_detach(host->ports[i]);
6728 
6729 	/* the host is dead now, dissociate ACPI */
6730 	ata_acpi_dissociate(host);
6731 }
6732 
6733 #ifdef CONFIG_PCI
6734 
6735 /**
6736  *	ata_pci_remove_one - PCI layer callback for device removal
6737  *	@pdev: PCI device that was removed
6738  *
6739  *	PCI layer indicates to libata via this hook that hot-unplug or
6740  *	module unload event has occurred.  Detach all ports.  Resource
6741  *	release is handled via devres.
6742  *
6743  *	LOCKING:
6744  *	Inherited from PCI layer (may sleep).
6745  */
6746 void ata_pci_remove_one(struct pci_dev *pdev)
6747 {
6748 	struct ata_host *host = pci_get_drvdata(pdev);
6749 
6750 	ata_host_detach(host);
6751 }
6752 
6753 /* move to PCI subsystem */
6754 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6755 {
6756 	unsigned long tmp = 0;
6757 
6758 	switch (bits->width) {
6759 	case 1: {
6760 		u8 tmp8 = 0;
6761 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6762 		tmp = tmp8;
6763 		break;
6764 	}
6765 	case 2: {
6766 		u16 tmp16 = 0;
6767 		pci_read_config_word(pdev, bits->reg, &tmp16);
6768 		tmp = tmp16;
6769 		break;
6770 	}
6771 	case 4: {
6772 		u32 tmp32 = 0;
6773 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6774 		tmp = tmp32;
6775 		break;
6776 	}
6777 
6778 	default:
6779 		return -EINVAL;
6780 	}
6781 
6782 	tmp &= bits->mask;
6783 
6784 	return (tmp == bits->val) ? 1 : 0;
6785 }
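/*
 * Hedged example of pci_test_config_bits() usage: checking a single
 * port-enable bit in PCI config space.  The offset, width, mask and value
 * below are illustrative, not taken from a real chipset.
 *
 *	static const struct pci_bits port_enable_bit = {
 *		0x41, 1, 0x80, 0x80,		// reg, width (bytes), mask, val
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &port_enable_bit))
 *		return -ENOENT;			// port disabled by firmware
 */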
6786 
6787 #ifdef CONFIG_PM
6788 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6789 {
6790 	pci_save_state(pdev);
6791 	pci_disable_device(pdev);
6792 
6793 	if (mesg.event & PM_EVENT_SLEEP)
6794 		pci_set_power_state(pdev, PCI_D3hot);
6795 }
6796 
6797 int ata_pci_device_do_resume(struct pci_dev *pdev)
6798 {
6799 	int rc;
6800 
6801 	pci_set_power_state(pdev, PCI_D0);
6802 	pci_restore_state(pdev);
6803 
6804 	rc = pcim_enable_device(pdev);
6805 	if (rc) {
6806 		dev_err(&pdev->dev,
6807 			"failed to enable device after resume (%d)\n", rc);
6808 		return rc;
6809 	}
6810 
6811 	pci_set_master(pdev);
6812 	return 0;
6813 }
6814 
6815 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6816 {
6817 	struct ata_host *host = pci_get_drvdata(pdev);
6818 	int rc = 0;
6819 
6820 	rc = ata_host_suspend(host, mesg);
6821 	if (rc)
6822 		return rc;
6823 
6824 	ata_pci_device_do_suspend(pdev, mesg);
6825 
6826 	return 0;
6827 }
6828 
6829 int ata_pci_device_resume(struct pci_dev *pdev)
6830 {
6831 	struct ata_host *host = pci_get_drvdata(pdev);
6832 	int rc;
6833 
6834 	rc = ata_pci_device_do_resume(pdev);
6835 	if (rc == 0)
6836 		ata_host_resume(host);
6837 	return rc;
6838 }
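/*
 * Drivers that must reprogram the controller on resume typically wrap the
 * helpers above; a sketch of the pattern (my_chip_reinit() is a
 * hypothetical controller-specific step):
 *
 *	static int my_pci_resume(struct pci_dev *pdev)
 *	{
 *		struct ata_host *host = pci_get_drvdata(pdev);
 *		int rc = ata_pci_device_do_resume(pdev);
 *
 *		if (rc)
 *			return rc;
 *		my_chip_reinit(host);
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */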
6839 #endif /* CONFIG_PM */
6840 
6841 #endif /* CONFIG_PCI */
6842 
6843 /**
6844  *	ata_platform_remove_one - Platform layer callback for device removal
6845  *	@pdev: Platform device that was removed
6846  *
6847  *	Platform layer indicates to libata via this hook that hot-unplug or
6848  *	module unload event has occurred.  Detach all ports.  Resource
6849  *	release is handled via devres.
6850  *
6851  *	LOCKING:
6852  *	Inherited from platform layer (may sleep).
6853  */
6854 int ata_platform_remove_one(struct platform_device *pdev)
6855 {
6856 	struct ata_host *host = platform_get_drvdata(pdev);
6857 
6858 	ata_host_detach(host);
6859 
6860 	return 0;
6861 }
6862 
6863 static int __init ata_parse_force_one(char **cur,
6864 				      struct ata_force_ent *force_ent,
6865 				      const char **reason)
6866 {
6867 	static const struct ata_force_param force_tbl[] __initconst = {
6868 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6869 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6870 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6871 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6872 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6873 		{ "sata",	.cbl		= ATA_CBL_SATA },
6874 		{ "1.5Gbps",	.spd_limit	= 1 },
6875 		{ "3.0Gbps",	.spd_limit	= 2 },
6876 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6877 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6878 		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
6879 		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
6880 		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6881 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6882 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6883 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6884 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6885 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6886 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6887 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6888 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6889 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6890 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6891 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6892 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6893 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6894 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6895 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6896 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6897 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6898 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6899 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6900 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6901 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6902 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6903 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6904 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6905 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6906 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6907 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6908 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6909 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6910 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6911 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6912 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6913 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6914 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6915 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6916 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6917 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6918 		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6919 		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6920 		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6921 	};
6922 	char *start = *cur, *p = *cur;
6923 	char *id, *val, *endp;
6924 	const struct ata_force_param *match_fp = NULL;
6925 	int nr_matches = 0, i;
6926 
6927 	/* find where this param ends and update *cur */
6928 	while (*p != '\0' && *p != ',')
6929 		p++;
6930 
6931 	if (*p == '\0')
6932 		*cur = p;
6933 	else
6934 		*cur = p + 1;
6935 
6936 	*p = '\0';
6937 
6938 	/* parse */
6939 	p = strchr(start, ':');
6940 	if (!p) {
6941 		val = strstrip(start);
6942 		goto parse_val;
6943 	}
6944 	*p = '\0';
6945 
6946 	id = strstrip(start);
6947 	val = strstrip(p + 1);
6948 
6949 	/* parse id */
6950 	p = strchr(id, '.');
6951 	if (p) {
6952 		*p++ = '\0';
6953 		force_ent->device = simple_strtoul(p, &endp, 10);
6954 		if (p == endp || *endp != '\0') {
6955 			*reason = "invalid device";
6956 			return -EINVAL;
6957 		}
6958 	}
6959 
6960 	force_ent->port = simple_strtoul(id, &endp, 10);
6961 	if (id == endp || *endp != '\0') {
6962 		*reason = "invalid port/link";
6963 		return -EINVAL;
6964 	}
6965 
6966  parse_val:
6967 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6968 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6969 		const struct ata_force_param *fp = &force_tbl[i];
6970 
6971 		if (strncasecmp(val, fp->name, strlen(val)))
6972 			continue;
6973 
6974 		nr_matches++;
6975 		match_fp = fp;
6976 
6977 		if (strcasecmp(val, fp->name) == 0) {
6978 			nr_matches = 1;
6979 			break;
6980 		}
6981 	}
6982 
6983 	if (!nr_matches) {
6984 		*reason = "unknown value";
6985 		return -EINVAL;
6986 	}
6987 	if (nr_matches > 1) {
6988 		*reason = "ambiguous value";
6989 		return -EINVAL;
6990 	}
6991 
6992 	force_ent->param = *match_fp;
6993 
6994 	return 0;
6995 }
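/*
 * The grammar parsed above is the libata.force= boot parameter.  A few
 * illustrative values (see Documentation/admin-guide/kernel-parameters.txt
 * for the full list):
 *
 *	libata.force=3.0Gbps		// limit every link to 3.0 Gbps
 *	libata.force=1:noncq		// disable NCQ on port 1
 *	libata.force=2.00:udma4		// force UDMA4 on port 2, device 0
 */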
6996 
6997 static void __init ata_parse_force_param(void)
6998 {
6999 	int idx = 0, size = 1;
7000 	int last_port = -1, last_device = -1;
7001 	char *p, *cur, *next;
7002 
7003 	/* calculate maximum number of params and allocate force_tbl */
7004 	for (p = ata_force_param_buf; *p; p++)
7005 		if (*p == ',')
7006 			size++;
7007 
7008 	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
7009 	if (!ata_force_tbl) {
7010 		printk(KERN_WARNING "ata: failed to extend force table, "
7011 		       "libata.force ignored\n");
7012 		return;
7013 	}
7014 
7015 	/* parse and populate the table */
7016 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7017 		const char *reason = "";
7018 		struct ata_force_ent te = { .port = -1, .device = -1 };
7019 
7020 		next = cur;
7021 		if (ata_parse_force_one(&next, &te, &reason)) {
7022 			printk(KERN_WARNING "ata: failed to parse force "
7023 			       "parameter \"%s\" (%s)\n",
7024 			       cur, reason);
7025 			continue;
7026 		}
7027 
7028 		if (te.port == -1) {
7029 			te.port = last_port;
7030 			te.device = last_device;
7031 		}
7032 
7033 		ata_force_tbl[idx++] = te;
7034 
7035 		last_port = te.port;
7036 		last_device = te.device;
7037 	}
7038 
7039 	ata_force_tbl_size = idx;
7040 }
7041 
7042 static int __init ata_init(void)
7043 {
7044 	int rc;
7045 
7046 	ata_parse_force_param();
7047 
7048 	rc = ata_sff_init();
7049 	if (rc) {
7050 		kfree(ata_force_tbl);
7051 		return rc;
7052 	}
7053 
7054 	libata_transport_init();
7055 	ata_scsi_transport_template = ata_attach_transport();
7056 	if (!ata_scsi_transport_template) {
7057 		ata_sff_exit();
7058 		rc = -ENOMEM;
7059 		goto err_out;
7060 	}
7061 
7062 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7063 	return 0;
7064 
7065 err_out:
7066 	return rc;
7067 }
7068 
7069 static void __exit ata_exit(void)
7070 {
7071 	ata_release_transport(ata_scsi_transport_template);
7072 	libata_transport_exit();
7073 	ata_sff_exit();
7074 	kfree(ata_force_tbl);
7075 }
7076 
7077 subsys_initcall(ata_init);
7078 module_exit(ata_exit);
7079 
7080 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
7081 
7082 int ata_ratelimit(void)
7083 {
7084 	return __ratelimit(&ratelimit);
7085 }
7086 
7087 /**
7088  *	ata_msleep - ATA EH owner aware msleep
7089  *	@ap: ATA port to attribute the sleep to
7090  *	@msecs: duration to sleep in milliseconds
7091  *
7092  *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
7093  *	ownership is released before going to sleep and reacquired
7094  *	after the sleep is complete.  IOW, other ports sharing the
7095  *	@ap->host will be allowed to own the EH while this task is
7096  *	sleeping.
7097  *
7098  *	LOCKING:
7099  *	Might sleep.
7100  */
7101 void ata_msleep(struct ata_port *ap, unsigned int msecs)
7102 {
7103 	bool owns_eh = ap && ap->host->eh_owner == current;
7104 
7105 	if (owns_eh)
7106 		ata_eh_release(ap);
7107 
7108 	if (msecs < 20) {
7109 		unsigned long usecs = msecs * USEC_PER_MSEC;
7110 		usleep_range(usecs, usecs + 50);
7111 	} else {
7112 		msleep(msecs);
7113 	}
7114 
7115 	if (owns_eh)
7116 		ata_eh_acquire(ap);
7117 }
7118 
7119 /**
7120  *	ata_wait_register - wait until register value changes
7121  *	@ap: ATA port to wait register for, can be NULL
7122  *	@reg: IO-mapped register
7123  *	@mask: Mask to apply to read register value
7124  *	@val: Wait condition
7125  *	@interval: polling interval in milliseconds
7126  *	@timeout: timeout in milliseconds
7127  *
7128  *	Waiting for some bits of a register to change is a common
7129  *	operation for ATA controllers.  This function reads 32bit LE
7130  *	IO-mapped register @reg and tests for the following condition.
7131  *
7132  *	(ioread32(@reg) & @mask) != @val
7133  *
7134  *	If the condition is met, it returns; otherwise, the process is
7135  *	repeated every @interval milliseconds until @timeout expires.
7136  *
7137  *	LOCKING:
7138  *	Kernel thread context (may sleep)
7139  *
7140  *	RETURNS:
7141  *	The final register value.
7142  */
7143 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
7144 		      unsigned long interval, unsigned long timeout)
7145 {
7146 	unsigned long deadline;
7147 	u32 tmp;
7148 
7149 	tmp = ioread32(reg);
7150 
7151 	/* Calculate timeout _after_ the first read to make sure
7152 	 * preceding writes reach the controller before starting to
7153 	 * eat away the timeout.
7154 	 */
7155 	deadline = ata_deadline(jiffies, timeout);
7156 
7157 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
7158 		ata_msleep(ap, interval);
7159 		tmp = ioread32(reg);
7160 	}
7161 
7162 	return tmp;
7163 }
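/*
 * Minimal usage sketch: poll a memory-mapped status register until a
 * hypothetical MY_STAT_BUSY bit clears, checking every 10 msecs for up
 * to 1000 msecs (mmio, MY_STAT_REG and MY_STAT_BUSY are made-up names):
 *
 *	u32 status = ata_wait_register(ap, mmio + MY_STAT_REG,
 *				       MY_STAT_BUSY, MY_STAT_BUSY, 10, 1000);
 *	if (status & MY_STAT_BUSY)
 *		return -EBUSY;		// timed out, bit never cleared
 */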
7164 
7165 /**
7166  *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
7167  *	@link: Link receiving the event
7168  *
7169  *	Test whether the received PHY event has to be ignored or not.
7170  *
7171  *	LOCKING:
7172  *	None.
7173  *
7174  *	RETURNS:
7175  *	True if the event has to be ignored.
7176  */
7177 bool sata_lpm_ignore_phy_events(struct ata_link *link)
7178 {
7179 	unsigned long lpm_timeout = link->last_lpm_change +
7180 				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
7181 
7182 	/* if LPM is enabled, PHYRDY doesn't mean anything */
7183 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
7184 		return true;
7185 
7186 	/* ignore the first PHY event after the LPM policy changed
7187 	 * as it might be spurious
7188 	 */
7189 	if ((link->flags & ATA_LFLAG_CHANGED) &&
7190 	    time_before(jiffies, lpm_timeout))
7191 		return true;
7192 
7193 	return false;
7194 }
7195 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
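/*
 * Sketch of the intended call site: an LLD consults the helper above
 * before acting on a PHYRDY change interrupt (the surrounding hotplug
 * handling is abbreviated from the usual pattern, not copied from a
 * particular driver):
 *
 *	if (sata_lpm_ignore_phy_events(&ap->link))
 *		return;				// likely spurious, drop it
 *	ata_ehi_hotplugged(&ap->link.eh_info);
 *	ata_port_freeze(ap);
 */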
7196 
7197 /*
7198  * Dummy port_ops
7199  */
7200 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7201 {
7202 	return AC_ERR_SYSTEM;
7203 }
7204 
7205 static void ata_dummy_error_handler(struct ata_port *ap)
7206 {
7207 	/* truly dummy */
7208 }
7209 
7210 struct ata_port_operations ata_dummy_port_ops = {
7211 	.qc_prep		= ata_noop_qc_prep,
7212 	.qc_issue		= ata_dummy_qc_issue,
7213 	.error_handler		= ata_dummy_error_handler,
7214 	.sched_eh		= ata_std_sched_eh,
7215 	.end_eh			= ata_std_end_eh,
7216 };
7217 
7218 const struct ata_port_info ata_dummy_port_info = {
7219 	.port_ops		= &ata_dummy_port_ops,
7220 };
7221 
7222 /*
7223  * Utility print functions
7224  */
7225 void ata_port_printk(const struct ata_port *ap, const char *level,
7226 		     const char *fmt, ...)
7227 {
7228 	struct va_format vaf;
7229 	va_list args;
7230 
7231 	va_start(args, fmt);
7232 
7233 	vaf.fmt = fmt;
7234 	vaf.va = &args;
7235 
7236 	printk("%sata%u: %pV", level, ap->print_id, &vaf);
7237 
7238 	va_end(args);
7239 }
7240 EXPORT_SYMBOL(ata_port_printk);
7241 
7242 void ata_link_printk(const struct ata_link *link, const char *level,
7243 		     const char *fmt, ...)
7244 {
7245 	struct va_format vaf;
7246 	va_list args;
7247 
7248 	va_start(args, fmt);
7249 
7250 	vaf.fmt = fmt;
7251 	vaf.va = &args;
7252 
7253 	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7254 		printk("%sata%u.%02u: %pV",
7255 		       level, link->ap->print_id, link->pmp, &vaf);
7256 	else
7257 		printk("%sata%u: %pV",
7258 		       level, link->ap->print_id, &vaf);
7259 
7260 	va_end(args);
7261 }
7262 EXPORT_SYMBOL(ata_link_printk);
7263 
7264 void ata_dev_printk(const struct ata_device *dev, const char *level,
7265 		    const char *fmt, ...)
7266 {
7267 	struct va_format vaf;
7268 	va_list args;
7269 
7270 	va_start(args, fmt);
7271 
7272 	vaf.fmt = fmt;
7273 	vaf.va = &args;
7274 
7275 	printk("%sata%u.%02u: %pV",
7276 	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7277 	       &vaf);
7278 
7279 	va_end(args);
7280 }
7281 EXPORT_SYMBOL(ata_dev_printk);
7282 
7283 void ata_print_version(const struct device *dev, const char *version)
7284 {
7285 	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7286 }
7287 EXPORT_SYMBOL(ata_print_version);
7288 
7289 /*
7290  * libata is essentially a library of internal helper functions for
7291  * low-level ATA host controller drivers.  As such, the API/ABI is
7292  * likely to change as new drivers are added and updated.
7293  * Do not depend on ABI/API stability.
7294  */
7295 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7296 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7297 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7298 EXPORT_SYMBOL_GPL(ata_base_port_ops);
7299 EXPORT_SYMBOL_GPL(sata_port_ops);
7300 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7301 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7302 EXPORT_SYMBOL_GPL(ata_link_next);
7303 EXPORT_SYMBOL_GPL(ata_dev_next);
7304 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7305 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
7306 EXPORT_SYMBOL_GPL(ata_host_init);
7307 EXPORT_SYMBOL_GPL(ata_host_alloc);
7308 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7309 EXPORT_SYMBOL_GPL(ata_slave_link_init);
7310 EXPORT_SYMBOL_GPL(ata_host_start);
7311 EXPORT_SYMBOL_GPL(ata_host_register);
7312 EXPORT_SYMBOL_GPL(ata_host_activate);
7313 EXPORT_SYMBOL_GPL(ata_host_detach);
7314 EXPORT_SYMBOL_GPL(ata_sg_init);
7315 EXPORT_SYMBOL_GPL(ata_qc_complete);
7316 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7317 EXPORT_SYMBOL_GPL(atapi_cmd_type);
7318 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7319 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7320 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7321 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7322 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7323 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7324 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7325 EXPORT_SYMBOL_GPL(ata_mode_string);
7326 EXPORT_SYMBOL_GPL(ata_id_xfermask);
7327 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7328 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7329 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7330 EXPORT_SYMBOL_GPL(ata_dev_disable);
7331 EXPORT_SYMBOL_GPL(sata_set_spd);
7332 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7333 EXPORT_SYMBOL_GPL(sata_link_debounce);
7334 EXPORT_SYMBOL_GPL(sata_link_resume);
7335 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7336 EXPORT_SYMBOL_GPL(ata_std_prereset);
7337 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7338 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7339 EXPORT_SYMBOL_GPL(ata_std_postreset);
7340 EXPORT_SYMBOL_GPL(ata_dev_classify);
7341 EXPORT_SYMBOL_GPL(ata_dev_pair);
7342 EXPORT_SYMBOL_GPL(ata_ratelimit);
7343 EXPORT_SYMBOL_GPL(ata_msleep);
7344 EXPORT_SYMBOL_GPL(ata_wait_register);
7345 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7346 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7347 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7348 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7349 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7350 EXPORT_SYMBOL_GPL(sata_scr_valid);
7351 EXPORT_SYMBOL_GPL(sata_scr_read);
7352 EXPORT_SYMBOL_GPL(sata_scr_write);
7353 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7354 EXPORT_SYMBOL_GPL(ata_link_online);
7355 EXPORT_SYMBOL_GPL(ata_link_offline);
7356 #ifdef CONFIG_PM
7357 EXPORT_SYMBOL_GPL(ata_host_suspend);
7358 EXPORT_SYMBOL_GPL(ata_host_resume);
7359 #endif /* CONFIG_PM */
7360 EXPORT_SYMBOL_GPL(ata_id_string);
7361 EXPORT_SYMBOL_GPL(ata_id_c_string);
7362 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7363 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7364 
7365 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7366 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7367 EXPORT_SYMBOL_GPL(ata_timing_compute);
7368 EXPORT_SYMBOL_GPL(ata_timing_merge);
7369 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7370 
7371 #ifdef CONFIG_PCI
7372 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7373 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7374 #ifdef CONFIG_PM
7375 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7376 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7377 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7378 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7379 #endif /* CONFIG_PM */
7380 #endif /* CONFIG_PCI */
7381 
7382 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7383 
7384 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7385 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7386 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7387 EXPORT_SYMBOL_GPL(ata_port_desc);
7388 #ifdef CONFIG_PCI
7389 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7390 #endif /* CONFIG_PCI */
7391 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7392 EXPORT_SYMBOL_GPL(ata_link_abort);
7393 EXPORT_SYMBOL_GPL(ata_port_abort);
7394 EXPORT_SYMBOL_GPL(ata_port_freeze);
7395 EXPORT_SYMBOL_GPL(sata_async_notification);
7396 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7397 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7398 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7399 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7400 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7401 EXPORT_SYMBOL_GPL(ata_do_eh);
7402 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7403 
7404 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7405 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7406 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7407 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7408 EXPORT_SYMBOL_GPL(ata_cable_sata);
7409 EXPORT_SYMBOL_GPL(ata_host_get);
7410 EXPORT_SYMBOL_GPL(ata_host_put);
7411