xref: /openbmc/linux/drivers/ata/libata-core.c (revision 110e6f26)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/time.h>
54 #include <linux/interrupt.h>
55 #include <linux/completion.h>
56 #include <linux/suspend.h>
57 #include <linux/workqueue.h>
58 #include <linux/scatterlist.h>
59 #include <linux/io.h>
60 #include <linux/async.h>
61 #include <linux/log2.h>
62 #include <linux/slab.h>
63 #include <linux/glob.h>
64 #include <scsi/scsi.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_host.h>
67 #include <linux/libata.h>
68 #include <asm/byteorder.h>
69 #include <linux/cdrom.h>
70 #include <linux/ratelimit.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/platform_device.h>
73 
74 #define CREATE_TRACE_POINTS
75 #include <trace/events/libata.h>
76 
77 #include "libata.h"
78 #include "libata-transport.h"
79 
80 /* debounce timing parameters in msecs { interval, duration, timeout } */
81 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
82 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
83 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
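
/*
 * Illustrative sketch (not part of the driver): each table is read as
 * { interval, duration, timeout } in milliseconds by the SATA PHY helpers.
 * A hardreset method typically picks one via sata_ehc_deb_timing() and
 * hands it to sata_link_resume()/sata_link_debounce(), roughly:
 *
 *	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *	int rc = sata_link_resume(link, timing, deadline);
 *
 * sata_ehc_deb_timing() selects the hotplug table when ATA_EHI_HOTPLUGGED
 * is set and the normal table otherwise.
 */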
84 
85 const struct ata_port_operations ata_base_port_ops = {
86 	.prereset		= ata_std_prereset,
87 	.postreset		= ata_std_postreset,
88 	.error_handler		= ata_std_error_handler,
89 	.sched_eh		= ata_std_sched_eh,
90 	.end_eh			= ata_std_end_eh,
91 };
92 
93 const struct ata_port_operations sata_port_ops = {
94 	.inherits		= &ata_base_port_ops,
95 
96 	.qc_defer		= ata_std_qc_defer,
97 	.hardreset		= sata_std_hardreset,
98 };
99 
100 static unsigned int ata_dev_init_params(struct ata_device *dev,
101 					u16 heads, u16 sectors);
102 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
103 static void ata_dev_xfermask(struct ata_device *dev);
104 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
105 
106 atomic_t ata_print_id = ATOMIC_INIT(0);
107 
108 struct ata_force_param {
109 	const char	*name;
110 	unsigned int	cbl;
111 	int		spd_limit;
112 	unsigned long	xfer_mask;
113 	unsigned int	horkage_on;
114 	unsigned int	horkage_off;
115 	unsigned int	lflags;
116 };
117 
118 struct ata_force_ent {
119 	int			port;
120 	int			device;
121 	struct ata_force_param	param;
122 };
123 
124 static struct ata_force_ent *ata_force_tbl;
125 static int ata_force_tbl_size;
126 
127 static char ata_force_param_buf[PAGE_SIZE] __initdata;
128 /* param_buf is thrown away after initialization, disallow read */
129 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
130 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
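
/*
 * Usage example (informational): libata.force takes a comma separated list
 * of "[ID:]VAL" entries where ID is "<port>" or "<port>.<device>".  For
 * instance, booting with
 *
 *	libata.force=1:noncq,2.00:udma4,3:40c
 *
 * disables NCQ on all devices of port 1, limits device 0 of port 2 to
 * UDMA/66 and forces a 40-wire cable type on port 3.  These values are
 * only an illustration; Documentation/kernel-parameters.txt has the
 * authoritative list.
 */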
131 
132 static int atapi_enabled = 1;
133 module_param(atapi_enabled, int, 0444);
134 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
135 
136 static int atapi_dmadir = 0;
137 module_param(atapi_dmadir, int, 0444);
138 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
139 
140 int atapi_passthru16 = 1;
141 module_param(atapi_passthru16, int, 0444);
142 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
143 
144 int libata_fua = 0;
145 module_param_named(fua, libata_fua, int, 0444);
146 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
147 
148 static int ata_ignore_hpa;
149 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
150 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
151 
152 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
153 module_param_named(dma, libata_dma_mask, int, 0444);
154 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
155 
156 static int ata_probe_timeout;
157 module_param(ata_probe_timeout, int, 0444);
158 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
159 
160 int libata_noacpi = 0;
161 module_param_named(noacpi, libata_noacpi, int, 0444);
162 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
163 
164 int libata_allow_tpm = 0;
165 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
166 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
167 
168 static int atapi_an;
169 module_param(atapi_an, int, 0444);
170 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
171 
172 MODULE_AUTHOR("Jeff Garzik");
173 MODULE_DESCRIPTION("Library module for ATA devices");
174 MODULE_LICENSE("GPL");
175 MODULE_VERSION(DRV_VERSION);
176 
177 
178 static bool ata_sstatus_online(u32 sstatus)
179 {
180 	return (sstatus & 0xf) == 0x3;
181 }
182 
183 /**
184  *	ata_link_next - link iteration helper
185  *	@link: the previous link, NULL to start
186  *	@ap: ATA port containing links to iterate
187  *	@mode: iteration mode, one of ATA_LITER_*
188  *
189  *	LOCKING:
190  *	Host lock or EH context.
191  *
192  *	RETURNS:
193  *	Pointer to the next link.
194  */
195 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
196 			       enum ata_link_iter_mode mode)
197 {
198 	BUG_ON(mode != ATA_LITER_EDGE &&
199 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
200 
201 	/* NULL link indicates start of iteration */
202 	if (!link)
203 		switch (mode) {
204 		case ATA_LITER_EDGE:
205 		case ATA_LITER_PMP_FIRST:
206 			if (sata_pmp_attached(ap))
207 				return ap->pmp_link;
208 			/* fall through */
209 		case ATA_LITER_HOST_FIRST:
210 			return &ap->link;
211 		}
212 
213 	/* we just iterated over the host link, what's next? */
214 	if (link == &ap->link)
215 		switch (mode) {
216 		case ATA_LITER_HOST_FIRST:
217 			if (sata_pmp_attached(ap))
218 				return ap->pmp_link;
219 			/* fall through */
220 		case ATA_LITER_PMP_FIRST:
221 			if (unlikely(ap->slave_link))
222 				return ap->slave_link;
223 			/* fall through */
224 		case ATA_LITER_EDGE:
225 			return NULL;
226 		}
227 
228 	/* slave_link excludes PMP */
229 	if (unlikely(link == ap->slave_link))
230 		return NULL;
231 
232 	/* we were over a PMP link */
233 	if (++link < ap->pmp_link + ap->nr_pmp_links)
234 		return link;
235 
236 	if (mode == ATA_LITER_PMP_FIRST)
237 		return &ap->link;
238 
239 	return NULL;
240 }
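
/*
 * Illustrative sketch (not part of the driver): callers normally do not use
 * ata_link_next() directly but go through the ata_for_each_link() wrapper
 * from <linux/libata.h>, which loops over this helper.  Assuming @ap is the
 * port at hand:
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_link_info(link, "visiting link %d\n", link->pmp);
 *
 * EDGE visits only the outer edges (the host link when no PMP is attached,
 * the fan-out links when one is), while HOST_FIRST/PMP_FIRST also include
 * the host link in the stated order.
 */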
241 
242 /**
243  *	ata_dev_next - device iteration helper
244  *	@dev: the previous device, NULL to start
245  *	@link: ATA link containing devices to iterate
246  *	@mode: iteration mode, one of ATA_DITER_*
247  *
248  *	LOCKING:
249  *	Host lock or EH context.
250  *
251  *	RETURNS:
252  *	Pointer to the next device.
253  */
254 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
255 				enum ata_dev_iter_mode mode)
256 {
257 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
258 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
259 
260 	/* NULL dev indicates start of iteration */
261 	if (!dev)
262 		switch (mode) {
263 		case ATA_DITER_ENABLED:
264 		case ATA_DITER_ALL:
265 			dev = link->device;
266 			goto check;
267 		case ATA_DITER_ENABLED_REVERSE:
268 		case ATA_DITER_ALL_REVERSE:
269 			dev = link->device + ata_link_max_devices(link) - 1;
270 			goto check;
271 		}
272 
273  next:
274 	/* move to the next one */
275 	switch (mode) {
276 	case ATA_DITER_ENABLED:
277 	case ATA_DITER_ALL:
278 		if (++dev < link->device + ata_link_max_devices(link))
279 			goto check;
280 		return NULL;
281 	case ATA_DITER_ENABLED_REVERSE:
282 	case ATA_DITER_ALL_REVERSE:
283 		if (--dev >= link->device)
284 			goto check;
285 		return NULL;
286 	}
287 
288  check:
289 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
290 	    !ata_dev_enabled(dev))
291 		goto next;
292 	return dev;
293 }
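
/*
 * Illustrative sketch (not part of the driver): as with links, the usual
 * entry point is the ata_for_each_dev() wrapper from <linux/libata.h>.
 * Walking every enabled device on a port would look roughly like this,
 * assuming @ap comes from the caller's context:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_info(dev, "devno %u is enabled\n", dev->devno);
 */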
294 
295 /**
296  *	ata_dev_phys_link - find physical link for a device
297  *	@dev: ATA device to look up physical link for
298  *
299  *	Look up physical link which @dev is attached to.  Note that
300  *	this is different from @dev->link only when @dev is on slave
301  *	link.  For all other cases, it's the same as @dev->link.
302  *
303  *	LOCKING:
304  *	Don't care.
305  *
306  *	RETURNS:
307  *	Pointer to the found physical link.
308  */
309 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
310 {
311 	struct ata_port *ap = dev->link->ap;
312 
313 	if (!ap->slave_link)
314 		return dev->link;
315 	if (!dev->devno)
316 		return &ap->link;
317 	return ap->slave_link;
318 }
319 
320 /**
321  *	ata_force_cbl - force cable type according to libata.force
322  *	@ap: ATA port of interest
323  *
324  *	Force cable type according to libata.force and whine about it.
325  *	The last entry which has matching port number is used, so it
326  *	can be specified as part of device force parameters.  For
327  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
328  *	same effect.
329  *
330  *	LOCKING:
331  *	EH context.
332  */
333 void ata_force_cbl(struct ata_port *ap)
334 {
335 	int i;
336 
337 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
338 		const struct ata_force_ent *fe = &ata_force_tbl[i];
339 
340 		if (fe->port != -1 && fe->port != ap->print_id)
341 			continue;
342 
343 		if (fe->param.cbl == ATA_CBL_NONE)
344 			continue;
345 
346 		ap->cbl = fe->param.cbl;
347 		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
348 		return;
349 	}
350 }
351 
352 /**
353  *	ata_force_link_limits - force link limits according to libata.force
354  *	@link: ATA link of interest
355  *
356  *	Force link flags and SATA spd limit according to libata.force
357  *	and whine about it.  When only the port part is specified
358  *	(e.g. 1:), the limit applies to all links connected to both
359  *	the host link and all fan-out ports connected via PMP.  If the
360  *	device part is specified as 0 (e.g. 1.00:), it specifies the
361  *	first fan-out link not the host link.  Device number 15 always
362  *	points to the host link whether PMP is attached or not.  If the
363  *	controller has a slave link, device number 16 points to it.
364  *
365  *	LOCKING:
366  *	EH context.
367  */
368 static void ata_force_link_limits(struct ata_link *link)
369 {
370 	bool did_spd = false;
371 	int linkno = link->pmp;
372 	int i;
373 
374 	if (ata_is_host_link(link))
375 		linkno += 15;
376 
377 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
378 		const struct ata_force_ent *fe = &ata_force_tbl[i];
379 
380 		if (fe->port != -1 && fe->port != link->ap->print_id)
381 			continue;
382 
383 		if (fe->device != -1 && fe->device != linkno)
384 			continue;
385 
386 		/* only honor the first spd limit */
387 		if (!did_spd && fe->param.spd_limit) {
388 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
389 			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
390 					fe->param.name);
391 			did_spd = true;
392 		}
393 
394 		/* let lflags stack */
395 		if (fe->param.lflags) {
396 			link->flags |= fe->param.lflags;
397 			ata_link_notice(link,
398 					"FORCE: link flag 0x%x forced -> 0x%x\n",
399 					fe->param.lflags, link->flags);
400 		}
401 	}
402 }
403 
404 /**
405  *	ata_force_xfermask - force xfermask according to libata.force
406  *	@dev: ATA device of interest
407  *
408  *	Force xfer_mask according to libata.force and whine about it.
409  *	For consistency with link selection, device number 15 selects
410  *	the first device connected to the host link.
411  *
412  *	LOCKING:
413  *	EH context.
414  */
415 static void ata_force_xfermask(struct ata_device *dev)
416 {
417 	int devno = dev->link->pmp + dev->devno;
418 	int alt_devno = devno;
419 	int i;
420 
421 	/* allow n.15/16 for devices attached to host port */
422 	if (ata_is_host_link(dev->link))
423 		alt_devno += 15;
424 
425 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
426 		const struct ata_force_ent *fe = &ata_force_tbl[i];
427 		unsigned long pio_mask, mwdma_mask, udma_mask;
428 
429 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
430 			continue;
431 
432 		if (fe->device != -1 && fe->device != devno &&
433 		    fe->device != alt_devno)
434 			continue;
435 
436 		if (!fe->param.xfer_mask)
437 			continue;
438 
439 		ata_unpack_xfermask(fe->param.xfer_mask,
440 				    &pio_mask, &mwdma_mask, &udma_mask);
441 		if (udma_mask)
442 			dev->udma_mask = udma_mask;
443 		else if (mwdma_mask) {
444 			dev->udma_mask = 0;
445 			dev->mwdma_mask = mwdma_mask;
446 		} else {
447 			dev->udma_mask = 0;
448 			dev->mwdma_mask = 0;
449 			dev->pio_mask = pio_mask;
450 		}
451 
452 		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
453 			       fe->param.name);
454 		return;
455 	}
456 }
457 
458 /**
459  *	ata_force_horkage - force horkage according to libata.force
460  *	@dev: ATA device of interest
461  *
462  *	Force horkage according to libata.force and whine about it.
463  *	For consistency with link selection, device number 15 selects
464  *	the first device connected to the host link.
465  *
466  *	LOCKING:
467  *	EH context.
468  */
469 static void ata_force_horkage(struct ata_device *dev)
470 {
471 	int devno = dev->link->pmp + dev->devno;
472 	int alt_devno = devno;
473 	int i;
474 
475 	/* allow n.15/16 for devices attached to host port */
476 	if (ata_is_host_link(dev->link))
477 		alt_devno += 15;
478 
479 	for (i = 0; i < ata_force_tbl_size; i++) {
480 		const struct ata_force_ent *fe = &ata_force_tbl[i];
481 
482 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
483 			continue;
484 
485 		if (fe->device != -1 && fe->device != devno &&
486 		    fe->device != alt_devno)
487 			continue;
488 
489 		if (!(~dev->horkage & fe->param.horkage_on) &&
490 		    !(dev->horkage & fe->param.horkage_off))
491 			continue;
492 
493 		dev->horkage |= fe->param.horkage_on;
494 		dev->horkage &= ~fe->param.horkage_off;
495 
496 		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
497 			       fe->param.name);
498 	}
499 }
500 
501 /**
502  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
503  *	@opcode: SCSI opcode
504  *
505  *	Determine ATAPI command type from @opcode.
506  *
507  *	LOCKING:
508  *	None.
509  *
510  *	RETURNS:
511  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
512  */
513 int atapi_cmd_type(u8 opcode)
514 {
515 	switch (opcode) {
516 	case GPCMD_READ_10:
517 	case GPCMD_READ_12:
518 		return ATAPI_READ;
519 
520 	case GPCMD_WRITE_10:
521 	case GPCMD_WRITE_12:
522 	case GPCMD_WRITE_AND_VERIFY_10:
523 		return ATAPI_WRITE;
524 
525 	case GPCMD_READ_CD:
526 	case GPCMD_READ_CD_MSF:
527 		return ATAPI_READ_CD;
528 
529 	case ATA_16:
530 	case ATA_12:
531 		if (atapi_passthru16)
532 			return ATAPI_PASS_THRU;
533 		/* fall thru */
534 	default:
535 		return ATAPI_MISC;
536 	}
537 }
538 
539 /**
540  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
541  *	@tf: Taskfile to convert
542  *	@pmp: Port multiplier port
543  *	@is_cmd: This FIS is for command
544  *	@fis: Buffer into which data will output
545  *
546  *	Converts a standard ATA taskfile to a Serial ATA
547  *	FIS structure (Register - Host to Device).
548  *
549  *	LOCKING:
550  *	Inherited from caller.
551  */
552 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
553 {
554 	fis[0] = 0x27;			/* Register - Host to Device FIS */
555 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
556 	if (is_cmd)
557 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
558 
559 	fis[2] = tf->command;
560 	fis[3] = tf->feature;
561 
562 	fis[4] = tf->lbal;
563 	fis[5] = tf->lbam;
564 	fis[6] = tf->lbah;
565 	fis[7] = tf->device;
566 
567 	fis[8] = tf->hob_lbal;
568 	fis[9] = tf->hob_lbam;
569 	fis[10] = tf->hob_lbah;
570 	fis[11] = tf->hob_feature;
571 
572 	fis[12] = tf->nsect;
573 	fis[13] = tf->hob_nsect;
574 	fis[14] = 0;
575 	fis[15] = tf->ctl;
576 
577 	fis[16] = tf->auxiliary & 0xff;
578 	fis[17] = (tf->auxiliary >> 8) & 0xff;
579 	fis[18] = (tf->auxiliary >> 16) & 0xff;
580 	fis[19] = (tf->auxiliary >> 24) & 0xff;
581 }
582 
583 /**
584  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
585  *	@fis: Buffer from which data will be input
586  *	@tf: Taskfile to output
587  *
588  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
589  *
590  *	LOCKING:
591  *	Inherited from caller.
592  */
593 
594 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
595 {
596 	tf->command	= fis[2];	/* status */
597 	tf->feature	= fis[3];	/* error */
598 
599 	tf->lbal	= fis[4];
600 	tf->lbam	= fis[5];
601 	tf->lbah	= fis[6];
602 	tf->device	= fis[7];
603 
604 	tf->hob_lbal	= fis[8];
605 	tf->hob_lbam	= fis[9];
606 	tf->hob_lbah	= fis[10];
607 
608 	tf->nsect	= fis[12];
609 	tf->hob_nsect	= fis[13];
610 }
611 
612 static const u8 ata_rw_cmds[] = {
613 	/* pio multi */
614 	ATA_CMD_READ_MULTI,
615 	ATA_CMD_WRITE_MULTI,
616 	ATA_CMD_READ_MULTI_EXT,
617 	ATA_CMD_WRITE_MULTI_EXT,
618 	0,
619 	0,
620 	0,
621 	ATA_CMD_WRITE_MULTI_FUA_EXT,
622 	/* pio */
623 	ATA_CMD_PIO_READ,
624 	ATA_CMD_PIO_WRITE,
625 	ATA_CMD_PIO_READ_EXT,
626 	ATA_CMD_PIO_WRITE_EXT,
627 	0,
628 	0,
629 	0,
630 	0,
631 	/* dma */
632 	ATA_CMD_READ,
633 	ATA_CMD_WRITE,
634 	ATA_CMD_READ_EXT,
635 	ATA_CMD_WRITE_EXT,
636 	0,
637 	0,
638 	0,
639 	ATA_CMD_WRITE_FUA_EXT
640 };
641 
642 /**
643  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
644  *	@tf: command to examine and configure
645  *	@dev: device tf belongs to
646  *
647  *	Examine the device configuration and tf->flags to calculate
648  *	the proper read/write commands and protocol to use.
649  *
650  *	LOCKING:
651  *	caller.
652  */
653 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
654 {
655 	u8 cmd;
656 
657 	int index, fua, lba48, write;
658 
659 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
660 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
661 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
662 
663 	if (dev->flags & ATA_DFLAG_PIO) {
664 		tf->protocol = ATA_PROT_PIO;
665 		index = dev->multi_count ? 0 : 8;
666 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
667 		/* Unable to use DMA due to host limitation */
668 		tf->protocol = ATA_PROT_PIO;
669 		index = dev->multi_count ? 0 : 8;
670 	} else {
671 		tf->protocol = ATA_PROT_DMA;
672 		index = 16;
673 	}
674 
675 	cmd = ata_rw_cmds[index + fua + lba48 + write];
676 	if (cmd) {
677 		tf->command = cmd;
678 		return 0;
679 	}
680 	return -1;
681 }
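
/*
 * Worked example (informational): the command is looked up as
 * ata_rw_cmds[index + fua + lba48 + write], with index selecting the
 * PIO-multi (0), PIO (8) or DMA (16) block of the table.  An LBA48 FUA
 * DMA write therefore resolves to 16 + 4 + 2 + 1 = 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT, while a plain LBA28 DMA read resolves to
 * 16 + 0 + 0 + 0 = 16, i.e. ATA_CMD_READ.  Zero entries mark invalid
 * combinations (e.g. FUA without LBA48) and make the function return -1.
 */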
682 
683 /**
684  *	ata_tf_read_block - Read block address from ATA taskfile
685  *	@tf: ATA taskfile of interest
686  *	@dev: ATA device @tf belongs to
687  *
688  *	LOCKING:
689  *	None.
690  *
691  *	Read block address from @tf.  This function can handle all
692  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
693  *	flags select the address format to use.
694  *
695  *	RETURNS:
696  *	Block address read from @tf.
697  */
698 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
699 {
700 	u64 block = 0;
701 
702 	if (tf->flags & ATA_TFLAG_LBA) {
703 		if (tf->flags & ATA_TFLAG_LBA48) {
704 			block |= (u64)tf->hob_lbah << 40;
705 			block |= (u64)tf->hob_lbam << 32;
706 			block |= (u64)tf->hob_lbal << 24;
707 		} else
708 			block |= (tf->device & 0xf) << 24;
709 
710 		block |= tf->lbah << 16;
711 		block |= tf->lbam << 8;
712 		block |= tf->lbal;
713 	} else {
714 		u32 cyl, head, sect;
715 
716 		cyl = tf->lbam | (tf->lbah << 8);
717 		head = tf->device & 0xf;
718 		sect = tf->lbal;
719 
720 		if (!sect) {
721 			ata_dev_warn(dev,
722 				     "device reported invalid CHS sector 0\n");
723 			sect = 1; /* oh well */
724 		}
725 
726 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
727 	}
728 
729 	return block;
730 }
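
/*
 * Worked example (informational): in the CHS case the block address is
 *
 *	block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
 *
 * so for a 16-head, 63-sectors-per-track geometry, a taskfile addressing
 * cylinder 2, head 1, sector 1 yields (2 * 16 + 1) * 63 + 1 - 1 = 2079.
 */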
731 
732 /**
733  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
734  *	@tf: Target ATA taskfile
735  *	@dev: ATA device @tf belongs to
736  *	@block: Block address
737  *	@n_block: Number of blocks
738  *	@tf_flags: RW/FUA etc...
739  *	@tag: tag
740  *
741  *	LOCKING:
742  *	None.
743  *
744  *	Build ATA taskfile @tf for read/write request described by
745  *	@block, @n_block, @tf_flags and @tag on @dev.
746  *
747  *	RETURNS:
748  *
749  *	0 on success, -ERANGE if the request is too large for @dev,
750  *	-EINVAL if the request is invalid.
751  */
752 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
753 		    u64 block, u32 n_block, unsigned int tf_flags,
754 		    unsigned int tag)
755 {
756 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
757 	tf->flags |= tf_flags;
758 
759 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
760 		/* yay, NCQ */
761 		if (!lba_48_ok(block, n_block))
762 			return -ERANGE;
763 
764 		tf->protocol = ATA_PROT_NCQ;
765 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
766 
767 		if (tf->flags & ATA_TFLAG_WRITE)
768 			tf->command = ATA_CMD_FPDMA_WRITE;
769 		else
770 			tf->command = ATA_CMD_FPDMA_READ;
771 
772 		tf->nsect = tag << 3;
773 		tf->hob_feature = (n_block >> 8) & 0xff;
774 		tf->feature = n_block & 0xff;
775 
776 		tf->hob_lbah = (block >> 40) & 0xff;
777 		tf->hob_lbam = (block >> 32) & 0xff;
778 		tf->hob_lbal = (block >> 24) & 0xff;
779 		tf->lbah = (block >> 16) & 0xff;
780 		tf->lbam = (block >> 8) & 0xff;
781 		tf->lbal = block & 0xff;
782 
783 		tf->device = ATA_LBA;
784 		if (tf->flags & ATA_TFLAG_FUA)
785 			tf->device |= 1 << 7;
786 	} else if (dev->flags & ATA_DFLAG_LBA) {
787 		tf->flags |= ATA_TFLAG_LBA;
788 
789 		if (lba_28_ok(block, n_block)) {
790 			/* use LBA28 */
791 			tf->device |= (block >> 24) & 0xf;
792 		} else if (lba_48_ok(block, n_block)) {
793 			if (!(dev->flags & ATA_DFLAG_LBA48))
794 				return -ERANGE;
795 
796 			/* use LBA48 */
797 			tf->flags |= ATA_TFLAG_LBA48;
798 
799 			tf->hob_nsect = (n_block >> 8) & 0xff;
800 
801 			tf->hob_lbah = (block >> 40) & 0xff;
802 			tf->hob_lbam = (block >> 32) & 0xff;
803 			tf->hob_lbal = (block >> 24) & 0xff;
804 		} else
805 			/* request too large even for LBA48 */
806 			return -ERANGE;
807 
808 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
809 			return -EINVAL;
810 
811 		tf->nsect = n_block & 0xff;
812 
813 		tf->lbah = (block >> 16) & 0xff;
814 		tf->lbam = (block >> 8) & 0xff;
815 		tf->lbal = block & 0xff;
816 
817 		tf->device |= ATA_LBA;
818 	} else {
819 		/* CHS */
820 		u32 sect, head, cyl, track;
821 
822 		/* The request -may- be too large for CHS addressing. */
823 		if (!lba_28_ok(block, n_block))
824 			return -ERANGE;
825 
826 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
827 			return -EINVAL;
828 
829 		/* Convert LBA to CHS */
830 		track = (u32)block / dev->sectors;
831 		cyl   = track / dev->heads;
832 		head  = track % dev->heads;
833 		sect  = (u32)block % dev->sectors + 1;
834 
835 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
836 			(u32)block, track, cyl, head, sect);
837 
838 		/* Check whether the converted CHS can fit.
839 		   Cylinder: 0-65535
840 		   Head: 0-15
841 		   Sector: 1-255*/
842 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
843 			return -ERANGE;
844 
845 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
846 		tf->lbal = sect;
847 		tf->lbam = cyl;
848 		tf->lbah = cyl >> 8;
849 		tf->device |= head;
850 	}
851 
852 	return 0;
853 }
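
/*
 * Illustrative sketch (not part of the driver): ata_scsi_rw_xlat() is the
 * main user, but a minimal call for a non-NCQ write, assuming @dev, @block
 * and @n_block come from the caller, would look roughly like:
 *
 *	struct ata_taskfile tf;
 *	int rc;
 *
 *	ata_tf_init(dev, &tf);
 *	rc = ata_build_rw_tf(&tf, dev, block, n_block,
 *			     ATA_TFLAG_WRITE, ATA_TAG_INTERNAL);
 *	if (rc)
 *		return rc;
 *
 * Passing ATA_TAG_INTERNAL as @tag keeps the NCQ branch from being taken
 * even when the device has NCQ enabled.
 */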
854 
855 /**
856  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
857  *	@pio_mask: pio_mask
858  *	@mwdma_mask: mwdma_mask
859  *	@udma_mask: udma_mask
860  *
861  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
862  *	unsigned int xfer_mask.
863  *
864  *	LOCKING:
865  *	None.
866  *
867  *	RETURNS:
868  *	Packed xfer_mask.
869  */
870 unsigned long ata_pack_xfermask(unsigned long pio_mask,
871 				unsigned long mwdma_mask,
872 				unsigned long udma_mask)
873 {
874 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
875 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
876 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
877 }
878 
879 /**
880  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
881  *	@xfer_mask: xfer_mask to unpack
882  *	@pio_mask: resulting pio_mask
883  *	@mwdma_mask: resulting mwdma_mask
884  *	@udma_mask: resulting udma_mask
885  *
886  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
887  *	Any NULL destination masks will be ignored.
888  */
889 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
890 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
891 {
892 	if (pio_mask)
893 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
894 	if (mwdma_mask)
895 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
896 	if (udma_mask)
897 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
898 }
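
/*
 * Illustrative sketch (not part of the driver): pack and unpack are exact
 * inverses for masks that fit their fields, e.g.
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * after which pio == ATA_PIO4, mwdma == ATA_MWDMA2 and udma == ATA_UDMA6.
 * Any of the output pointers may be NULL if that component isn't wanted.
 */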
899 
900 static const struct ata_xfer_ent {
901 	int shift, bits;
902 	u8 base;
903 } ata_xfer_tbl[] = {
904 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
905 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
906 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
907 	{ -1, },
908 };
909 
910 /**
911  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
912  *	@xfer_mask: xfer_mask of interest
913  *
914  *	Return matching XFER_* value for @xfer_mask.  Only the highest
915  *	bit of @xfer_mask is considered.
916  *
917  *	LOCKING:
918  *	None.
919  *
920  *	RETURNS:
921  *	Matching XFER_* value, 0xff if no match found.
922  */
923 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
924 {
925 	int highbit = fls(xfer_mask) - 1;
926 	const struct ata_xfer_ent *ent;
927 
928 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
929 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
930 			return ent->base + highbit - ent->shift;
931 	return 0xff;
932 }
933 
934 /**
935  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
936  *	@xfer_mode: XFER_* of interest
937  *
938  *	Return matching xfer_mask for @xfer_mode.
939  *
940  *	LOCKING:
941  *	None.
942  *
943  *	RETURNS:
944  *	Matching xfer_mask, 0 if no match found.
945  */
946 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
947 {
948 	const struct ata_xfer_ent *ent;
949 
950 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
951 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
952 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
953 				& ~((1 << ent->shift) - 1);
954 	return 0;
955 }
956 
957 /**
958  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
959  *	@xfer_mode: XFER_* of interest
960  *
961  *	Return matching xfer_shift for @xfer_mode.
962  *
963  *	LOCKING:
964  *	None.
965  *
966  *	RETURNS:
967  *	Matching xfer_shift, -1 if no match found.
968  */
969 int ata_xfer_mode2shift(unsigned long xfer_mode)
970 {
971 	const struct ata_xfer_ent *ent;
972 
973 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
974 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
975 			return ent->shift;
976 	return -1;
977 }
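
/*
 * Worked example (informational): the three helpers above convert between
 * per-mode XFER_* numbers and the packed xfer_mask representation.  For
 * UDMA/66 (XFER_UDMA_4), for instance:
 *
 *	ata_xfer_mode2shift(XFER_UDMA_4) returns ATA_SHIFT_UDMA;
 *	ata_xfer_mode2mask(XFER_UDMA_4) returns ATA_UDMA4 << ATA_SHIFT_UDMA,
 *	    i.e. all UDMA bits up to and including UDMA4;
 *	ata_xfer_mask2mode(ATA_UDMA4 << ATA_SHIFT_UDMA) returns XFER_UDMA_4,
 *	    since only the highest set bit is considered.
 */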
978 
979 /**
980  *	ata_mode_string - convert xfer_mask to string
981  *	@xfer_mask: mask of bits supported; only highest bit counts.
982  *
983  *	Determine string which represents the highest speed
984  *	(highest bit in @xfer_mask).
985  *
986  *	LOCKING:
987  *	None.
988  *
989  *	RETURNS:
990  *	Constant C string representing highest speed listed in
991  *	@xfer_mask, or the constant C string "<n/a>".
992  */
993 const char *ata_mode_string(unsigned long xfer_mask)
994 {
995 	static const char * const xfer_mode_str[] = {
996 		"PIO0",
997 		"PIO1",
998 		"PIO2",
999 		"PIO3",
1000 		"PIO4",
1001 		"PIO5",
1002 		"PIO6",
1003 		"MWDMA0",
1004 		"MWDMA1",
1005 		"MWDMA2",
1006 		"MWDMA3",
1007 		"MWDMA4",
1008 		"UDMA/16",
1009 		"UDMA/25",
1010 		"UDMA/33",
1011 		"UDMA/44",
1012 		"UDMA/66",
1013 		"UDMA/100",
1014 		"UDMA/133",
1015 		"UDMA7",
1016 	};
1017 	int highbit;
1018 
1019 	highbit = fls(xfer_mask) - 1;
1020 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1021 		return xfer_mode_str[highbit];
1022 	return "<n/a>";
1023 }
1024 
1025 const char *sata_spd_string(unsigned int spd)
1026 {
1027 	static const char * const spd_str[] = {
1028 		"1.5 Gbps",
1029 		"3.0 Gbps",
1030 		"6.0 Gbps",
1031 	};
1032 
1033 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1034 		return "<unknown>";
1035 	return spd_str[spd - 1];
1036 }
1037 
1038 /**
1039  *	ata_dev_classify - determine device type based on ATA-spec signature
1040  *	@tf: ATA taskfile register set for device to be identified
1041  *
1042  *	Determine from taskfile register contents whether a device is
1043  *	ATA or ATAPI, as per "Signature and persistence" section
1044  *	of ATA/PI spec (volume 1, sect 5.14).
1045  *
1046  *	LOCKING:
1047  *	None.
1048  *
1049  *	RETURNS:
1050  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1051  *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1052  */
1053 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1054 {
1055 	/* Apple's open source Darwin code hints that some devices only
1056 	 * put a proper signature into the LBA mid/high registers,
1057 	 * so we only check those.  It's sufficient for uniqueness.
1058 	 *
1059 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1060 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1061 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1062 	 * spec has never mentioned using different signatures
1063 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1064 	 * Multiplier specification began to use 0x69/0x96 to identify
1065 	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
1066 	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1067 	 * 0x69/0x96 shortly and described them as reserved for
1068 	 * SerialATA.
1069 	 *
1070 	 * We follow the current spec and consider that 0x69/0x96
1071 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1072 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1073 	 * SEMB signature.  This is worked around in
1074 	 * ata_dev_read_id().
1075 	 */
1076 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1077 		DPRINTK("found ATA device by sig\n");
1078 		return ATA_DEV_ATA;
1079 	}
1080 
1081 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1082 		DPRINTK("found ATAPI device by sig\n");
1083 		return ATA_DEV_ATAPI;
1084 	}
1085 
1086 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1087 		DPRINTK("found PMP device by sig\n");
1088 		return ATA_DEV_PMP;
1089 	}
1090 
1091 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1092 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1093 		return ATA_DEV_SEMB;
1094 	}
1095 
1096 	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1097 		DPRINTK("found ZAC device by sig\n");
1098 		return ATA_DEV_ZAC;
1099 	}
1100 
1101 	DPRINTK("unknown device\n");
1102 	return ATA_DEV_UNKNOWN;
1103 }
1104 
1105 /**
1106  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1107  *	@id: IDENTIFY DEVICE results we will examine
1108  *	@s: string into which data is output
1109  *	@ofs: offset into identify device page
1110  *	@len: length of string to return. must be an even number.
1111  *
1112  *	The strings in the IDENTIFY DEVICE page are broken up into
1113  *	16-bit chunks.  Run through the string, and output each
1114  *	8-bit chunk linearly, regardless of platform.
1115  *
1116  *	LOCKING:
1117  *	caller.
1118  */
1119 
1120 void ata_id_string(const u16 *id, unsigned char *s,
1121 		   unsigned int ofs, unsigned int len)
1122 {
1123 	unsigned int c;
1124 
1125 	BUG_ON(len & 1);
1126 
1127 	while (len > 0) {
1128 		c = id[ofs] >> 8;
1129 		*s = c;
1130 		s++;
1131 
1132 		c = id[ofs] & 0xff;
1133 		*s = c;
1134 		s++;
1135 
1136 		ofs++;
1137 		len -= 2;
1138 	}
1139 }
1140 
1141 /**
1142  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1143  *	@id: IDENTIFY DEVICE results we will examine
1144  *	@s: string into which data is output
1145  *	@ofs: offset into identify device page
1146  *	@len: length of string to return. must be an odd number.
1147  *
1148  *	This function is identical to ata_id_string except that it
1149  *	trims trailing spaces and terminates the resulting string with
1150  *	null.  @len must be actual maximum length (even number) + 1.
1151  *
1152  *	LOCKING:
1153  *	caller.
1154  */
1155 void ata_id_c_string(const u16 *id, unsigned char *s,
1156 		     unsigned int ofs, unsigned int len)
1157 {
1158 	unsigned char *p;
1159 
1160 	ata_id_string(id, s, ofs, len - 1);
1161 
1162 	p = s + strnlen(s, len - 1);
1163 	while (p > s && p[-1] == ' ')
1164 		p--;
1165 	*p = '\0';
1166 }
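
/*
 * Illustrative sketch (not part of the driver): extracting the model and
 * firmware revision strings from IDENTIFY data, as done elsewhere in
 * libata:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *	unsigned char fwrev[ATA_ID_FW_REV_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *	ata_id_c_string(dev->id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
 *
 * Note the "+ 1": @len passed to ata_id_c_string() is the even on-wire
 * length plus one byte for the terminating NUL.
 */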
1167 
1168 static u64 ata_id_n_sectors(const u16 *id)
1169 {
1170 	if (ata_id_has_lba(id)) {
1171 		if (ata_id_has_lba48(id))
1172 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1173 		else
1174 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1175 	} else {
1176 		if (ata_id_current_chs_valid(id))
1177 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1178 			       id[ATA_ID_CUR_SECTORS];
1179 		else
1180 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1181 			       id[ATA_ID_SECTORS];
1182 	}
1183 }
1184 
1185 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1186 {
1187 	u64 sectors = 0;
1188 
1189 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1190 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1191 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1192 	sectors |= (tf->lbah & 0xff) << 16;
1193 	sectors |= (tf->lbam & 0xff) << 8;
1194 	sectors |= (tf->lbal & 0xff);
1195 
1196 	return sectors;
1197 }
1198 
1199 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1200 {
1201 	u64 sectors = 0;
1202 
1203 	sectors |= (tf->device & 0x0f) << 24;
1204 	sectors |= (tf->lbah & 0xff) << 16;
1205 	sectors |= (tf->lbam & 0xff) << 8;
1206 	sectors |= (tf->lbal & 0xff);
1207 
1208 	return sectors;
1209 }
1210 
1211 /**
1212  *	ata_read_native_max_address - Read native max address
1213  *	@dev: target device
1214  *	@max_sectors: out parameter for the result native max address
1215  *
1216  *	Perform an LBA48 or LBA28 native size query upon the device in
1217  *	question.
1218  *
1219  *	RETURNS:
1220  *	0 on success, -EACCES if command is aborted by the drive.
1221  *	-EIO on other errors.
1222  */
1223 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1224 {
1225 	unsigned int err_mask;
1226 	struct ata_taskfile tf;
1227 	int lba48 = ata_id_has_lba48(dev->id);
1228 
1229 	ata_tf_init(dev, &tf);
1230 
1231 	/* always clear all address registers */
1232 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1233 
1234 	if (lba48) {
1235 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1236 		tf.flags |= ATA_TFLAG_LBA48;
1237 	} else
1238 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1239 
1240 	tf.protocol |= ATA_PROT_NODATA;
1241 	tf.device |= ATA_LBA;
1242 
1243 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1244 	if (err_mask) {
1245 		ata_dev_warn(dev,
1246 			     "failed to read native max address (err_mask=0x%x)\n",
1247 			     err_mask);
1248 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1249 			return -EACCES;
1250 		return -EIO;
1251 	}
1252 
1253 	if (lba48)
1254 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1255 	else
1256 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1257 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1258 		(*max_sectors)--;
1259 	return 0;
1260 }
1261 
1262 /**
1263  *	ata_set_max_sectors - Set max sectors
1264  *	@dev: target device
1265  *	@new_sectors: new max sectors value to set for the device
1266  *
1267  *	Set max sectors of @dev to @new_sectors.
1268  *
1269  *	RETURNS:
1270  *	0 on success, -EACCES if command is aborted or denied (due to
1271  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1272  *	errors.
1273  */
1274 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1275 {
1276 	unsigned int err_mask;
1277 	struct ata_taskfile tf;
1278 	int lba48 = ata_id_has_lba48(dev->id);
1279 
1280 	new_sectors--;
1281 
1282 	ata_tf_init(dev, &tf);
1283 
1284 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1285 
1286 	if (lba48) {
1287 		tf.command = ATA_CMD_SET_MAX_EXT;
1288 		tf.flags |= ATA_TFLAG_LBA48;
1289 
1290 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1291 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1292 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1293 	} else {
1294 		tf.command = ATA_CMD_SET_MAX;
1295 
1296 		tf.device |= (new_sectors >> 24) & 0xf;
1297 	}
1298 
1299 	tf.protocol |= ATA_PROT_NODATA;
1300 	tf.device |= ATA_LBA;
1301 
1302 	tf.lbal = (new_sectors >> 0) & 0xff;
1303 	tf.lbam = (new_sectors >> 8) & 0xff;
1304 	tf.lbah = (new_sectors >> 16) & 0xff;
1305 
1306 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1307 	if (err_mask) {
1308 		ata_dev_warn(dev,
1309 			     "failed to set max address (err_mask=0x%x)\n",
1310 			     err_mask);
1311 		if (err_mask == AC_ERR_DEV &&
1312 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1313 			return -EACCES;
1314 		return -EIO;
1315 	}
1316 
1317 	return 0;
1318 }
1319 
1320 /**
1321  *	ata_hpa_resize		-	Resize a device with an HPA set
1322  *	@dev: Device to resize
1323  *
1324  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1325  *	it if required to the full size of the media. The caller must check
1326  *	the drive has the HPA feature set enabled.
1327  *
1328  *	RETURNS:
1329  *	0 on success, -errno on failure.
1330  */
1331 static int ata_hpa_resize(struct ata_device *dev)
1332 {
1333 	struct ata_eh_context *ehc = &dev->link->eh_context;
1334 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1335 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1336 	u64 sectors = ata_id_n_sectors(dev->id);
1337 	u64 native_sectors;
1338 	int rc;
1339 
1340 	/* do we need to do it? */
1341 	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1342 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1343 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1344 		return 0;
1345 
1346 	/* read native max address */
1347 	rc = ata_read_native_max_address(dev, &native_sectors);
1348 	if (rc) {
1349 		/* If device aborted the command or HPA isn't going to
1350 		 * be unlocked, skip HPA resizing.
1351 		 */
1352 		if (rc == -EACCES || !unlock_hpa) {
1353 			ata_dev_warn(dev,
1354 				     "HPA support seems broken, skipping HPA handling\n");
1355 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1356 
1357 			/* we can continue if device aborted the command */
1358 			if (rc == -EACCES)
1359 				rc = 0;
1360 		}
1361 
1362 		return rc;
1363 	}
1364 	dev->n_native_sectors = native_sectors;
1365 
1366 	/* nothing to do? */
1367 	if (native_sectors <= sectors || !unlock_hpa) {
1368 		if (!print_info || native_sectors == sectors)
1369 			return 0;
1370 
1371 		if (native_sectors > sectors)
1372 			ata_dev_info(dev,
1373 				"HPA detected: current %llu, native %llu\n",
1374 				(unsigned long long)sectors,
1375 				(unsigned long long)native_sectors);
1376 		else if (native_sectors < sectors)
1377 			ata_dev_warn(dev,
1378 				"native sectors (%llu) is smaller than sectors (%llu)\n",
1379 				(unsigned long long)native_sectors,
1380 				(unsigned long long)sectors);
1381 		return 0;
1382 	}
1383 
1384 	/* let's unlock HPA */
1385 	rc = ata_set_max_sectors(dev, native_sectors);
1386 	if (rc == -EACCES) {
1387 		/* if device aborted the command, skip HPA resizing */
1388 		ata_dev_warn(dev,
1389 			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1390 			     (unsigned long long)sectors,
1391 			     (unsigned long long)native_sectors);
1392 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1393 		return 0;
1394 	} else if (rc)
1395 		return rc;
1396 
1397 	/* re-read IDENTIFY data */
1398 	rc = ata_dev_reread_id(dev, 0);
1399 	if (rc) {
1400 		ata_dev_err(dev,
1401 			    "failed to re-read IDENTIFY data after HPA resizing\n");
1402 		return rc;
1403 	}
1404 
1405 	if (print_info) {
1406 		u64 new_sectors = ata_id_n_sectors(dev->id);
1407 		ata_dev_info(dev,
1408 			"HPA unlocked: %llu -> %llu, native %llu\n",
1409 			(unsigned long long)sectors,
1410 			(unsigned long long)new_sectors,
1411 			(unsigned long long)native_sectors);
1412 	}
1413 
1414 	return 0;
1415 }
1416 
1417 /**
1418  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1419  *	@id: IDENTIFY DEVICE page to dump
1420  *
1421  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1422  *	page.
1423  *
1424  *	LOCKING:
1425  *	caller.
1426  */
1427 
1428 static inline void ata_dump_id(const u16 *id)
1429 {
1430 	DPRINTK("49==0x%04x  "
1431 		"53==0x%04x  "
1432 		"63==0x%04x  "
1433 		"64==0x%04x  "
1434 		"75==0x%04x  \n",
1435 		id[49],
1436 		id[53],
1437 		id[63],
1438 		id[64],
1439 		id[75]);
1440 	DPRINTK("80==0x%04x  "
1441 		"81==0x%04x  "
1442 		"82==0x%04x  "
1443 		"83==0x%04x  "
1444 		"84==0x%04x  \n",
1445 		id[80],
1446 		id[81],
1447 		id[82],
1448 		id[83],
1449 		id[84]);
1450 	DPRINTK("88==0x%04x  "
1451 		"93==0x%04x\n",
1452 		id[88],
1453 		id[93]);
1454 }
1455 
1456 /**
1457  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1458  *	@id: IDENTIFY data to compute xfer mask from
1459  *
1460  *	Compute the xfermask for this device. This is not as trivial
1461  *	as it seems if we must consider early devices correctly.
1462  *
1463  *	FIXME: pre IDE drive timing (do we care ?).
1464  *
1465  *	LOCKING:
1466  *	None.
1467  *
1468  *	RETURNS:
1469  *	Computed xfermask
1470  */
1471 unsigned long ata_id_xfermask(const u16 *id)
1472 {
1473 	unsigned long pio_mask, mwdma_mask, udma_mask;
1474 
1475 	/* Usual case. Word 53 indicates word 64 is valid */
1476 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1477 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1478 		pio_mask <<= 3;
1479 		pio_mask |= 0x7;
1480 	} else {
1481 		/* If word 64 isn't valid then Word 51 high byte holds
1482 		 * the PIO timing number for the maximum. Turn it into
1483 		 * a mask.
1484 		 */
1485 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1486 		if (mode < 5)	/* Valid PIO range */
1487 			pio_mask = (2 << mode) - 1;
1488 		else
1489 			pio_mask = 1;
1490 
1491 		/* But wait.. there's more. Design your standards by
1492 		 * committee and you too can get a free iordy field to
1493 		 * process. However it's the speeds, not the modes, that
1494 		 * are supported... Note drivers using the timing API
1495 		 * will get this right anyway
1496 		 */
1497 	}
1498 
1499 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1500 
1501 	if (ata_id_is_cfa(id)) {
1502 		/*
1503 		 *	Process compact flash extended modes
1504 		 */
1505 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1506 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1507 
1508 		if (pio)
1509 			pio_mask |= (1 << 5);
1510 		if (pio > 1)
1511 			pio_mask |= (1 << 6);
1512 		if (dma)
1513 			mwdma_mask |= (1 << 3);
1514 		if (dma > 1)
1515 			mwdma_mask |= (1 << 4);
1516 	}
1517 
1518 	udma_mask = 0;
1519 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1520 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1521 
1522 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1523 }
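
/*
 * Illustrative sketch (not part of the driver): the returned mask combines
 * naturally with the helpers above, e.g. to log the fastest transfer mode
 * a device claims to support:
 *
 *	unsigned long xfer_mask = ata_id_xfermask(dev->id);
 *
 *	ata_dev_info(dev, "max mode %s\n", ata_mode_string(xfer_mask));
 *
 * ata_mode_string() looks only at the highest set bit, so this prints
 * something like "UDMA/133" for a current drive.
 */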
1524 
1525 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1526 {
1527 	struct completion *waiting = qc->private_data;
1528 
1529 	complete(waiting);
1530 }
1531 
1532 /**
1533  *	ata_exec_internal_sg - execute libata internal command
1534  *	@dev: Device to which the command is sent
1535  *	@tf: Taskfile registers for the command and the result
1536  *	@cdb: CDB for packet command
1537  *	@dma_dir: Data transfer direction of the command
1538  *	@sgl: sg list for the data buffer of the command
1539  *	@n_elem: Number of sg entries
1540  *	@timeout: Timeout in msecs (0 for default)
1541  *
1542  *	Executes libata internal command with timeout.  @tf contains
1543  *	command on entry and result on return.  Timeout and error
1544  *	conditions are reported via return value.  No recovery action
1545  *	is taken after a command times out.  It's caller's duty to
1546  *	clean up after timeout.
1547  *
1548  *	LOCKING:
1549  *	None.  Should be called with kernel context, might sleep.
1550  *
1551  *	RETURNS:
1552  *	Zero on success, AC_ERR_* mask on failure
1553  */
1554 unsigned ata_exec_internal_sg(struct ata_device *dev,
1555 			      struct ata_taskfile *tf, const u8 *cdb,
1556 			      int dma_dir, struct scatterlist *sgl,
1557 			      unsigned int n_elem, unsigned long timeout)
1558 {
1559 	struct ata_link *link = dev->link;
1560 	struct ata_port *ap = link->ap;
1561 	u8 command = tf->command;
1562 	int auto_timeout = 0;
1563 	struct ata_queued_cmd *qc;
1564 	unsigned int tag, preempted_tag;
1565 	u32 preempted_sactive, preempted_qc_active;
1566 	int preempted_nr_active_links;
1567 	DECLARE_COMPLETION_ONSTACK(wait);
1568 	unsigned long flags;
1569 	unsigned int err_mask;
1570 	int rc;
1571 
1572 	spin_lock_irqsave(ap->lock, flags);
1573 
1574 	/* no internal command while frozen */
1575 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1576 		spin_unlock_irqrestore(ap->lock, flags);
1577 		return AC_ERR_SYSTEM;
1578 	}
1579 
1580 	/* initialize internal qc */
1581 
1582 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1583 	 * drivers choke if any other tag is given.  This breaks
1584 	 * ata_tag_internal() test for those drivers.  Don't use new
1585 	 * EH stuff without converting to it.
1586 	 */
1587 	if (ap->ops->error_handler)
1588 		tag = ATA_TAG_INTERNAL;
1589 	else
1590 		tag = 0;
1591 
1592 	qc = __ata_qc_from_tag(ap, tag);
1593 
1594 	qc->tag = tag;
1595 	qc->scsicmd = NULL;
1596 	qc->ap = ap;
1597 	qc->dev = dev;
1598 	ata_qc_reinit(qc);
1599 
1600 	preempted_tag = link->active_tag;
1601 	preempted_sactive = link->sactive;
1602 	preempted_qc_active = ap->qc_active;
1603 	preempted_nr_active_links = ap->nr_active_links;
1604 	link->active_tag = ATA_TAG_POISON;
1605 	link->sactive = 0;
1606 	ap->qc_active = 0;
1607 	ap->nr_active_links = 0;
1608 
1609 	/* prepare & issue qc */
1610 	qc->tf = *tf;
1611 	if (cdb)
1612 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1613 
1614 	/* some SATA bridges need us to indicate data xfer direction */
1615 	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1616 	    dma_dir == DMA_FROM_DEVICE)
1617 		qc->tf.feature |= ATAPI_DMADIR;
1618 
1619 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1620 	qc->dma_dir = dma_dir;
1621 	if (dma_dir != DMA_NONE) {
1622 		unsigned int i, buflen = 0;
1623 		struct scatterlist *sg;
1624 
1625 		for_each_sg(sgl, sg, n_elem, i)
1626 			buflen += sg->length;
1627 
1628 		ata_sg_init(qc, sgl, n_elem);
1629 		qc->nbytes = buflen;
1630 	}
1631 
1632 	qc->private_data = &wait;
1633 	qc->complete_fn = ata_qc_complete_internal;
1634 
1635 	ata_qc_issue(qc);
1636 
1637 	spin_unlock_irqrestore(ap->lock, flags);
1638 
1639 	if (!timeout) {
1640 		if (ata_probe_timeout)
1641 			timeout = ata_probe_timeout * 1000;
1642 		else {
1643 			timeout = ata_internal_cmd_timeout(dev, command);
1644 			auto_timeout = 1;
1645 		}
1646 	}
1647 
1648 	if (ap->ops->error_handler)
1649 		ata_eh_release(ap);
1650 
1651 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1652 
1653 	if (ap->ops->error_handler)
1654 		ata_eh_acquire(ap);
1655 
1656 	ata_sff_flush_pio_task(ap);
1657 
1658 	if (!rc) {
1659 		spin_lock_irqsave(ap->lock, flags);
1660 
1661 		/* We're racing with irq here.  If we lose, the
1662 		 * following test prevents us from completing the qc
1663 		 * twice.  If we win, the port is frozen and will be
1664 		 * cleaned up by ->post_internal_cmd().
1665 		 */
1666 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1667 			qc->err_mask |= AC_ERR_TIMEOUT;
1668 
1669 			if (ap->ops->error_handler)
1670 				ata_port_freeze(ap);
1671 			else
1672 				ata_qc_complete(qc);
1673 
1674 			if (ata_msg_warn(ap))
1675 				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1676 					     command);
1677 		}
1678 
1679 		spin_unlock_irqrestore(ap->lock, flags);
1680 	}
1681 
1682 	/* do post_internal_cmd */
1683 	if (ap->ops->post_internal_cmd)
1684 		ap->ops->post_internal_cmd(qc);
1685 
1686 	/* perform minimal error analysis */
1687 	if (qc->flags & ATA_QCFLAG_FAILED) {
1688 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1689 			qc->err_mask |= AC_ERR_DEV;
1690 
1691 		if (!qc->err_mask)
1692 			qc->err_mask |= AC_ERR_OTHER;
1693 
1694 		if (qc->err_mask & ~AC_ERR_OTHER)
1695 			qc->err_mask &= ~AC_ERR_OTHER;
1696 	}
1697 
1698 	/* finish up */
1699 	spin_lock_irqsave(ap->lock, flags);
1700 
1701 	*tf = qc->result_tf;
1702 	err_mask = qc->err_mask;
1703 
1704 	ata_qc_free(qc);
1705 	link->active_tag = preempted_tag;
1706 	link->sactive = preempted_sactive;
1707 	ap->qc_active = preempted_qc_active;
1708 	ap->nr_active_links = preempted_nr_active_links;
1709 
1710 	spin_unlock_irqrestore(ap->lock, flags);
1711 
1712 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1713 		ata_internal_cmd_timed_out(dev, command);
1714 
1715 	return err_mask;
1716 }
1717 
1718 /**
1719  *	ata_exec_internal - execute libata internal command
1720  *	@dev: Device to which the command is sent
1721  *	@tf: Taskfile registers for the command and the result
1722  *	@cdb: CDB for packet command
1723  *	@dma_dir: Data transfer direction of the command
1724  *	@buf: Data buffer of the command
1725  *	@buflen: Length of data buffer
1726  *	@timeout: Timeout in msecs (0 for default)
1727  *
1728  *	Wrapper around ata_exec_internal_sg() which takes simple
1729  *	buffer instead of sg list.
1730  *
1731  *	LOCKING:
1732  *	None.  Should be called with kernel context, might sleep.
1733  *
1734  *	RETURNS:
1735  *	Zero on success, AC_ERR_* mask on failure
1736  */
1737 unsigned ata_exec_internal(struct ata_device *dev,
1738 			   struct ata_taskfile *tf, const u8 *cdb,
1739 			   int dma_dir, void *buf, unsigned int buflen,
1740 			   unsigned long timeout)
1741 {
1742 	struct scatterlist *psg = NULL, sg;
1743 	unsigned int n_elem = 0;
1744 
1745 	if (dma_dir != DMA_NONE) {
1746 		WARN_ON(!buf);
1747 		sg_init_one(&sg, buf, buflen);
1748 		psg = &sg;
1749 		n_elem++;
1750 	}
1751 
1752 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1753 				    timeout);
1754 }
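
/*
 * Illustrative sketch (not part of the driver): a typical non-data internal
 * command, modeled on what ata_dev_set_xfermode() below does when
 * programming the transfer mode with SET FEATURES:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_SET_FEATURES;
 *	tf.feature = SETFEATURES_XFER;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *	tf.nsect = dev->xfer_mode;
 *
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 *
 * A zero @timeout selects the default per-command timeout.
 */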
1755 
1756 /**
1757  *	ata_pio_need_iordy	-	check if iordy needed
1758  *	@adev: ATA device
1759  *
1760  *	Check if the current speed of the device requires IORDY. Used
1761  *	by various controllers for chip configuration.
1762  */
1763 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1764 {
1765 	/* Don't set IORDY if we're preparing for reset.  IORDY may
1766 	 * lead to controller lock up on certain controllers if the
1767 	 * port is not occupied.  See bko#11703 for details.
1768 	 */
1769 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1770 		return 0;
1771 	/* Controller doesn't support IORDY.  Probably a pointless
1772 	 * check as the caller should know this.
1773 	 */
1774 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1775 		return 0;
1776 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1777 	if (ata_id_is_cfa(adev->id)
1778 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1779 		return 0;
1780 	/* PIO3 and higher it is mandatory */
1781 	if (adev->pio_mode > XFER_PIO_2)
1782 		return 1;
1783 	/* We turn it on when possible */
1784 	if (ata_id_has_iordy(adev->id))
1785 		return 1;
1786 	return 0;
1787 }
1788 
1789 /**
1790  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1791  *	@adev: ATA device
1792  *
1793  *	Compute the highest mode possible if we are not using iordy. Return
1794  *	-1 if no iordy mode is available.
1795  */
1796 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1797 {
1798 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1799 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1800 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1801 		/* Is the speed faster than the drive allows non IORDY ? */
1802 		if (pio) {
1803 			/* This is cycle times not frequency - watch the logic! */
1804 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1805 				return 3 << ATA_SHIFT_PIO;
1806 			return 7 << ATA_SHIFT_PIO;
1807 		}
1808 	}
1809 	return 3 << ATA_SHIFT_PIO;
1810 }
1811 
1812 /**
1813  *	ata_do_dev_read_id		-	default ID read method
1814  *	@dev: device
1815  *	@tf: proposed taskfile
1816  *	@id: data buffer
1817  *
1818  *	Issue the identify taskfile and hand back the buffer containing
1819  *	identify data. For some RAID controllers and for pre ATA devices
1820  *	this function is wrapped or replaced by the driver
1821  */
1822 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1823 					struct ata_taskfile *tf, u16 *id)
1824 {
1825 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1826 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1827 }
1828 
1829 /**
1830  *	ata_dev_read_id - Read ID data from the specified device
1831  *	@dev: target device
1832  *	@p_class: pointer to class of the target device (may be changed)
1833  *	@flags: ATA_READID_* flags
1834  *	@id: buffer to read IDENTIFY data into
1835  *
1836  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1837  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1838  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1839  *	for pre-ATA4 drives.
1840  *
1841  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1842  *	now we abort if we hit that case.
1843  *
1844  *	LOCKING:
1845  *	Kernel thread context (may sleep)
1846  *
1847  *	RETURNS:
1848  *	0 on success, -errno otherwise.
1849  */
1850 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1851 		    unsigned int flags, u16 *id)
1852 {
1853 	struct ata_port *ap = dev->link->ap;
1854 	unsigned int class = *p_class;
1855 	struct ata_taskfile tf;
1856 	unsigned int err_mask = 0;
1857 	const char *reason;
1858 	bool is_semb = class == ATA_DEV_SEMB;
1859 	int may_fallback = 1, tried_spinup = 0;
1860 	int rc;
1861 
1862 	if (ata_msg_ctl(ap))
1863 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1864 
1865 retry:
1866 	ata_tf_init(dev, &tf);
1867 
1868 	switch (class) {
1869 	case ATA_DEV_SEMB:
1870 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1871 	case ATA_DEV_ATA:
1872 	case ATA_DEV_ZAC:
1873 		tf.command = ATA_CMD_ID_ATA;
1874 		break;
1875 	case ATA_DEV_ATAPI:
1876 		tf.command = ATA_CMD_ID_ATAPI;
1877 		break;
1878 	default:
1879 		rc = -ENODEV;
1880 		reason = "unsupported class";
1881 		goto err_out;
1882 	}
1883 
1884 	tf.protocol = ATA_PROT_PIO;
1885 
1886 	/* Some devices choke if TF registers contain garbage.  Make
1887 	 * sure those are properly initialized.
1888 	 */
1889 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1890 
1891 	/* Device presence detection is unreliable on some
1892 	 * controllers.  Always poll IDENTIFY if available.
1893 	 */
1894 	tf.flags |= ATA_TFLAG_POLLING;
1895 
1896 	if (ap->ops->read_id)
1897 		err_mask = ap->ops->read_id(dev, &tf, id);
1898 	else
1899 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1900 
1901 	if (err_mask) {
1902 		if (err_mask & AC_ERR_NODEV_HINT) {
1903 			ata_dev_dbg(dev, "NODEV after polling detection\n");
1904 			return -ENOENT;
1905 		}
1906 
1907 		if (is_semb) {
1908 			ata_dev_info(dev,
1909 		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1910 			/* SEMB is not supported yet */
1911 			*p_class = ATA_DEV_SEMB_UNSUP;
1912 			return 0;
1913 		}
1914 
1915 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1916 			/* Device or controller might have reported
1917 			 * the wrong device class.  Give a shot at the
1918 			 * other IDENTIFY if the current one is
1919 			 * aborted by the device.
1920 			 */
1921 			if (may_fallback) {
1922 				may_fallback = 0;
1923 
1924 				if (class == ATA_DEV_ATA)
1925 					class = ATA_DEV_ATAPI;
1926 				else
1927 					class = ATA_DEV_ATA;
1928 				goto retry;
1929 			}
1930 
1931 			/* Control reaches here iff the device aborted
1932 			 * both flavors of IDENTIFYs which happens
1933 			 * sometimes with phantom devices.
1934 			 */
1935 			ata_dev_dbg(dev,
1936 				    "both IDENTIFYs aborted, assuming NODEV\n");
1937 			return -ENOENT;
1938 		}
1939 
1940 		rc = -EIO;
1941 		reason = "I/O error";
1942 		goto err_out;
1943 	}
1944 
1945 	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1946 		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1947 			    "class=%d may_fallback=%d tried_spinup=%d\n",
1948 			    class, may_fallback, tried_spinup);
1949 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1950 			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1951 	}
1952 
1953 	/* Falling back doesn't make sense if ID data was read
1954 	 * successfully at least once.
1955 	 */
1956 	may_fallback = 0;
1957 
1958 	swap_buf_le16(id, ATA_ID_WORDS);
1959 
1960 	/* sanity check */
1961 	rc = -EINVAL;
1962 	reason = "device reports invalid type";
1963 
1964 	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1965 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1966 			goto err_out;
1967 		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1968 							ata_id_is_ata(id)) {
1969 			ata_dev_dbg(dev,
1970 				"host indicates ignore ATA devices, ignored\n");
1971 			return -ENOENT;
1972 		}
1973 	} else {
1974 		if (ata_id_is_ata(id))
1975 			goto err_out;
1976 	}
1977 
1978 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1979 		tried_spinup = 1;
1980 		/*
1981 		 * Drive powered-up in standby mode, and requires a specific
1982 		 * SET_FEATURES spin-up subcommand before it will accept
1983 		 * anything other than the original IDENTIFY command.
1984 		 */
1985 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1986 		if (err_mask && id[2] != 0x738c) {
1987 			rc = -EIO;
1988 			reason = "SPINUP failed";
1989 			goto err_out;
1990 		}
1991 		/*
1992 		 * If the drive initially returned incomplete IDENTIFY info,
1993 		 * we now must reissue the IDENTIFY command.
1994 		 */
1995 		if (id[2] == 0x37c8)
1996 			goto retry;
1997 	}
1998 
1999 	if ((flags & ATA_READID_POSTRESET) &&
2000 	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2001 		/*
2002 		 * The exact sequence expected by certain pre-ATA4 drives is:
2003 		 * SRST RESET
2004 		 * IDENTIFY (optional in early ATA)
2005 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2006 		 * anything else..
2007 		 * Some drives were very specific about that exact sequence.
2008 		 *
2009 		 * Note that ATA4 says lba is mandatory so the second check
2010 		 * should never trigger.
2011 		 */
2012 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2013 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2014 			if (err_mask) {
2015 				rc = -EIO;
2016 				reason = "INIT_DEV_PARAMS failed";
2017 				goto err_out;
2018 			}
2019 
2020 			/* current CHS translation info (id[53-58]) might be
2021 			 * changed. reread the identify device info.
2022 			 */
2023 			flags &= ~ATA_READID_POSTRESET;
2024 			goto retry;
2025 		}
2026 	}
2027 
2028 	*p_class = class;
2029 
2030 	return 0;
2031 
2032  err_out:
2033 	if (ata_msg_warn(ap))
2034 		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2035 			     reason, err_mask);
2036 	return rc;
2037 }
2038 
2039 static int ata_do_link_spd_horkage(struct ata_device *dev)
2040 {
2041 	struct ata_link *plink = ata_dev_phys_link(dev);
2042 	u32 target, target_limit;
2043 
2044 	if (!sata_scr_valid(plink))
2045 		return 0;
2046 
2047 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2048 		target = 1;
2049 	else
2050 		return 0;
2051 
2052 	target_limit = (1 << target) - 1;
2053 
2054 	/* if already on stricter limit, no need to push further */
2055 	if (plink->sata_spd_limit <= target_limit)
2056 		return 0;
2057 
2058 	plink->sata_spd_limit = target_limit;
2059 
2060 	/* Request another EH round by returning -EAGAIN if link is
2061 	 * going faster than the target speed.  Forward progress is
2062 	 * guaranteed by setting sata_spd_limit to target_limit above.
2063 	 */
2064 	if (plink->sata_spd > target) {
2065 		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2066 			     sata_spd_string(target));
2067 		return -EAGAIN;
2068 	}
2069 	return 0;
2070 }
2071 
2072 static inline u8 ata_dev_knobble(struct ata_device *dev)
2073 {
2074 	struct ata_port *ap = dev->link->ap;
2075 
2076 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2077 		return 0;
2078 
2079 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2080 }
2081 
2082 static int ata_dev_config_ncq(struct ata_device *dev,
2083 			       char *desc, size_t desc_sz)
2084 {
2085 	struct ata_port *ap = dev->link->ap;
2086 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2087 	unsigned int err_mask;
2088 	char *aa_desc = "";
2089 
2090 	if (!ata_id_has_ncq(dev->id)) {
2091 		desc[0] = '\0';
2092 		return 0;
2093 	}
2094 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2095 		snprintf(desc, desc_sz, "NCQ (not used)");
2096 		return 0;
2097 	}
2098 	if (ap->flags & ATA_FLAG_NCQ) {
2099 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2100 		dev->flags |= ATA_DFLAG_NCQ;
2101 	}
2102 
2103 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2104 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2105 		ata_id_has_fpdma_aa(dev->id)) {
2106 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2107 			SATA_FPDMA_AA);
2108 		if (err_mask) {
2109 			ata_dev_err(dev,
2110 				    "failed to enable AA (error_mask=0x%x)\n",
2111 				    err_mask);
2112 			if (err_mask != AC_ERR_DEV) {
2113 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2114 				return -EIO;
2115 			}
2116 		} else
2117 			aa_desc = ", AA";
2118 	}
2119 
2120 	if (hdepth >= ddepth)
2121 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2122 	else
2123 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2124 			ddepth, aa_desc);
2125 
2126 	if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
2127 	    ata_id_has_ncq_send_and_recv(dev->id)) {
2128 		err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2129 					     0, ap->sector_buf, 1);
2130 		if (err_mask) {
2131 			ata_dev_dbg(dev,
2132 				    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2133 				    err_mask);
2134 		} else {
2135 			u8 *cmds = dev->ncq_send_recv_cmds;
2136 
2137 			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2138 			memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2139 
2140 			if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2141 				ata_dev_dbg(dev, "disabling queued TRIM support\n");
2142 				cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2143 					~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2144 			}
2145 		}
2146 	}
2147 
2148 	return 0;
2149 }
2150 
2151 /**
2152  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2153  *	@dev: Target device to configure
2154  *
2155  *	Configure @dev according to @dev->id.  Generic and low-level
2156  *	driver specific fixups are also applied.
2157  *
2158  *	LOCKING:
2159  *	Kernel thread context (may sleep)
2160  *
2161  *	RETURNS:
2162  *	0 on success, -errno otherwise
2163  */
2164 int ata_dev_configure(struct ata_device *dev)
2165 {
2166 	struct ata_port *ap = dev->link->ap;
2167 	struct ata_eh_context *ehc = &dev->link->eh_context;
2168 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2169 	const u16 *id = dev->id;
2170 	unsigned long xfer_mask;
2171 	unsigned int err_mask;
2172 	char revbuf[7];		/* XYZ-99\0 */
2173 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2174 	char modelbuf[ATA_ID_PROD_LEN+1];
2175 	int rc;
2176 
2177 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2178 		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2179 		return 0;
2180 	}
2181 
2182 	if (ata_msg_probe(ap))
2183 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2184 
2185 	/* set horkage */
2186 	dev->horkage |= ata_dev_blacklisted(dev);
2187 	ata_force_horkage(dev);
2188 
2189 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2190 		ata_dev_info(dev, "unsupported device, disabling\n");
2191 		ata_dev_disable(dev);
2192 		return 0;
2193 	}
2194 
2195 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2196 	    dev->class == ATA_DEV_ATAPI) {
2197 		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2198 			     atapi_enabled ? "not supported with this driver"
2199 			     : "disabled");
2200 		ata_dev_disable(dev);
2201 		return 0;
2202 	}
2203 
2204 	rc = ata_do_link_spd_horkage(dev);
2205 	if (rc)
2206 		return rc;
2207 
2208 	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2209 	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2210 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2211 		dev->horkage |= ATA_HORKAGE_NOLPM;
2212 
2213 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2214 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2215 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2216 	}
2217 
2218 	/* let ACPI work its magic */
2219 	rc = ata_acpi_on_devcfg(dev);
2220 	if (rc)
2221 		return rc;
2222 
2223 	/* massage HPA, do it early as it might change IDENTIFY data */
2224 	rc = ata_hpa_resize(dev);
2225 	if (rc)
2226 		return rc;
2227 
2228 	/* print device capabilities */
2229 	if (ata_msg_probe(ap))
2230 		ata_dev_dbg(dev,
2231 			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2232 			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2233 			    __func__,
2234 			    id[49], id[82], id[83], id[84],
2235 			    id[85], id[86], id[87], id[88]);
2236 
2237 	/* initialize to-be-configured parameters */
2238 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2239 	dev->max_sectors = 0;
2240 	dev->cdb_len = 0;
2241 	dev->n_sectors = 0;
2242 	dev->cylinders = 0;
2243 	dev->heads = 0;
2244 	dev->sectors = 0;
2245 	dev->multi_count = 0;
2246 
2247 	/*
2248 	 * common ATA, ATAPI feature tests
2249 	 */
2250 
2251 	/* find max transfer mode; for printk only */
2252 	xfer_mask = ata_id_xfermask(id);
2253 
2254 	if (ata_msg_probe(ap))
2255 		ata_dump_id(id);
2256 
2257 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2258 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2259 			sizeof(fwrevbuf));
2260 
2261 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2262 			sizeof(modelbuf));
2263 
2264 	/* ATA-specific feature tests */
2265 	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2266 		if (ata_id_is_cfa(id)) {
2267 			/* CPRM may make this media unusable */
2268 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2269 				ata_dev_warn(dev,
2270 	"supports DRM functions and may not be fully accessible\n");
2271 			snprintf(revbuf, 7, "CFA");
2272 		} else {
2273 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2274 			/* Warn the user if the device has TPM extensions */
2275 			if (ata_id_has_tpm(id))
2276 				ata_dev_warn(dev,
2277 	"supports DRM functions and may not be fully accessible\n");
2278 		}
2279 
2280 		dev->n_sectors = ata_id_n_sectors(id);
2281 
2282 		/* get current R/W Multiple count setting */
2283 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2284 			unsigned int max = dev->id[47] & 0xff;
2285 			unsigned int cnt = dev->id[59] & 0xff;
2286 			/* only recognize/allow powers of two here */
2287 			if (is_power_of_2(max) && is_power_of_2(cnt))
2288 				if (cnt <= max)
2289 					dev->multi_count = cnt;
2290 		}
2291 
2292 		if (ata_id_has_lba(id)) {
2293 			const char *lba_desc;
2294 			char ncq_desc[24];
2295 
2296 			lba_desc = "LBA";
2297 			dev->flags |= ATA_DFLAG_LBA;
2298 			if (ata_id_has_lba48(id)) {
2299 				dev->flags |= ATA_DFLAG_LBA48;
2300 				lba_desc = "LBA48";
2301 
2302 				if (dev->n_sectors >= (1UL << 28) &&
2303 				    ata_id_has_flush_ext(id))
2304 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2305 			}
2306 
2307 			/* config NCQ */
2308 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2309 			if (rc)
2310 				return rc;
2311 
2312 			/* print device info to dmesg */
2313 			if (ata_msg_drv(ap) && print_info) {
2314 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2315 					     revbuf, modelbuf, fwrevbuf,
2316 					     ata_mode_string(xfer_mask));
2317 				ata_dev_info(dev,
2318 					     "%llu sectors, multi %u: %s %s\n",
2319 					(unsigned long long)dev->n_sectors,
2320 					dev->multi_count, lba_desc, ncq_desc);
2321 			}
2322 		} else {
2323 			/* CHS */
2324 
2325 			/* Default translation */
2326 			dev->cylinders	= id[1];
2327 			dev->heads	= id[3];
2328 			dev->sectors	= id[6];
2329 
2330 			if (ata_id_current_chs_valid(id)) {
2331 				/* Current CHS translation is valid. */
2332 				dev->cylinders = id[54];
2333 				dev->heads     = id[55];
2334 				dev->sectors   = id[56];
2335 			}
2336 
2337 			/* print device info to dmesg */
2338 			if (ata_msg_drv(ap) && print_info) {
2339 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2340 					     revbuf,	modelbuf, fwrevbuf,
2341 					     ata_mode_string(xfer_mask));
2342 				ata_dev_info(dev,
2343 					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2344 					     (unsigned long long)dev->n_sectors,
2345 					     dev->multi_count, dev->cylinders,
2346 					     dev->heads, dev->sectors);
2347 			}
2348 		}
2349 
2350 		/* Check and mark DevSlp capability. Get DevSlp timing variables
2351 		 * from SATA Settings page of Identify Device Data Log.
2352 		 */
2353 		if (ata_id_has_devslp(dev->id)) {
2354 			u8 *sata_setting = ap->sector_buf;
2355 			int i, j;
2356 
2357 			dev->flags |= ATA_DFLAG_DEVSLP;
2358 			err_mask = ata_read_log_page(dev,
2359 						     ATA_LOG_SATA_ID_DEV_DATA,
2360 						     ATA_LOG_SATA_SETTINGS,
2361 						     sata_setting,
2362 						     1);
2363 			if (err_mask)
2364 				ata_dev_dbg(dev,
2365 					    "failed to get Identify Device Data, Emask 0x%x\n",
2366 					    err_mask);
2367 			else
2368 				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2369 					j = ATA_LOG_DEVSLP_OFFSET + i;
2370 					dev->devslp_timing[i] = sata_setting[j];
2371 				}
2372 		}
2373 
2374 		dev->cdb_len = 16;
2375 	}
2376 
2377 	/* ATAPI-specific feature tests */
2378 	else if (dev->class == ATA_DEV_ATAPI) {
2379 		const char *cdb_intr_string = "";
2380 		const char *atapi_an_string = "";
2381 		const char *dma_dir_string = "";
2382 		u32 sntf;
2383 
2384 		rc = atapi_cdb_len(id);
2385 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2386 			if (ata_msg_warn(ap))
2387 				ata_dev_warn(dev, "unsupported CDB len\n");
2388 			rc = -EINVAL;
2389 			goto err_out_nosup;
2390 		}
2391 		dev->cdb_len = (unsigned int) rc;
2392 
2393 		/* Enable ATAPI AN if both the host and device have
2394 		 * the support.  If PMP is attached, SNTF is required
2395 		 * to enable ATAPI AN to discern between PHY status
2396 		 * changed notifications and ATAPI ANs.
2397 		 */
2398 		if (atapi_an &&
2399 		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2400 		    (!sata_pmp_attached(ap) ||
2401 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2402 			/* issue SET feature command to turn this on */
2403 			err_mask = ata_dev_set_feature(dev,
2404 					SETFEATURES_SATA_ENABLE, SATA_AN);
2405 			if (err_mask)
2406 				ata_dev_err(dev,
2407 					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2408 					    err_mask);
2409 			else {
2410 				dev->flags |= ATA_DFLAG_AN;
2411 				atapi_an_string = ", ATAPI AN";
2412 			}
2413 		}
2414 
2415 		if (ata_id_cdb_intr(dev->id)) {
2416 			dev->flags |= ATA_DFLAG_CDB_INTR;
2417 			cdb_intr_string = ", CDB intr";
2418 		}
2419 
2420 		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2421 			dev->flags |= ATA_DFLAG_DMADIR;
2422 			dma_dir_string = ", DMADIR";
2423 		}
2424 
2425 		if (ata_id_has_da(dev->id)) {
2426 			dev->flags |= ATA_DFLAG_DA;
2427 			zpodd_init(dev);
2428 		}
2429 
2430 		/* print device info to dmesg */
2431 		if (ata_msg_drv(ap) && print_info)
2432 			ata_dev_info(dev,
2433 				     "ATAPI: %s, %s, max %s%s%s%s\n",
2434 				     modelbuf, fwrevbuf,
2435 				     ata_mode_string(xfer_mask),
2436 				     cdb_intr_string, atapi_an_string,
2437 				     dma_dir_string);
2438 	}
2439 
2440 	/* determine max_sectors */
2441 	dev->max_sectors = ATA_MAX_SECTORS;
2442 	if (dev->flags & ATA_DFLAG_LBA48)
2443 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2444 
2445 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2446 	   200 sectors */
2447 	if (ata_dev_knobble(dev)) {
2448 		if (ata_msg_drv(ap) && print_info)
2449 			ata_dev_info(dev, "applying bridge limits\n");
2450 		dev->udma_mask &= ATA_UDMA5;
2451 		dev->max_sectors = ATA_MAX_SECTORS;
2452 	}
2453 
2454 	if ((dev->class == ATA_DEV_ATAPI) &&
2455 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2456 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2457 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2458 	}
2459 
2460 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2461 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2462 					 dev->max_sectors);
2463 
2464 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2465 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2466 					 dev->max_sectors);
2467 
2468 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2469 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2470 
2471 	if (ap->ops->dev_config)
2472 		ap->ops->dev_config(dev);
2473 
2474 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2475 		/* Let the user know. We don't want to disallow opens for
2476 		   rescue purposes, or in case the vendor is just a blithering
2477 		   idiot. Do this after the dev_config call as some controllers
2478 		   with buggy firmware may want to avoid reporting false device
2479 		   bugs */
2480 
2481 		if (print_info) {
2482 			ata_dev_warn(dev,
2483 "Drive reports diagnostics failure. This may indicate a drive\n");
2484 			ata_dev_warn(dev,
2485 "fault or invalid emulation. Contact drive vendor for information.\n");
2486 		}
2487 	}
2488 
2489 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2490 		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2491 		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2492 	}
2493 
2494 	return 0;
2495 
2496 err_out_nosup:
2497 	if (ata_msg_probe(ap))
2498 		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2499 	return rc;
2500 }
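
/*
 * Illustrative sketch (not part of libata): the ->dev_config hook called
 * above lets an LLD apply controller-specific per-device limits after the
 * generic configuration has run.  The "foo" names and the 128-sector DMA
 * limitation are hypothetical.
 *
 *	static void foo_dev_config(struct ata_device *dev)
 *	{
 *		// assumed controller limit: 128 sectors per DMA transfer
 *		dev->max_sectors = min_t(unsigned int, dev->max_sectors, 128);
 *	}
 *
 * installed with ".dev_config = foo_dev_config" in the driver's port
 * operations.
 */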
2501 
2502 /**
2503  *	ata_cable_40wire	-	return 40 wire cable type
2504  *	@ap: port
2505  *
2506  *	Helper method for drivers which want to hardwire 40 wire cable
2507  *	detection.
2508  */
2509 
2510 int ata_cable_40wire(struct ata_port *ap)
2511 {
2512 	return ATA_CBL_PATA40;
2513 }
2514 
2515 /**
2516  *	ata_cable_80wire	-	return 80 wire cable type
2517  *	@ap: port
2518  *
2519  *	Helper method for drivers which want to hardwire 80 wire cable
2520  *	detection.
2521  */
2522 
2523 int ata_cable_80wire(struct ata_port *ap)
2524 {
2525 	return ATA_CBL_PATA80;
2526 }
2527 
2528 /**
2529  *	ata_cable_unknown	-	return unknown PATA cable.
2530  *	@ap: port
2531  *
2532  *	Helper method for drivers which have no PATA cable detection.
2533  */
2534 
2535 int ata_cable_unknown(struct ata_port *ap)
2536 {
2537 	return ATA_CBL_PATA_UNK;
2538 }
2539 
2540 /**
2541  *	ata_cable_ignore	-	return ignored PATA cable.
2542  *	@ap: port
2543  *
2544  *	Helper method for drivers which don't use cable type to limit
2545  *	transfer mode.
2546  */
2547 int ata_cable_ignore(struct ata_port *ap)
2548 {
2549 	return ATA_CBL_PATA_IGN;
2550 }
2551 
2552 /**
2553  *	ata_cable_sata	-	return SATA cable type
2554  *	@ap: port
2555  *
2556  *	Helper method for drivers which have SATA cables
2557  */
2558 
2559 int ata_cable_sata(struct ata_port *ap)
2560 {
2561 	return ATA_CBL_SATA;
2562 }
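
/*
 * Illustrative sketch (not part of libata): a driver that knows its cable
 * type at build time simply points ->cable_detect at one of the helpers
 * above instead of probing.  The "foo" names are hypothetical; inheriting
 * from ata_bmdma_port_ops assumes a conventional SFF/BMDMA controller.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *		.set_piomode	= foo_set_piomode,
 *	};
 */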
2563 
2564 /**
2565  *	ata_bus_probe - Reset and probe ATA bus
2566  *	@ap: Bus to probe
2567  *
2568  *	Master ATA bus probing function.  Initiates a hardware-dependent
2569  *	bus reset, then attempts to identify any devices found on
2570  *	the bus.
2571  *
2572  *	LOCKING:
2573  *	PCI/etc. bus probe sem.
2574  *
2575  *	RETURNS:
2576  *	Zero on success, negative errno otherwise.
2577  */
2578 
2579 int ata_bus_probe(struct ata_port *ap)
2580 {
2581 	unsigned int classes[ATA_MAX_DEVICES];
2582 	int tries[ATA_MAX_DEVICES];
2583 	int rc;
2584 	struct ata_device *dev;
2585 
2586 	ata_for_each_dev(dev, &ap->link, ALL)
2587 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2588 
2589  retry:
2590 	ata_for_each_dev(dev, &ap->link, ALL) {
2591 		/* If we issue an SRST then an ATA drive (not ATAPI)
2592 		 * may change configuration and be in PIO0 timing. If
2593 		 * we do a hard reset (or are coming from power on)
2594 		 * this is true for ATA or ATAPI. Until we've set a
2595 		 * suitable controller mode we should not touch the
2596 		 * bus as we may be talking too fast.
2597 		 */
2598 		dev->pio_mode = XFER_PIO_0;
2599 		dev->dma_mode = 0xff;
2600 
2601 		/* If the controller has a pio mode setup function
2602 		 * then use it to set the chipset to rights. Don't
2603 		 * touch the DMA setup as that will be dealt with when
2604 		 * configuring devices.
2605 		 */
2606 		if (ap->ops->set_piomode)
2607 			ap->ops->set_piomode(ap, dev);
2608 	}
2609 
2610 	/* reset and determine device classes */
2611 	ap->ops->phy_reset(ap);
2612 
2613 	ata_for_each_dev(dev, &ap->link, ALL) {
2614 		if (dev->class != ATA_DEV_UNKNOWN)
2615 			classes[dev->devno] = dev->class;
2616 		else
2617 			classes[dev->devno] = ATA_DEV_NONE;
2618 
2619 		dev->class = ATA_DEV_UNKNOWN;
2620 	}
2621 
2622 	/* read IDENTIFY page and configure devices. We have to do the identify
2623 	   specific sequence bass-ackwards so that PDIAG- is released by
2624 	   the slave device */
2625 
2626 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2627 		if (tries[dev->devno])
2628 			dev->class = classes[dev->devno];
2629 
2630 		if (!ata_dev_enabled(dev))
2631 			continue;
2632 
2633 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2634 				     dev->id);
2635 		if (rc)
2636 			goto fail;
2637 	}
2638 
2639 	/* Now ask for the cable type as PDIAG- should have been released */
2640 	if (ap->ops->cable_detect)
2641 		ap->cbl = ap->ops->cable_detect(ap);
2642 
2643 	/* We may have SATA bridge glue hiding here irrespective of
2644 	 * the reported cable types and sensed types.  When SATA
2645 	 * drives indicate we have a bridge, we don't know which end
2646 	 * of the link the bridge is, which is a problem.
2647 	 */
2648 	ata_for_each_dev(dev, &ap->link, ENABLED)
2649 		if (ata_id_is_sata(dev->id))
2650 			ap->cbl = ATA_CBL_SATA;
2651 
2652 	/* After the identify sequence we can now set up the devices. We do
2653 	   this in the normal order so that the user doesn't get confused */
2654 
2655 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2656 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2657 		rc = ata_dev_configure(dev);
2658 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2659 		if (rc)
2660 			goto fail;
2661 	}
2662 
2663 	/* configure transfer mode */
2664 	rc = ata_set_mode(&ap->link, &dev);
2665 	if (rc)
2666 		goto fail;
2667 
2668 	ata_for_each_dev(dev, &ap->link, ENABLED)
2669 		return 0;
2670 
2671 	return -ENODEV;
2672 
2673  fail:
2674 	tries[dev->devno]--;
2675 
2676 	switch (rc) {
2677 	case -EINVAL:
2678 		/* eeek, something went very wrong, give up */
2679 		tries[dev->devno] = 0;
2680 		break;
2681 
2682 	case -ENODEV:
2683 		/* give it just one more chance */
2684 		tries[dev->devno] = min(tries[dev->devno], 1);	/* fall through */
2685 	case -EIO:
2686 		if (tries[dev->devno] == 1) {
2687 			/* This is the last chance, better to slow
2688 			 * down than lose it.
2689 			 */
2690 			sata_down_spd_limit(&ap->link, 0);
2691 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2692 		}
2693 	}
2694 
2695 	if (!tries[dev->devno])
2696 		ata_dev_disable(dev);
2697 
2698 	goto retry;
2699 }
2700 
2701 /**
2702  *	sata_print_link_status - Print SATA link status
2703  *	@link: SATA link to printk link status about
2704  *
2705  *	This function prints link speed and status of a SATA link.
2706  *
2707  *	LOCKING:
2708  *	None.
2709  */
2710 static void sata_print_link_status(struct ata_link *link)
2711 {
2712 	u32 sstatus, scontrol, tmp;
2713 
2714 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2715 		return;
2716 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2717 
2718 	if (ata_phys_link_online(link)) {
2719 		tmp = (sstatus >> 4) & 0xf;
2720 		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2721 			      sata_spd_string(tmp), sstatus, scontrol);
2722 	} else {
2723 		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2724 			      sstatus, scontrol);
2725 	}
2726 }
2727 
2728 /**
2729  *	ata_dev_pair		-	return other device on cable
2730  *	@adev: device
2731  *
2732  *	Obtain the other device on the same cable, or if none is
2733  *	present NULL is returned
2734  */
2735 
2736 struct ata_device *ata_dev_pair(struct ata_device *adev)
2737 {
2738 	struct ata_link *link = adev->link;
2739 	struct ata_device *pair = &link->device[1 - adev->devno];
2740 	if (!ata_dev_enabled(pair))
2741 		return NULL;
2742 	return pair;
2743 }
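
/*
 * Illustrative sketch (not part of libata): PATA drivers whose timing
 * registers are shared between master and slave often use ata_dev_pair()
 * when programming one device so the companion's mode is respected.  The
 * "foo" names and the clamping policy are hypothetical.
 *
 *	static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		struct ata_device *pair = ata_dev_pair(adev);
 *		u8 mode = adev->pio_mode;
 *
 *		// shared timing register: use the slower of the two modes
 *		if (pair && pair->pio_mode < mode)
 *			mode = pair->pio_mode;
 *		// ... program the controller's timing registers for "mode" ...
 *	}
 */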
2744 
2745 /**
2746  *	sata_down_spd_limit - adjust SATA spd limit downward
2747  *	@link: Link to adjust SATA spd limit for
2748  *	@spd_limit: Additional limit
2749  *
2750  *	Adjust SATA spd limit of @link downward.  Note that this
2751  *	function only adjusts the limit.  The change must be applied
2752  *	using sata_set_spd().
2753  *
2754  *	If @spd_limit is non-zero, the speed is limited to equal to or
2755  *	lower than @spd_limit if such speed is supported.  If
2756  *	@spd_limit is slower than any supported speed, only the lowest
2757  *	supported speed is allowed.
2758  *
2759  *	LOCKING:
2760  *	Inherited from caller.
2761  *
2762  *	RETURNS:
2763  *	0 on success, negative errno on failure
2764  */
2765 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2766 {
2767 	u32 sstatus, spd, mask;
2768 	int rc, bit;
2769 
2770 	if (!sata_scr_valid(link))
2771 		return -EOPNOTSUPP;
2772 
2773 	/* If SCR can be read, use it to determine the current SPD.
2774 	 * If not, use cached value in link->sata_spd.
2775 	 */
2776 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2777 	if (rc == 0 && ata_sstatus_online(sstatus))
2778 		spd = (sstatus >> 4) & 0xf;
2779 	else
2780 		spd = link->sata_spd;
2781 
2782 	mask = link->sata_spd_limit;
2783 	if (mask <= 1)
2784 		return -EINVAL;
2785 
2786 	/* unconditionally mask off the highest bit */
2787 	bit = fls(mask) - 1;
2788 	mask &= ~(1 << bit);
2789 
2790 	/* Mask off all speeds higher than or equal to the current
2791 	 * one.  Force 1.5Gbps if current SPD is not available.
2792 	 */
2793 	if (spd > 1)
2794 		mask &= (1 << (spd - 1)) - 1;
2795 	else
2796 		mask &= 1;
2797 
2798 	/* were we already at the bottom? */
2799 	if (!mask)
2800 		return -EINVAL;
2801 
2802 	if (spd_limit) {
2803 		if (mask & ((1 << spd_limit) - 1))
2804 			mask &= (1 << spd_limit) - 1;
2805 		else {
2806 			bit = ffs(mask) - 1;
2807 			mask = 1 << bit;
2808 		}
2809 	}
2810 
2811 	link->sata_spd_limit = mask;
2812 
2813 	ata_link_warn(link, "limiting SATA link speed to %s\n",
2814 		      sata_spd_string(fls(mask)));
2815 
2816 	return 0;
2817 }
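
/*
 * Worked example (assuming a 6.0 Gbps capable link): with
 * link->sata_spd_limit == 0x7 (1.5/3.0/6.0 Gbps allowed) and the link
 * currently running at spd == 2 (3.0 Gbps), the code above first drops
 * the top bit (0x7 -> 0x3) and then masks off everything at or above the
 * current speed ((1 << (2 - 1)) - 1 == 0x1), leaving a new limit of 0x1,
 * i.e. 1.5 Gbps only.
 */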
2818 
2819 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2820 {
2821 	struct ata_link *host_link = &link->ap->link;
2822 	u32 limit, target, spd;
2823 
2824 	limit = link->sata_spd_limit;
2825 
2826 	/* Don't configure downstream link faster than upstream link.
2827 	 * It doesn't speed up anything and some PMPs choke on such
2828 	 * configuration.
2829 	 */
2830 	if (!ata_is_host_link(link) && host_link->sata_spd)
2831 		limit &= (1 << host_link->sata_spd) - 1;
2832 
2833 	if (limit == UINT_MAX)
2834 		target = 0;
2835 	else
2836 		target = fls(limit);
2837 
2838 	spd = (*scontrol >> 4) & 0xf;
2839 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2840 
2841 	return spd != target;
2842 }
2843 
2844 /**
2845  *	sata_set_spd_needed - is SATA spd configuration needed
2846  *	@link: Link in question
2847  *
2848  *	Test whether the spd limit in SControl matches
2849  *	@link->sata_spd_limit.  This function is used to determine
2850  *	whether hardreset is necessary to apply SATA spd
2851  *	configuration.
2852  *
2853  *	LOCKING:
2854  *	Inherited from caller.
2855  *
2856  *	RETURNS:
2857  *	1 if SATA spd configuration is needed, 0 otherwise.
2858  */
2859 static int sata_set_spd_needed(struct ata_link *link)
2860 {
2861 	u32 scontrol;
2862 
2863 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2864 		return 1;
2865 
2866 	return __sata_set_spd_needed(link, &scontrol);
2867 }
2868 
2869 /**
2870  *	sata_set_spd - set SATA spd according to spd limit
2871  *	@link: Link to set SATA spd for
2872  *
2873  *	Set SATA spd of @link according to sata_spd_limit.
2874  *
2875  *	LOCKING:
2876  *	Inherited from caller.
2877  *
2878  *	RETURNS:
2879  *	0 if spd doesn't need to be changed, 1 if spd has been
2880  *	changed.  Negative errno if SCR registers are inaccessible.
2881  */
2882 int sata_set_spd(struct ata_link *link)
2883 {
2884 	u32 scontrol;
2885 	int rc;
2886 
2887 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2888 		return rc;
2889 
2890 	if (!__sata_set_spd_needed(link, &scontrol))
2891 		return 0;
2892 
2893 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2894 		return rc;
2895 
2896 	return 1;
2897 }
2898 
2899 /*
2900  * This mode timing computation functionality is ported over from
2901  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2902  */
2903 /*
2904  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2905  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2906  * for UDMA6, which is currently supported only by Maxtor drives.
2907  *
2908  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2909  */
2910 
2911 static const struct ata_timing ata_timing[] = {
2912 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
2913 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
2914 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
2915 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
2916 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
2917 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
2918 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
2919 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
2920 
2921 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
2922 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
2923 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
2924 
2925 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
2926 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
2927 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
2928 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
2929 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
2930 
2931 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
2932 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
2933 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
2934 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
2935 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
2936 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
2937 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
2938 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
2939 
2940 	{ 0xFF }
2941 };
2942 
2943 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2944 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2945 
2946 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2947 {
2948 	q->setup	= EZ(t->setup      * 1000,  T);
2949 	q->act8b	= EZ(t->act8b      * 1000,  T);
2950 	q->rec8b	= EZ(t->rec8b      * 1000,  T);
2951 	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
2952 	q->active	= EZ(t->active     * 1000,  T);
2953 	q->recover	= EZ(t->recover    * 1000,  T);
2954 	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
2955 	q->cycle	= EZ(t->cycle      * 1000,  T);
2956 	q->udma		= EZ(t->udma       * 1000, UT);
2957 }
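
/*
 * Worked example: the ata_timing values are in nanoseconds, and the
 * "* 1000" scaling above means T and UT are expected as clock periods in
 * picoseconds -- existing callers typically pass 1000000000 / bus_clock_kHz.
 * For a 33 MHz bus (T ~= 30000), a 290 ns act8b quantizes to
 * ENOUGH(290 * 1000, 30000) == 10 clocks; the macros always round up so
 * the programmed timing is never faster than requested.
 */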
2958 
2959 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2960 		      struct ata_timing *m, unsigned int what)
2961 {
2962 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2963 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2964 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2965 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2966 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2967 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2968 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2969 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2970 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2971 }
2972 
2973 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2974 {
2975 	const struct ata_timing *t = ata_timing;
2976 
2977 	while (xfer_mode > t->mode)
2978 		t++;
2979 
2980 	if (xfer_mode == t->mode)
2981 		return t;
2982 
2983 	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2984 			__func__, xfer_mode);
2985 
2986 	return NULL;
2987 }
2988 
2989 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2990 		       struct ata_timing *t, int T, int UT)
2991 {
2992 	const u16 *id = adev->id;
2993 	const struct ata_timing *s;
2994 	struct ata_timing p;
2995 
2996 	/*
2997 	 * Find the mode.
2998 	 */
2999 
3000 	if (!(s = ata_timing_find_mode(speed)))
3001 		return -EINVAL;
3002 
3003 	memcpy(t, s, sizeof(*s));
3004 
3005 	/*
3006 	 * If the drive is an EIDE drive, it can tell us it needs extended
3007 	 * PIO/MW_DMA cycle timing.
3008 	 */
3009 
3010 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3011 		memset(&p, 0, sizeof(p));
3012 
3013 		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3014 			if (speed <= XFER_PIO_2)
3015 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3016 			else if ((speed <= XFER_PIO_4) ||
3017 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3018 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3019 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3020 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3021 
3022 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3023 	}
3024 
3025 	/*
3026 	 * Convert the timing to bus clock counts.
3027 	 */
3028 
3029 	ata_timing_quantize(t, t, T, UT);
3030 
3031 	/*
3032 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3033 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3034 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3035 	 */
3036 
3037 	if (speed > XFER_PIO_6) {
3038 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3039 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3040 	}
3041 
3042 	/*
3043 	 * Lengthen active & recovery time so that cycle time is correct.
3044 	 */
3045 
3046 	if (t->act8b + t->rec8b < t->cyc8b) {
3047 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3048 		t->rec8b = t->cyc8b - t->act8b;
3049 	}
3050 
3051 	if (t->active + t->recover < t->cycle) {
3052 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3053 		t->recover = t->cycle - t->active;
3054 	}
3055 
3056 	/* In a few cases quantisation may produce enough errors to
3057 	   leave t->cycle too low for the sum of active and recovery;
3058 	   if so we must correct this. */
3059 	if (t->active + t->recover > t->cycle)
3060 		t->cycle = t->active + t->recover;
3061 
3062 	return 0;
3063 }
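
/*
 * Illustrative sketch (not part of libata): a PATA LLD's ->set_piomode hook
 * usually derives T from its bus clock, has ata_timing_compute() fill in a
 * struct ata_timing and then translates that into chip registers.  The
 * "bar" names, the 33 MHz clock and the register programming are assumed.
 *
 *	static void bar_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *		int T = 1000000000 / 33333;	// 33 MHz bus, period in ps
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t, T, T))
 *			return;
 *		// ... write t.setup, t.active and t.recover (now in clocks)
 *		// into the controller's per-device timing registers ...
 *	}
 */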
3064 
3065 /**
3066  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3067  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3068  *	@cycle: cycle duration in ns
3069  *
3070  *	Return matching xfer mode for @cycle.  The returned mode is of
3071  *	the transfer type specified by @xfer_shift.  If @cycle is too
3072  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3073  *	than the fastest known mode, the fastest mode is returned.
3074  *
3075  *	LOCKING:
3076  *	None.
3077  *
3078  *	RETURNS:
3079  *	Matching xfer_mode, 0xff if no match found.
3080  */
3081 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3082 {
3083 	u8 base_mode = 0xff, last_mode = 0xff;
3084 	const struct ata_xfer_ent *ent;
3085 	const struct ata_timing *t;
3086 
3087 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3088 		if (ent->shift == xfer_shift)
3089 			base_mode = ent->base;
3090 
3091 	for (t = ata_timing_find_mode(base_mode);
3092 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3093 		unsigned short this_cycle;
3094 
3095 		switch (xfer_shift) {
3096 		case ATA_SHIFT_PIO:
3097 		case ATA_SHIFT_MWDMA:
3098 			this_cycle = t->cycle;
3099 			break;
3100 		case ATA_SHIFT_UDMA:
3101 			this_cycle = t->udma;
3102 			break;
3103 		default:
3104 			return 0xff;
3105 		}
3106 
3107 		if (cycle > this_cycle)
3108 			break;
3109 
3110 		last_mode = t->mode;
3111 	}
3112 
3113 	return last_mode;
3114 }
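
/*
 * Worked example: ata_timing_cycle2mode(ATA_SHIFT_PIO, 120) walks the PIO
 * entries of ata_timing[] above and returns XFER_PIO_4, the fastest PIO
 * mode whose 120 ns cycle time still fits the requested 120 ns cycle.
 */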
3115 
3116 /**
3117  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3118  *	@dev: Device to adjust xfer masks
3119  *	@sel: ATA_DNXFER_* selector
3120  *
3121  *	Adjust xfer masks of @dev downward.  Note that this function
3122  *	does not apply the change.  Invoking ata_set_mode() afterwards
3123  *	will apply the limit.
3124  *
3125  *	LOCKING:
3126  *	Inherited from caller.
3127  *
3128  *	RETURNS:
3129  *	0 on success, negative errno on failure
3130  */
3131 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3132 {
3133 	char buf[32];
3134 	unsigned long orig_mask, xfer_mask;
3135 	unsigned long pio_mask, mwdma_mask, udma_mask;
3136 	int quiet, highbit;
3137 
3138 	quiet = !!(sel & ATA_DNXFER_QUIET);
3139 	sel &= ~ATA_DNXFER_QUIET;
3140 
3141 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3142 						  dev->mwdma_mask,
3143 						  dev->udma_mask);
3144 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3145 
3146 	switch (sel) {
3147 	case ATA_DNXFER_PIO:
3148 		highbit = fls(pio_mask) - 1;
3149 		pio_mask &= ~(1 << highbit);
3150 		break;
3151 
3152 	case ATA_DNXFER_DMA:
3153 		if (udma_mask) {
3154 			highbit = fls(udma_mask) - 1;
3155 			udma_mask &= ~(1 << highbit);
3156 			if (!udma_mask)
3157 				return -ENOENT;
3158 		} else if (mwdma_mask) {
3159 			highbit = fls(mwdma_mask) - 1;
3160 			mwdma_mask &= ~(1 << highbit);
3161 			if (!mwdma_mask)
3162 				return -ENOENT;
3163 		}
3164 		break;
3165 
3166 	case ATA_DNXFER_40C:
3167 		udma_mask &= ATA_UDMA_MASK_40C;
3168 		break;
3169 
3170 	case ATA_DNXFER_FORCE_PIO0:
3171 		pio_mask &= 1;	/* fall through */
3172 	case ATA_DNXFER_FORCE_PIO:
3173 		mwdma_mask = 0;
3174 		udma_mask = 0;
3175 		break;
3176 
3177 	default:
3178 		BUG();
3179 	}
3180 
3181 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3182 
3183 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3184 		return -ENOENT;
3185 
3186 	if (!quiet) {
3187 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3188 			snprintf(buf, sizeof(buf), "%s:%s",
3189 				 ata_mode_string(xfer_mask),
3190 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3191 		else
3192 			snprintf(buf, sizeof(buf), "%s",
3193 				 ata_mode_string(xfer_mask));
3194 
3195 		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3196 	}
3197 
3198 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3199 			    &dev->udma_mask);
3200 
3201 	return 0;
3202 }
3203 
3204 static int ata_dev_set_mode(struct ata_device *dev)
3205 {
3206 	struct ata_port *ap = dev->link->ap;
3207 	struct ata_eh_context *ehc = &dev->link->eh_context;
3208 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3209 	const char *dev_err_whine = "";
3210 	int ign_dev_err = 0;
3211 	unsigned int err_mask = 0;
3212 	int rc;
3213 
3214 	dev->flags &= ~ATA_DFLAG_PIO;
3215 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3216 		dev->flags |= ATA_DFLAG_PIO;
3217 
3218 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3219 		dev_err_whine = " (SET_XFERMODE skipped)";
3220 	else {
3221 		if (nosetxfer)
3222 			ata_dev_warn(dev,
3223 				     "NOSETXFER but PATA detected - can't "
3224 				     "skip SETXFER, might malfunction\n");
3225 		err_mask = ata_dev_set_xfermode(dev);
3226 	}
3227 
3228 	if (err_mask & ~AC_ERR_DEV)
3229 		goto fail;
3230 
3231 	/* revalidate */
3232 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3233 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3234 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3235 	if (rc)
3236 		return rc;
3237 
3238 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3239 		/* Old CFA may refuse this command, which is just fine */
3240 		if (ata_id_is_cfa(dev->id))
3241 			ign_dev_err = 1;
3242 		/* Catch several broken garbage emulations plus some pre
3243 		   ATA devices */
3244 		if (ata_id_major_version(dev->id) == 0 &&
3245 					dev->pio_mode <= XFER_PIO_2)
3246 			ign_dev_err = 1;
3247 		/* Some very old devices and some bad newer ones fail
3248 		   any kind of SET_XFERMODE request but support PIO0-2
3249 		   timings and no IORDY */
3250 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3251 			ign_dev_err = 1;
3252 	}
3253 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3254 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3255 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3256 	    dev->dma_mode == XFER_MW_DMA_0 &&
3257 	    (dev->id[63] >> 8) & 1)
3258 		ign_dev_err = 1;
3259 
3260 	/* if the device is actually configured correctly, ignore dev err */
3261 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3262 		ign_dev_err = 1;
3263 
3264 	if (err_mask & AC_ERR_DEV) {
3265 		if (!ign_dev_err)
3266 			goto fail;
3267 		else
3268 			dev_err_whine = " (device error ignored)";
3269 	}
3270 
3271 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3272 		dev->xfer_shift, (int)dev->xfer_mode);
3273 
3274 	ata_dev_info(dev, "configured for %s%s\n",
3275 		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3276 		     dev_err_whine);
3277 
3278 	return 0;
3279 
3280  fail:
3281 	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3282 	return -EIO;
3283 }
3284 
3285 /**
3286  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3287  *	@link: link on which timings will be programmed
3288  *	@r_failed_dev: out parameter for failed device
3289  *
3290  *	Standard implementation of the function used to tune and set
3291  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3292  *	ata_dev_set_mode() fails, pointer to the failing device is
3293  *	returned in @r_failed_dev.
3294  *
3295  *	LOCKING:
3296  *	PCI/etc. bus probe sem.
3297  *
3298  *	RETURNS:
3299  *	0 on success, negative errno otherwise
3300  */
3301 
3302 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3303 {
3304 	struct ata_port *ap = link->ap;
3305 	struct ata_device *dev;
3306 	int rc = 0, used_dma = 0, found = 0;
3307 
3308 	/* step 1: calculate xfer_mask */
3309 	ata_for_each_dev(dev, link, ENABLED) {
3310 		unsigned long pio_mask, dma_mask;
3311 		unsigned int mode_mask;
3312 
3313 		mode_mask = ATA_DMA_MASK_ATA;
3314 		if (dev->class == ATA_DEV_ATAPI)
3315 			mode_mask = ATA_DMA_MASK_ATAPI;
3316 		else if (ata_id_is_cfa(dev->id))
3317 			mode_mask = ATA_DMA_MASK_CFA;
3318 
3319 		ata_dev_xfermask(dev);
3320 		ata_force_xfermask(dev);
3321 
3322 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3323 
3324 		if (libata_dma_mask & mode_mask)
3325 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3326 						     dev->udma_mask);
3327 		else
3328 			dma_mask = 0;
3329 
3330 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3331 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3332 
3333 		found = 1;
3334 		if (ata_dma_enabled(dev))
3335 			used_dma = 1;
3336 	}
3337 	if (!found)
3338 		goto out;
3339 
3340 	/* step 2: always set host PIO timings */
3341 	ata_for_each_dev(dev, link, ENABLED) {
3342 		if (dev->pio_mode == 0xff) {
3343 			ata_dev_warn(dev, "no PIO support\n");
3344 			rc = -EINVAL;
3345 			goto out;
3346 		}
3347 
3348 		dev->xfer_mode = dev->pio_mode;
3349 		dev->xfer_shift = ATA_SHIFT_PIO;
3350 		if (ap->ops->set_piomode)
3351 			ap->ops->set_piomode(ap, dev);
3352 	}
3353 
3354 	/* step 3: set host DMA timings */
3355 	ata_for_each_dev(dev, link, ENABLED) {
3356 		if (!ata_dma_enabled(dev))
3357 			continue;
3358 
3359 		dev->xfer_mode = dev->dma_mode;
3360 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3361 		if (ap->ops->set_dmamode)
3362 			ap->ops->set_dmamode(ap, dev);
3363 	}
3364 
3365 	/* step 4: update devices' xfer mode */
3366 	ata_for_each_dev(dev, link, ENABLED) {
3367 		rc = ata_dev_set_mode(dev);
3368 		if (rc)
3369 			goto out;
3370 	}
3371 
3372 	/* Record simplex status. If we selected DMA then the other
3373 	 * host channels are not permitted to do so.
3374 	 */
3375 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3376 		ap->host->simplex_claimed = ap;
3377 
3378  out:
3379 	if (rc)
3380 		*r_failed_dev = dev;
3381 	return rc;
3382 }
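
/*
 * Illustrative sketch (not part of libata): controllers that need extra
 * work around mode programming can supply their own ->set_mode hook and
 * delegate the heavy lifting to ata_do_set_mode().  The "foo" names and
 * the post-configuration step are hypothetical.
 *
 *	static int foo_set_mode(struct ata_link *link,
 *				struct ata_device **r_failed_dev)
 *	{
 *		int rc = ata_do_set_mode(link, r_failed_dev);
 *
 *		if (rc == 0)
 *			foo_enable_fifo(link->ap);	// assumed chip quirk
 *		return rc;
 *	}
 */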
3383 
3384 /**
3385  *	ata_wait_ready - wait for link to become ready
3386  *	@link: link to be waited on
3387  *	@deadline: deadline jiffies for the operation
3388  *	@check_ready: callback to check link readiness
3389  *
3390  *	Wait for @link to become ready.  @check_ready should return
3391  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3392  *	link doesn't seem to be occupied, other errno for other error
3393  *	conditions.
3394  *
3395  *	Transient -ENODEV conditions are allowed for
3396  *	ATA_TMOUT_FF_WAIT.
3397  *
3398  *	LOCKING:
3399  *	EH context.
3400  *
3401  *	RETURNS:
3402  *	0 if @link is ready before @deadline; otherwise, -errno.
3403  */
3404 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3405 		   int (*check_ready)(struct ata_link *link))
3406 {
3407 	unsigned long start = jiffies;
3408 	unsigned long nodev_deadline;
3409 	int warned = 0;
3410 
3411 	/* choose which 0xff timeout to use, read comment in libata.h */
3412 	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3413 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3414 	else
3415 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3416 
3417 	/* Slave readiness can't be tested separately from master.  On
3418 	 * M/S emulation configuration, this function should be called
3419 	 * only on the master and it will handle both master and slave.
3420 	 */
3421 	WARN_ON(link == link->ap->slave_link);
3422 
3423 	if (time_after(nodev_deadline, deadline))
3424 		nodev_deadline = deadline;
3425 
3426 	while (1) {
3427 		unsigned long now = jiffies;
3428 		int ready, tmp;
3429 
3430 		ready = tmp = check_ready(link);
3431 		if (ready > 0)
3432 			return 0;
3433 
3434 		/*
3435 		 * -ENODEV could be transient.  Ignore -ENODEV if link
3436 		 * is online.  Also, some SATA devices take a long
3437 		 * time to clear 0xff after reset.  Wait for
3438 		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3439 		 * offline.
3440 		 *
3441 		 * Note that some PATA controllers (pata_ali) explode
3442 		 * if status register is read more than once when
3443 		 * there's no device attached.
3444 		 */
3445 		if (ready == -ENODEV) {
3446 			if (ata_link_online(link))
3447 				ready = 0;
3448 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3449 				 !ata_link_offline(link) &&
3450 				 time_before(now, nodev_deadline))
3451 				ready = 0;
3452 		}
3453 
3454 		if (ready)
3455 			return ready;
3456 		if (time_after(now, deadline))
3457 			return -EBUSY;
3458 
3459 		if (!warned && time_after(now, start + 5 * HZ) &&
3460 		    (deadline - now > 3 * HZ)) {
3461 			ata_link_warn(link,
3462 				"link is slow to respond, please be patient "
3463 				"(ready=%d)\n", tmp);
3464 			warned = 1;
3465 		}
3466 
3467 		ata_msleep(link->ap, 50);
3468 	}
3469 }
3470 
3471 /**
3472  *	ata_wait_after_reset - wait for link to become ready after reset
3473  *	@link: link to be waited on
3474  *	@deadline: deadline jiffies for the operation
3475  *	@check_ready: callback to check link readiness
3476  *
3477  *	Wait for @link to become ready after reset.
3478  *
3479  *	LOCKING:
3480  *	EH context.
3481  *
3482  *	RETURNS:
3483  *	0 if @link is ready before @deadline; otherwise, -errno.
3484  */
3485 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3486 				int (*check_ready)(struct ata_link *link))
3487 {
3488 	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3489 
3490 	return ata_wait_ready(link, deadline, check_ready);
3491 }
3492 
3493 /**
3494  *	sata_link_debounce - debounce SATA phy status
3495  *	@link: ATA link to debounce SATA phy status for
3496  *	@params: timing parameters { interval, duration, timeout } in msec
3497  *	@deadline: deadline jiffies for the operation
3498  *
3499  *	Make sure SStatus of @link reaches stable state, determined by
3500  *	holding the same value where DET is not 1 for @duration polled
3501  *	every @interval, before @timeout.  Timeout constrains the
3502  *	beginning of the stable state.  Because DET gets stuck at 1 on
3503  *	some controllers after hot unplugging, this function waits
3504  *	until the timeout and then returns 0 if DET is stable at 1.
3505  *
3506  *	@timeout is further limited by @deadline.  The sooner of the
3507  *	two is used.
3508  *
3509  *	LOCKING:
3510  *	Kernel thread context (may sleep)
3511  *
3512  *	RETURNS:
3513  *	0 on success, -errno on failure.
3514  */
3515 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3516 		       unsigned long deadline)
3517 {
3518 	unsigned long interval = params[0];
3519 	unsigned long duration = params[1];
3520 	unsigned long last_jiffies, t;
3521 	u32 last, cur;
3522 	int rc;
3523 
3524 	t = ata_deadline(jiffies, params[2]);
3525 	if (time_before(t, deadline))
3526 		deadline = t;
3527 
3528 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3529 		return rc;
3530 	cur &= 0xf;
3531 
3532 	last = cur;
3533 	last_jiffies = jiffies;
3534 
3535 	while (1) {
3536 		ata_msleep(link->ap, interval);
3537 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3538 			return rc;
3539 		cur &= 0xf;
3540 
3541 		/* DET stable? */
3542 		if (cur == last) {
3543 			if (cur == 1 && time_before(jiffies, deadline))
3544 				continue;
3545 			if (time_after(jiffies,
3546 				       ata_deadline(last_jiffies, duration)))
3547 				return 0;
3548 			continue;
3549 		}
3550 
3551 		/* unstable, start over */
3552 		last = cur;
3553 		last_jiffies = jiffies;
3554 
3555 		/* Check deadline.  If debouncing failed, return
3556 		 * -EPIPE to tell upper layer to lower link speed.
3557 		 */
3558 		if (time_after(jiffies, deadline))
3559 			return -EPIPE;
3560 	}
3561 }
3562 
3563 /**
3564  *	sata_link_resume - resume SATA link
3565  *	@link: ATA link to resume SATA
3566  *	@params: timing parameters { interval, duration, timeout } in msec
3567  *	@deadline: deadline jiffies for the operation
3568  *
3569  *	Resume SATA phy @link and debounce it.
3570  *
3571  *	LOCKING:
3572  *	Kernel thread context (may sleep)
3573  *
3574  *	RETURNS:
3575  *	0 on success, -errno on failure.
3576  */
3577 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3578 		     unsigned long deadline)
3579 {
3580 	int tries = ATA_LINK_RESUME_TRIES;
3581 	u32 scontrol, serror;
3582 	int rc;
3583 
3584 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3585 		return rc;
3586 
3587 	/*
3588 	 * Writes to SControl sometimes get ignored under certain
3589 	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3590 	 * cleared.
3591 	 */
3592 	do {
3593 		scontrol = (scontrol & 0x0f0) | 0x300;
3594 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3595 			return rc;
3596 		/*
3597 		 * Some PHYs react badly if SStatus is pounded
3598 		 * immediately after resuming.  Delay 200ms before
3599 		 * debouncing.
3600 		 */
3601 		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3602 			ata_msleep(link->ap, 200);
3603 
3604 		/* is SControl restored correctly? */
3605 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3606 			return rc;
3607 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3608 
3609 	if ((scontrol & 0xf0f) != 0x300) {
3610 		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3611 			     scontrol);
3612 		return 0;
3613 	}
3614 
3615 	if (tries < ATA_LINK_RESUME_TRIES)
3616 		ata_link_warn(link, "link resume succeeded after %d retries\n",
3617 			      ATA_LINK_RESUME_TRIES - tries);
3618 
3619 	if ((rc = sata_link_debounce(link, params, deadline)))
3620 		return rc;
3621 
3622 	/* clear SError, some PHYs require this even for SRST to work */
3623 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3624 		rc = sata_scr_write(link, SCR_ERROR, serror);
3625 
3626 	return rc != -EINVAL ? rc : 0;
3627 }
3628 
3629 /**
3630  *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3631  *	@link: ATA link to manipulate SControl for
3632  *	@policy: LPM policy to configure
3633  *	@spm_wakeup: initiate LPM transition to active state
3634  *
3635  *	Manipulate the IPM field of the SControl register of @link
3636  *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3637  *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3638  *	the link.  This function also clears PHYRDY_CHG before
3639  *	returning.
3640  *
3641  *	LOCKING:
3642  *	EH context.
3643  *
3644  *	RETURNS:
3645  *	0 on success, -errno otherwise.
3646  */
3647 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3648 		      bool spm_wakeup)
3649 {
3650 	struct ata_eh_context *ehc = &link->eh_context;
3651 	bool woken_up = false;
3652 	u32 scontrol;
3653 	int rc;
3654 
3655 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3656 	if (rc)
3657 		return rc;
3658 
3659 	switch (policy) {
3660 	case ATA_LPM_MAX_POWER:
3661 		/* disable all LPM transitions */
3662 		scontrol |= (0x7 << 8);
3663 		/* initiate transition to active state */
3664 		if (spm_wakeup) {
3665 			scontrol |= (0x4 << 12);
3666 			woken_up = true;
3667 		}
3668 		break;
3669 	case ATA_LPM_MED_POWER:
3670 		/* allow LPM to PARTIAL */
3671 		scontrol &= ~(0x1 << 8);
3672 		scontrol |= (0x6 << 8);
3673 		break;
3674 	case ATA_LPM_MIN_POWER:
3675 		if (ata_link_nr_enabled(link) > 0)
3676 			/* no restrictions on LPM transitions */
3677 			scontrol &= ~(0x7 << 8);
3678 		else {
3679 			/* empty port, power off */
3680 			scontrol &= ~0xf;
3681 			scontrol |= (0x1 << 2);
3682 		}
3683 		break;
3684 	default:
3685 		WARN_ON(1);
3686 	}
3687 
3688 	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3689 	if (rc)
3690 		return rc;
3691 
3692 	/* give the link time to transition out of the LPM state */
3693 	if (woken_up)
3694 		msleep(10);
3695 
3696 	/* clear PHYRDY_CHG from SError */
3697 	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3698 	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3699 }
3700 
3701 /**
3702  *	ata_std_prereset - prepare for reset
3703  *	@link: ATA link to be reset
3704  *	@deadline: deadline jiffies for the operation
3705  *
3706  *	@link is about to be reset.  Initialize it.  Failure from
3707  *	prereset makes libata abort whole reset sequence and give up
3708  *	that port, so prereset should be best-effort.  It does its
3709  *	best to prepare for reset sequence but if things go wrong, it
3710  *	should just whine, not fail.
3711  *
3712  *	LOCKING:
3713  *	Kernel thread context (may sleep)
3714  *
3715  *	RETURNS:
3716  *	0 on success, -errno otherwise.
3717  */
3718 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3719 {
3720 	struct ata_port *ap = link->ap;
3721 	struct ata_eh_context *ehc = &link->eh_context;
3722 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3723 	int rc;
3724 
3725 	/* if we're about to do hardreset, nothing more to do */
3726 	if (ehc->i.action & ATA_EH_HARDRESET)
3727 		return 0;
3728 
3729 	/* if SATA, resume link */
3730 	if (ap->flags & ATA_FLAG_SATA) {
3731 		rc = sata_link_resume(link, timing, deadline);
3732 		/* whine about phy resume failure but proceed */
3733 		if (rc && rc != -EOPNOTSUPP)
3734 			ata_link_warn(link,
3735 				      "failed to resume link for reset (errno=%d)\n",
3736 				      rc);
3737 	}
3738 
3739 	/* no point in trying softreset on offline link */
3740 	if (ata_phys_link_offline(link))
3741 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3742 
3743 	return 0;
3744 }
3745 
3746 /**
3747  *	sata_link_hardreset - reset link via SATA phy reset
3748  *	@link: link to reset
3749  *	@timing: timing parameters { interval, duration, timeout } in msec
3750  *	@deadline: deadline jiffies for the operation
3751  *	@online: optional out parameter indicating link onlineness
3752  *	@check_ready: optional callback to check link readiness
3753  *
3754  *	SATA phy-reset @link using DET bits of SControl register.
3755  *	After hardreset, link readiness is waited upon using
3756  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3757  *	ata_wait_ready() if @check_ready is specified.  LLDs may
3758  *	omit @check_ready and perform the wait themselves after this
3759  *	function returns.  Device classification is the LLD's
3760  *
3761  *	*@online is set to true iff reset succeeded and @link is online
3762  *	after reset.
3763  *
3764  *	LOCKING:
3765  *	Kernel thread context (may sleep)
3766  *
3767  *	RETURNS:
3768  *	0 on success, -errno otherwise.
3769  */
3770 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3771 			unsigned long deadline,
3772 			bool *online, int (*check_ready)(struct ata_link *))
3773 {
3774 	u32 scontrol;
3775 	int rc;
3776 
3777 	DPRINTK("ENTER\n");
3778 
3779 	if (online)
3780 		*online = false;
3781 
3782 	if (sata_set_spd_needed(link)) {
3783 		/* SATA spec says nothing about how to reconfigure
3784 		 * spd.  To be on the safe side, turn off phy during
3785 		 * reconfiguration.  This works for at least ICH7 AHCI
3786 		 * and Sil3124.
3787 		 */
3788 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3789 			goto out;
3790 
3791 		scontrol = (scontrol & 0x0f0) | 0x304;
3792 
3793 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3794 			goto out;
3795 
3796 		sata_set_spd(link);
3797 	}
3798 
3799 	/* issue phy wake/reset */
3800 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3801 		goto out;
3802 
3803 	scontrol = (scontrol & 0x0f0) | 0x301;
3804 
3805 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3806 		goto out;
3807 
3808 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3809 	 * 10.4.2 says at least 1 ms.
3810 	 */
3811 	ata_msleep(link->ap, 1);
3812 
3813 	/* bring link back */
3814 	rc = sata_link_resume(link, timing, deadline);
3815 	if (rc)
3816 		goto out;
3817 	/* if link is offline nothing more to do */
3818 	if (ata_phys_link_offline(link))
3819 		goto out;
3820 
3821 	/* Link is online.  From this point, -ENODEV too is an error. */
3822 	if (online)
3823 		*online = true;
3824 
3825 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3826 		/* If PMP is supported, we have to do follow-up SRST.
3827 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3828 		 * the first port is empty.  Wait only for
3829 		 * ATA_TMOUT_PMP_SRST_WAIT.
3830 		 */
3831 		if (check_ready) {
3832 			unsigned long pmp_deadline;
3833 
3834 			pmp_deadline = ata_deadline(jiffies,
3835 						    ATA_TMOUT_PMP_SRST_WAIT);
3836 			if (time_after(pmp_deadline, deadline))
3837 				pmp_deadline = deadline;
3838 			ata_wait_ready(link, pmp_deadline, check_ready);
3839 		}
3840 		rc = -EAGAIN;
3841 		goto out;
3842 	}
3843 
3844 	rc = 0;
3845 	if (check_ready)
3846 		rc = ata_wait_ready(link, deadline, check_ready);
3847  out:
3848 	if (rc && rc != -EAGAIN) {
3849 		/* online is set iff link is online && reset succeeded */
3850 		if (online)
3851 			*online = false;
3852 		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3853 	}
3854 	DPRINTK("EXIT, rc=%d\n", rc);
3855 	return rc;
3856 }
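
/*
 * Illustrative sketch only (not part of this file): a driver-private
 * hardreset built on sata_link_hardreset(), supplying a controller
 * specific readiness check.  "my_hardreset" and "my_check_ready" are
 * hypothetical names; compare sata_std_hardreset() below, which skips
 * the wait entirely by passing a NULL @check_ready.
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *			sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline, &online,
 *					   my_check_ready);
 *	}
 */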
3857 
3858 /**
3859  *	sata_std_hardreset - COMRESET w/o waiting or classification
3860  *	@link: link to reset
3861  *	@class: resulting class of attached device
3862  *	@deadline: deadline jiffies for the operation
3863  *
3864  *	Standard SATA COMRESET w/o waiting or classification.
3865  *
3866  *	LOCKING:
3867  *	Kernel thread context (may sleep)
3868  *
3869  *	RETURNS:
3870  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3871  */
3872 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3873 		       unsigned long deadline)
3874 {
3875 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3876 	bool online;
3877 	int rc;
3878 
3879 	/* do hardreset */
3880 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3881 	return online ? -EAGAIN : rc;
3882 }
3883 
3884 /**
3885  *	ata_std_postreset - standard postreset callback
3886  *	@link: the target ata_link
3887  *	@classes: classes of attached devices
3888  *
3889  *	This function is invoked after a successful reset.  Note that
3890  *	the device might have been reset more than once using
3891  *	different reset methods before postreset is invoked.
3892  *
3893  *	LOCKING:
3894  *	Kernel thread context (may sleep)
3895  */
3896 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3897 {
3898 	u32 serror;
3899 
3900 	DPRINTK("ENTER\n");
3901 
3902 	/* reset complete, clear SError */
3903 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3904 		sata_scr_write(link, SCR_ERROR, serror);
3905 
3906 	/* print link status */
3907 	sata_print_link_status(link);
3908 
3909 	DPRINTK("EXIT\n");
3910 }
3911 
3912 /**
3913  *	ata_dev_same_device - Determine whether new ID matches configured device
3914  *	@dev: device to compare against
3915  *	@new_class: class of the new device
3916  *	@new_id: IDENTIFY page of the new device
3917  *
3918  *	Compare @new_class and @new_id against @dev and determine
3919  *	whether @dev is the device indicated by @new_class and
3920  *	@new_id.
3921  *
3922  *	LOCKING:
3923  *	None.
3924  *
3925  *	RETURNS:
3926  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3927  */
3928 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3929 			       const u16 *new_id)
3930 {
3931 	const u16 *old_id = dev->id;
3932 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3933 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3934 
3935 	if (dev->class != new_class) {
3936 		ata_dev_info(dev, "class mismatch %d != %d\n",
3937 			     dev->class, new_class);
3938 		return 0;
3939 	}
3940 
3941 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3942 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3943 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3944 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3945 
3946 	if (strcmp(model[0], model[1])) {
3947 		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3948 			     model[0], model[1]);
3949 		return 0;
3950 	}
3951 
3952 	if (strcmp(serial[0], serial[1])) {
3953 		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3954 			     serial[0], serial[1]);
3955 		return 0;
3956 	}
3957 
3958 	return 1;
3959 }
3960 
3961 /**
3962  *	ata_dev_reread_id - Re-read IDENTIFY data
3963  *	@dev: target ATA device
3964  *	@readid_flags: read ID flags
3965  *
3966  *	Re-read IDENTIFY page and make sure @dev is still attached to
3967  *	the port.
3968  *
3969  *	LOCKING:
3970  *	Kernel thread context (may sleep)
3971  *
3972  *	RETURNS:
3973  *	0 on success, negative errno otherwise
3974  */
3975 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3976 {
3977 	unsigned int class = dev->class;
3978 	u16 *id = (void *)dev->link->ap->sector_buf;
3979 	int rc;
3980 
3981 	/* read ID data */
3982 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3983 	if (rc)
3984 		return rc;
3985 
3986 	/* is the device still there? */
3987 	if (!ata_dev_same_device(dev, class, id))
3988 		return -ENODEV;
3989 
3990 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3991 	return 0;
3992 }
3993 
3994 /**
3995  *	ata_dev_revalidate - Revalidate ATA device
3996  *	@dev: device to revalidate
3997  *	@new_class: new class code
3998  *	@readid_flags: read ID flags
3999  *
4000  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4001  *	port and reconfigure it according to the new IDENTIFY page.
4002  *
4003  *	LOCKING:
4004  *	Kernel thread context (may sleep)
4005  *
4006  *	RETURNS:
4007  *	0 on success, negative errno otherwise
4008  */
4009 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4010 		       unsigned int readid_flags)
4011 {
4012 	u64 n_sectors = dev->n_sectors;
4013 	u64 n_native_sectors = dev->n_native_sectors;
4014 	int rc;
4015 
4016 	if (!ata_dev_enabled(dev))
4017 		return -ENODEV;
4018 
4019 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4020 	if (ata_class_enabled(new_class) &&
4021 	    new_class != ATA_DEV_ATA &&
4022 	    new_class != ATA_DEV_ATAPI &&
4023 	    new_class != ATA_DEV_ZAC &&
4024 	    new_class != ATA_DEV_SEMB) {
4025 		ata_dev_info(dev, "class mismatch %u != %u\n",
4026 			     dev->class, new_class);
4027 		rc = -ENODEV;
4028 		goto fail;
4029 	}
4030 
4031 	/* re-read ID */
4032 	rc = ata_dev_reread_id(dev, readid_flags);
4033 	if (rc)
4034 		goto fail;
4035 
4036 	/* configure device according to the new ID */
4037 	rc = ata_dev_configure(dev);
4038 	if (rc)
4039 		goto fail;
4040 
4041 	/* verify n_sectors hasn't changed */
4042 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4043 	    dev->n_sectors == n_sectors)
4044 		return 0;
4045 
4046 	/* n_sectors has changed */
4047 	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4048 		     (unsigned long long)n_sectors,
4049 		     (unsigned long long)dev->n_sectors);
4050 
4051 	/*
4052 	 * Something could have caused HPA to be unlocked
4053 	 * involuntarily.  If n_native_sectors hasn't changed and the
4054 	 * new size matches it, keep the device.
4055 	 */
4056 	if (dev->n_native_sectors == n_native_sectors &&
4057 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4058 		ata_dev_warn(dev,
4059 			     "new n_sectors matches native, probably "
4060 			     "late HPA unlock, n_sectors updated\n");
4061 		/* use the larger n_sectors */
4062 		return 0;
4063 	}
4064 
4065 	/*
4066 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4067 	 * unlocking HPA in those cases.
4068 	 *
4069 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4070 	 */
4071 	if (dev->n_native_sectors == n_native_sectors &&
4072 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4073 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4074 		ata_dev_warn(dev,
4075 			     "old n_sectors matches native, probably "
4076 			     "late HPA lock, will try to unlock HPA\n");
4077 		/* try unlocking HPA */
4078 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4079 		rc = -EIO;
4080 	} else
4081 		rc = -ENODEV;
4082 
4083 	/* restore original n_[native_]sectors and fail */
4084 	dev->n_native_sectors = n_native_sectors;
4085 	dev->n_sectors = n_sectors;
4086  fail:
4087 	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4088 	return rc;
4089 }
4090 
4091 struct ata_blacklist_entry {
4092 	const char *model_num;
4093 	const char *model_rev;
4094 	unsigned long horkage;
4095 };
4096 
4097 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4098 	/* Devices with DMA related problems under Linux */
4099 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4100 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4101 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4102 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4103 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4104 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4105 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4106 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4107 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4108 	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4109 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4110 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4111 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4112 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4113 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4114 	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4115 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4116 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4117 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4118 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4119 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4120 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4121 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4122 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4123 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4124 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4125 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4126 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4127 	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4128 	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
4129 	/* Odd clown on sil3726/4726 PMPs */
4130 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4131 
4132 	/* Weird ATAPI devices */
4133 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4134 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4135 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4136 	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4137 
4138 	/*
4139 	 * Causes silent data corruption with higher max sects.
4140 	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4141 	 */
4142 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
4143 
4144 	/* Devices we expect to fail diagnostics */
4145 
4146 	/* Devices where NCQ should be avoided */
4147 	/* NCQ is slow */
4148 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4149 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4150 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4151 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4152 	/* NCQ is broken */
4153 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4154 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4155 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4156 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4157 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4158 
4159 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4160 	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4161 						ATA_HORKAGE_FIRMWARE_WARN },
4162 
4163 	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4164 						ATA_HORKAGE_FIRMWARE_WARN },
4165 
4166 	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4167 						ATA_HORKAGE_FIRMWARE_WARN },
4168 
4169 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4170 						ATA_HORKAGE_FIRMWARE_WARN },
4171 
4172 	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4173 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4174 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4175 	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4176 
4177 	/* Blacklist entries taken from Silicon Image 3124/3132
4178 	   Windows driver .inf file - also several Linux problem reports */
4179 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4180 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4181 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4182 
4183 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4184 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4185 
4186 	/* devices which puke on READ_NATIVE_MAX */
4187 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4188 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4189 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4190 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4191 
4192 	/* this one allows HPA unlocking but fails IOs on the area */
4193 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4194 
4195 	/* Devices which report 1 sector over size HPA */
4196 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4197 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4198 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4199 
4200 	/* Devices which get the IVB wrong */
4201 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4202 	/* Maybe we should just blacklist TSSTcorp... */
4203 	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4204 
4205 	/* Devices that do not need bridging limits applied */
4206 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4207 	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4208 
4209 	/* Devices which aren't very happy with higher link speeds */
4210 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4211 	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4212 
4213 	/*
4214 	 * Devices which choke on SETXFER.  Applies only if both the
4215 	 * device and controller are SATA.
4216 	 */
4217 	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4218 	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4219 	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4220 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4221 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4222 
4223 	/* devices that don't properly handle queued TRIM commands */
4224 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4225 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4226 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4227 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4228 	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4229 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4230 	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4231 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4232 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4233 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4234 	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4235 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4236 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4237 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4238 
4239 	/* devices that don't properly handle TRIM commands */
4240 	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
4241 
4242 	/*
4243 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4244 	 * (Return Zero After Trim) flags in the ATA Command Set are
4245 	 * unreliable in the sense that they only define what happens if
4246 	 * the device successfully executed the DSM TRIM command. TRIM
4247 	 * is only advisory, however, and the device is free to silently
4248 	 * ignore all or parts of the request.
4249 	 *
4250 	 * Whitelist drives that are known to reliably return zeroes
4251 	 * after TRIM.
4252 	 */
4253 
4254 	/*
4255 	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4256 	 * that model before whitelisting all other intel SSDs.
4257 	 */
4258 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
4259 
4260 	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4261 	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4262 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4263 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4264 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4265 	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4266 	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4267 
4268 	/*
4269 	 * Some WD SATA-I drives spin up and down erratically when the link
4270  * is put into the slumber mode.  We don't have a full list of the
4271 	 * affected devices.  Disable LPM if the device matches one of the
4272 	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4273 	 * lost too.
4274 	 *
4275 	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4276 	 */
4277 	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4278 	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4279 	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4280 	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4281 	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4282 	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4283 	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4284 
4285 	/* End Marker */
4286 	{ }
4287 };
4288 
4289 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4290 {
4291 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4292 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4293 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4294 
4295 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4296 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4297 
4298 	while (ad->model_num) {
4299 		if (glob_match(ad->model_num, model_num)) {
4300 			if (ad->model_rev == NULL)
4301 				return ad->horkage;
4302 			if (glob_match(ad->model_rev, model_rev))
4303 				return ad->horkage;
4304 		}
4305 		ad++;
4306 	}
4307 	return 0;
4308 }
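
/*
 * Matching above is first-hit: the table is walked top to bottom and
 * the horkage of the first entry whose model glob (and, if non-NULL,
 * firmware revision glob) matches the IDENTIFY strings wins.  As a
 * worked example (drive strings hypothetical), a disk reporting model
 * "Micron_M550_MTFDDAK256MAY" with firmware "MU01" is caught by the
 * "Micron_M5[15]0_*" / "MU01" entry, since
 *
 *	glob_match("Micron_M5[15]0_*", "Micron_M550_MTFDDAK256MAY")
 *
 * is true ('[15]' accepts the '5'), yielding
 * ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM.
 */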
4309 
4310 static int ata_dma_blacklisted(const struct ata_device *dev)
4311 {
4312 	/* We don't support polling DMA.  Blacklist DMA for ATAPI
4313 	 * devices with CDB-intr (and use PIO instead) if the LLDD
4314 	 * handles interrupts only in the HSM_ST_LAST state.
4315 	 */
4316 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4317 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4318 		return 1;
4319 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4320 }
4321 
4322 /**
4323  *	ata_is_40wire		-	check drive side detection
4324  *	@dev: device
4325  *
4326  *	Perform drive side detection decoding, allowing for device vendors
4327  *	who can't follow the documentation.
4328  */
4329 
4330 static int ata_is_40wire(struct ata_device *dev)
4331 {
4332 	if (dev->horkage & ATA_HORKAGE_IVB)
4333 		return ata_drive_40wire_relaxed(dev->id);
4334 	return ata_drive_40wire(dev->id);
4335 }
4336 
4337 /**
4338  *	cable_is_40wire		-	40/80/SATA decider
4339  *	@ap: port to consider
4340  *
4341  *	This function encapsulates the policy for speed management
4342  *	in one place. At the moment we don't cache the result but
4343  *	there is a good case for setting ap->cbl to the result when
4344  *	we are called with unknown cables (and figuring out if it
4345  *	impacts hotplug at all).
4346  *
4347  *	Return 1 if the cable appears to be 40 wire.
4348  */
4349 
4350 static int cable_is_40wire(struct ata_port *ap)
4351 {
4352 	struct ata_link *link;
4353 	struct ata_device *dev;
4354 
4355 	/* If the controller thinks we are 40 wire, we are. */
4356 	if (ap->cbl == ATA_CBL_PATA40)
4357 		return 1;
4358 
4359 	/* If the controller thinks we are 80 wire, we are. */
4360 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4361 		return 0;
4362 
4363 	/* If the system is known to use a short 40 wire cable (e.g.
4364 	 * a laptop), then we allow 80 wire modes even if the drive
4365 	 * isn't sure.
4366 	 */
4367 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4368 		return 0;
4369 
4370 	/* If the controller doesn't know, we scan.
4371 	 *
4372 	 * Note: We look for all 40 wire detects at this point.  Any
4373 	 *       80 wire detect is taken to be 80 wire cable because
4374 	 * - in many setups only the one drive (slave if present) will
4375 	 *   give a valid detect
4376 	 * - if you have a non-detect-capable drive you don't want it
4377 	 *   to colour the choice
4378 	 */
4379 	ata_for_each_link(link, ap, EDGE) {
4380 		ata_for_each_dev(dev, link, ENABLED) {
4381 			if (!ata_is_40wire(dev))
4382 				return 0;
4383 		}
4384 	}
4385 	return 1;
4386 }
4387 
4388 /**
4389  *	ata_dev_xfermask - Compute supported xfermask of the given device
4390  *	@dev: Device to compute xfermask for
4391  *
4392  *	Compute supported xfermask of @dev and store it in
4393  *	dev->*_mask.  This function is responsible for applying all
4394  *	known limits including host controller limits, device
4395  *	blacklist, etc...
4396  *
4397  *	LOCKING:
4398  *	None.
4399  */
4400 static void ata_dev_xfermask(struct ata_device *dev)
4401 {
4402 	struct ata_link *link = dev->link;
4403 	struct ata_port *ap = link->ap;
4404 	struct ata_host *host = ap->host;
4405 	unsigned long xfer_mask;
4406 
4407 	/* controller modes available */
4408 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4409 				      ap->mwdma_mask, ap->udma_mask);
4410 
4411 	/* drive modes available */
4412 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4413 				       dev->mwdma_mask, dev->udma_mask);
4414 	xfer_mask &= ata_id_xfermask(dev->id);
4415 
4416 	/*
4417 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4418 	 *	cable
4419 	 */
4420 	if (ata_dev_pair(dev)) {
4421 		/* No PIO5 or PIO6 */
4422 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4423 	/* No MWDMA3 or MWDMA4 */
4424 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4425 	}
4426 
4427 	if (ata_dma_blacklisted(dev)) {
4428 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4429 		ata_dev_warn(dev,
4430 			     "device is on DMA blacklist, disabling DMA\n");
4431 	}
4432 
4433 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4434 	    host->simplex_claimed && host->simplex_claimed != ap) {
4435 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4436 		ata_dev_warn(dev,
4437 			     "simplex DMA is claimed by other device, disabling DMA\n");
4438 	}
4439 
4440 	if (ap->flags & ATA_FLAG_NO_IORDY)
4441 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4442 
4443 	if (ap->ops->mode_filter)
4444 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4445 
4446 	/* Apply cable rule here.  Don't apply it early because when
4447 	 * we handle hot plug the cable type can itself change.
4448 	 * Check this last so that we know if the transfer rate was
4449 	 * solely limited by the cable.
4450 	 * Unknown or 80 wire cables reported host side are checked
4451 	 * drive side as well. Cases where we know a 40 wire cable
4452 	 * is used safely for 80 are not checked here.
4453 	 */
4454 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4455 		/* UDMA/44 or higher would be available */
4456 		if (cable_is_40wire(ap)) {
4457 			ata_dev_warn(dev,
4458 				     "limited to UDMA/33 due to 40-wire cable\n");
4459 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4460 		}
4461 
4462 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4463 			    &dev->mwdma_mask, &dev->udma_mask);
4464 }
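
/*
 * A minimal sketch of the packing used above, assuming the cumulative
 * mode masks from <linux/ata.h> (ATA_PIO4, ATA_MWDMA2, ATA_UDMA6):
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * packs the three per-type masks into one value that the filters
 * above can apply in a single operation, then splits it back apart.
 */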
4465 
4466 /**
4467  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4468  *	@dev: Device to which command will be sent
4469  *
4470  *	Issue SET FEATURES - XFER MODE command to device @dev
4471  *	on its port.
4472  *
4473  *	LOCKING:
4474  *	PCI/etc. bus probe sem.
4475  *
4476  *	RETURNS:
4477  *	0 on success, AC_ERR_* mask otherwise.
4478  */
4479 
4480 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4481 {
4482 	struct ata_taskfile tf;
4483 	unsigned int err_mask;
4484 
4485 	/* set up set-features taskfile */
4486 	DPRINTK("set features - xfer mode\n");
4487 
4488 	/* Some controllers and ATAPI devices show flaky interrupt
4489 	 * behavior after setting xfer mode.  Use polling instead.
4490 	 */
4491 	ata_tf_init(dev, &tf);
4492 	tf.command = ATA_CMD_SET_FEATURES;
4493 	tf.feature = SETFEATURES_XFER;
4494 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4495 	tf.protocol = ATA_PROT_NODATA;
4496 	/* If we are using IORDY we must send the mode setting command */
4497 	if (ata_pio_need_iordy(dev))
4498 		tf.nsect = dev->xfer_mode;
4499 	/* If the device has IORDY and the controller does not - turn it off */
4500  	else if (ata_id_has_iordy(dev->id))
4501 	else if (ata_id_has_iordy(dev->id))
4502 	else /* In the ancient relic department - skip all of this */
4503 		return 0;
4504 
4505 	/* On some disks, this command causes spin-up, so we need a longer timeout */
4506 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4507 
4508 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4509 	return err_mask;
4510 }
4511 
4512 /**
4513  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4514  *	@dev: Device to which command will be sent
4515  *	@enable: Whether to enable or disable the feature
4516  *	@feature: The sector count represents the feature to set
4517  *	@feature: The feature to set, passed in the sector count field
4518  *
4519  *	Issue SET FEATURES - SATA FEATURES command to device @dev,
4520  *	placing @feature in the sector count field.
4521  *	LOCKING:
4522  *	PCI/etc. bus probe sem.
4523  *
4524  *	RETURNS:
4525  *	0 on success, AC_ERR_* mask otherwise.
4526  */
4527 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4528 {
4529 	struct ata_taskfile tf;
4530 	unsigned int err_mask;
4531 
4532 	/* set up set-features taskfile */
4533 	DPRINTK("set features - SATA features\n");
4534 
4535 	ata_tf_init(dev, &tf);
4536 	tf.command = ATA_CMD_SET_FEATURES;
4537 	tf.feature = enable;
4538 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4539 	tf.protocol = ATA_PROT_NODATA;
4540 	tf.nsect = feature;
4541 
4542 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4543 
4544 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4545 	return err_mask;
4546 }
4547 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
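
/*
 * Usage sketch, assuming the SETFEATURES_SATA_ENABLE and SATA_DIPM
 * constants from <linux/ata.h>: enabling device-initiated interface
 * power management would look roughly like
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *	if (err_mask)
 *		ata_dev_warn(dev, "failed to enable DIPM (err_mask=0x%x)\n",
 *			     err_mask);
 */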
4548 
4549 /**
4550  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4551  *	@dev: Device to which command will be sent
4552  *	@heads: Number of heads (taskfile parameter)
4553  *	@sectors: Number of sectors (taskfile parameter)
4554  *
4555  *	LOCKING:
4556  *	Kernel thread context (may sleep)
4557  *
4558  *	RETURNS:
4559  *	0 on success, AC_ERR_* mask otherwise.
4560  */
4561 static unsigned int ata_dev_init_params(struct ata_device *dev,
4562 					u16 heads, u16 sectors)
4563 {
4564 	struct ata_taskfile tf;
4565 	unsigned int err_mask;
4566 
4567 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4568 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4569 		return AC_ERR_INVALID;
4570 
4571 	/* set up init dev params taskfile */
4572 	DPRINTK("init dev params\n");
4573 
4574 	ata_tf_init(dev, &tf);
4575 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4576 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4577 	tf.protocol = ATA_PROT_NODATA;
4578 	tf.nsect = sectors;
4579 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4580 
4581 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4582 	/* A clean abort indicates an original or just out of spec drive
4583 	   and we should continue as we issue the setup based on the
4584 	   drive-reported working geometry */
4585 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4586 		err_mask = 0;
4587 
4588 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4589 	return err_mask;
4590 }
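
/*
 * Worked example of the taskfile encoding above: for the classic
 * 16-head, 63-sectors-per-track CHS translation, tf.nsect is set to
 * 63 and tf.device gets (16 - 1) & 0x0f == 0x0f ORed in.
 */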
4591 
4592 /**
4593  *	ata_sg_clean - Unmap DMA memory associated with command
4594  *	@qc: Command containing DMA memory to be released
4595  *
4596  *	Unmap all mapped DMA memory associated with this command.
4597  *
4598  *	LOCKING:
4599  *	spin_lock_irqsave(host lock)
4600  */
4601 void ata_sg_clean(struct ata_queued_cmd *qc)
4602 {
4603 	struct ata_port *ap = qc->ap;
4604 	struct scatterlist *sg = qc->sg;
4605 	int dir = qc->dma_dir;
4606 
4607 	WARN_ON_ONCE(sg == NULL);
4608 
4609 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4610 
4611 	if (qc->n_elem)
4612 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4613 
4614 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4615 	qc->sg = NULL;
4616 }
4617 
4618 /**
4619  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4620  *	@qc: Metadata associated with taskfile to check
4621  *
4622  *	Allow low-level driver to filter ATA PACKET commands, returning
4623  *	a status indicating whether or not it is OK to use DMA for the
4624  *	supplied PACKET command.
4625  *
4626  *	LOCKING:
4627  *	spin_lock_irqsave(host lock)
4628  *
4629  *	RETURNS: 0 when ATAPI DMA can be used
4630  *               nonzero otherwise
4631  */
4632 int atapi_check_dma(struct ata_queued_cmd *qc)
4633 {
4634 	struct ata_port *ap = qc->ap;
4635 
4636 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4637 	 * few ATAPI devices choke on such DMA requests.
4638 	 */
4639 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4640 	    unlikely(qc->nbytes & 15))
4641 		return 1;
4642 
4643 	if (ap->ops->check_atapi_dma)
4644 		return ap->ops->check_atapi_dma(qc);
4645 
4646 	return 0;
4647 }
4648 
4649 /**
4650  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4651  *	@qc: ATA command in question
4652  *
4653  *	Non-NCQ commands cannot run with any other command, NCQ or
4654  *	not.  As the upper layer only knows the queue depth, we are
4655  *	responsible for maintaining exclusion.  This function checks
4656  *	whether a new command @qc can be issued.
4657  *
4658  *	LOCKING:
4659  *	spin_lock_irqsave(host lock)
4660  *
4661  *	RETURNS:
4662  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4663  */
4664 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4665 {
4666 	struct ata_link *link = qc->dev->link;
4667 
4668 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4669 		if (!ata_tag_valid(link->active_tag))
4670 			return 0;
4671 	} else {
4672 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4673 			return 0;
4674 	}
4675 
4676 	return ATA_DEFER_LINK;
4677 }
4678 
4679 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4680 
4681 /**
4682  *	ata_sg_init - Associate command with scatter-gather table.
4683  *	@qc: Command to be associated
4684  *	@sg: Scatter-gather table.
4685  *	@n_elem: Number of elements in s/g table.
4686  *
4687  *	Initialize the data-related elements of queued_cmd @qc
4688  *	to point to a scatter-gather table @sg, containing @n_elem
4689  *	elements.
4690  *
4691  *	LOCKING:
4692  *	spin_lock_irqsave(host lock)
4693  */
4694 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4695 		 unsigned int n_elem)
4696 {
4697 	qc->sg = sg;
4698 	qc->n_elem = n_elem;
4699 	qc->cursg = qc->sg;
4700 }
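
/*
 * Typical usage sketch from a SCSI translation path (the surrounding
 * variable names are assumed): hand the SCSI command's scatterlist to
 * the qc before issue:
 *
 *	ata_sg_init(qc, scsi_sglist(scmd), scsi_sg_count(scmd));
 */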
4701 
4702 /**
4703  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4704  *	@qc: Command with scatter-gather table to be mapped.
4705  *
4706  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4707  *
4708  *	LOCKING:
4709  *	spin_lock_irqsave(host lock)
4710  *
4711  *	RETURNS:
4712  *	Zero on success, negative on error.
4713  *
4714  */
4715 static int ata_sg_setup(struct ata_queued_cmd *qc)
4716 {
4717 	struct ata_port *ap = qc->ap;
4718 	unsigned int n_elem;
4719 
4720 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4721 
4722 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4723 	if (n_elem < 1)
4724 		return -1;
4725 
4726 	DPRINTK("%d sg elements mapped\n", n_elem);
4727 	qc->orig_n_elem = qc->n_elem;
4728 	qc->n_elem = n_elem;
4729 	qc->flags |= ATA_QCFLAG_DMAMAP;
4730 
4731 	return 0;
4732 }
4733 
4734 /**
4735  *	swap_buf_le16 - swap halves of 16-bit words in place
4736  *	@buf:  Buffer to swap
4737  *	@buf_words:  Number of 16-bit words in buffer.
4738  *
4739  *	Swap halves of 16-bit words if needed to convert from
4740  *	little-endian byte order to native cpu byte order, or
4741  *	vice-versa.
4742  *
4743  *	LOCKING:
4744  *	Inherited from caller.
4745  */
4746 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4747 {
4748 #ifdef __BIG_ENDIAN
4749 	unsigned int i;
4750 
4751 	for (i = 0; i < buf_words; i++)
4752 		buf[i] = le16_to_cpu(buf[i]);
4753 #endif /* __BIG_ENDIAN */
4754 }
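
/*
 * Example: IDENTIFY data arrives from the device as 256 little-endian
 * 16-bit words.  A PIO LLD that has read the sector into a u16 buffer
 * (here the hypothetical "id_buf") converts it to native byte order,
 * a no-op on little-endian machines, with:
 *
 *	swap_buf_le16(id_buf, ATA_ID_WORDS);
 */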
4755 
4756 /**
4757  *	ata_qc_new_init - Request an available ATA command, and initialize it
4758  *	@dev: Device from which we request an available command structure
4759  *	@tag: tag
4760  *
4761  *	LOCKING:
4762  *	None.
4763  */
4764 
4765 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4766 {
4767 	struct ata_port *ap = dev->link->ap;
4768 	struct ata_queued_cmd *qc;
4769 
4770 	/* no command while frozen */
4771 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4772 		return NULL;
4773 
4774 	/* libsas case */
4775 	if (ap->flags & ATA_FLAG_SAS_HOST) {
4776 		tag = ata_sas_allocate_tag(ap);
4777 		if (tag < 0)
4778 			return NULL;
4779 	}
4780 
4781 	qc = __ata_qc_from_tag(ap, tag);
4782 	qc->tag = tag;
4783 	qc->scsicmd = NULL;
4784 	qc->ap = ap;
4785 	qc->dev = dev;
4786 
4787 	ata_qc_reinit(qc);
4788 
4789 	return qc;
4790 }
4791 
4792 /**
4793  *	ata_qc_free - free unused ata_queued_cmd
4794  *	@qc: Command to complete
4795  *
4796  *	Designed to free an unused ata_queued_cmd object
4797  *	in case something prevents it from being used.
4798  *
4799  *	LOCKING:
4800  *	spin_lock_irqsave(host lock)
4801  */
4802 void ata_qc_free(struct ata_queued_cmd *qc)
4803 {
4804 	struct ata_port *ap;
4805 	unsigned int tag;
4806 
4807 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4808 	ap = qc->ap;
4809 
4810 	qc->flags = 0;
4811 	tag = qc->tag;
4812 	if (likely(ata_tag_valid(tag))) {
4813 		qc->tag = ATA_TAG_POISON;
4814 		if (ap->flags & ATA_FLAG_SAS_HOST)
4815 			ata_sas_free_tag(tag, ap);
4816 	}
4817 }
4818 
4819 void __ata_qc_complete(struct ata_queued_cmd *qc)
4820 {
4821 	struct ata_port *ap;
4822 	struct ata_link *link;
4823 
4824 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4825 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4826 	ap = qc->ap;
4827 	link = qc->dev->link;
4828 
4829 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4830 		ata_sg_clean(qc);
4831 
4832 	/* command should be marked inactive atomically with qc completion */
4833 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4834 		link->sactive &= ~(1 << qc->tag);
4835 		if (!link->sactive)
4836 			ap->nr_active_links--;
4837 	} else {
4838 		link->active_tag = ATA_TAG_POISON;
4839 		ap->nr_active_links--;
4840 	}
4841 
4842 	/* clear exclusive status */
4843 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4844 		     ap->excl_link == link))
4845 		ap->excl_link = NULL;
4846 
4847 	/* atapi: mark qc as inactive to prevent the interrupt handler
4848 	 * from completing the command a second time, before the error
4849 	 * handler is called (when rc != 0 and ATAPI request sense is needed)
4850 	 */
4851 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4852 	ap->qc_active &= ~(1 << qc->tag);
4853 
4854 	/* call completion callback */
4855 	qc->complete_fn(qc);
4856 }
4857 
4858 static void fill_result_tf(struct ata_queued_cmd *qc)
4859 {
4860 	struct ata_port *ap = qc->ap;
4861 
4862 	qc->result_tf.flags = qc->tf.flags;
4863 	ap->ops->qc_fill_rtf(qc);
4864 }
4865 
4866 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4867 {
4868 	struct ata_device *dev = qc->dev;
4869 
4870 	if (ata_is_nodata(qc->tf.protocol))
4871 		return;
4872 
4873 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4874 		return;
4875 
4876 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4877 }
4878 
4879 /**
4880  *	ata_qc_complete - Complete an active ATA command
4881  *	@qc: Command to complete
4882  *
4883  *	Indicate to the mid and upper layers that an ATA command has
4884  *	completed, with either an ok or not-ok status.
4885  *
4886  *	Refrain from calling this function multiple times when
4887  *	successfully completing multiple NCQ commands.
4888  *	ata_qc_complete_multiple() should be used instead, which will
4889  *	properly update IRQ expect state.
4890  *
4891  *	LOCKING:
4892  *	spin_lock_irqsave(host lock)
4893  */
4894 void ata_qc_complete(struct ata_queued_cmd *qc)
4895 {
4896 	struct ata_port *ap = qc->ap;
4897 
4898 	/* XXX: New EH and old EH use different mechanisms to
4899 	 * synchronize EH with regular execution path.
4900 	 *
4901 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4902 	 * Normal execution path is responsible for not accessing a
4903 	 * failed qc.  libata core enforces the rule by returning NULL
4904 	 * from ata_qc_from_tag() for failed qcs.
4905 	 *
4906 	 * Old EH depends on ata_qc_complete() nullifying completion
4907 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4908 	 * not synchronize with interrupt handler.  Only PIO task is
4909 	 * taken care of.
4910 	 */
4911 	if (ap->ops->error_handler) {
4912 		struct ata_device *dev = qc->dev;
4913 		struct ata_eh_info *ehi = &dev->link->eh_info;
4914 
4915 		if (unlikely(qc->err_mask))
4916 			qc->flags |= ATA_QCFLAG_FAILED;
4917 
4918 		/*
4919 		 * Finish internal commands without any further processing
4920 		 * and always with the result TF filled.
4921 		 */
4922 		if (unlikely(ata_tag_internal(qc->tag))) {
4923 			fill_result_tf(qc);
4924 			trace_ata_qc_complete_internal(qc);
4925 			__ata_qc_complete(qc);
4926 			return;
4927 		}
4928 
4929 		/*
4930 		 * Non-internal qc has failed.  Fill the result TF and
4931 		 * summon EH.
4932 		 */
4933 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4934 			fill_result_tf(qc);
4935 			trace_ata_qc_complete_failed(qc);
4936 			ata_qc_schedule_eh(qc);
4937 			return;
4938 		}
4939 
4940 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4941 
4942 		/* read result TF if requested */
4943 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4944 			fill_result_tf(qc);
4945 
4946 		trace_ata_qc_complete_done(qc);
4947 		/* Some commands need post-processing after successful
4948 		 * completion.
4949 		 */
4950 		switch (qc->tf.command) {
4951 		case ATA_CMD_SET_FEATURES:
4952 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4953 			    qc->tf.feature != SETFEATURES_WC_OFF)
4954 				break;
4955 			/* fall through */
4956 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4957 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4958 			/* revalidate device */
4959 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4960 			ata_port_schedule_eh(ap);
4961 			break;
4962 
4963 		case ATA_CMD_SLEEP:
4964 			dev->flags |= ATA_DFLAG_SLEEPING;
4965 			break;
4966 		}
4967 
4968 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4969 			ata_verify_xfer(qc);
4970 
4971 		__ata_qc_complete(qc);
4972 	} else {
4973 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4974 			return;
4975 
4976 		/* read result TF if failed or requested */
4977 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4978 			fill_result_tf(qc);
4979 
4980 		__ata_qc_complete(qc);
4981 	}
4982 }
4983 
4984 /**
4985  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4986  *	@ap: port in question
4987  *	@qc_active: new qc_active mask
4988  *
4989  *	Complete in-flight commands.  This function is meant to be
4990  *	called from the low-level driver's interrupt routine to complete
4991  *	requests normally.  ap->qc_active and @qc_active are compared
4992  *	and commands are completed accordingly.
4993  *
4994  *	Always use this function when completing multiple NCQ commands
4995  *	from IRQ handlers instead of calling ata_qc_complete()
4996  *	multiple times to keep IRQ expect status properly in sync.
4997  *
4998  *	LOCKING:
4999  *	spin_lock_irqsave(host lock)
5000  *
5001  *	RETURNS:
5002  *	Number of completed commands on success, -errno otherwise.
5003  */
5004 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5005 {
5006 	int nr_done = 0;
5007 	u32 done_mask;
5008 
5009 	done_mask = ap->qc_active ^ qc_active;
5010 
5011 	if (unlikely(done_mask & qc_active)) {
5012 		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5013 			     ap->qc_active, qc_active);
5014 		return -EINVAL;
5015 	}
5016 
5017 	while (done_mask) {
5018 		struct ata_queued_cmd *qc;
5019 		unsigned int tag = __ffs(done_mask);
5020 
5021 		qc = ata_qc_from_tag(ap, tag);
5022 		if (qc) {
5023 			ata_qc_complete(qc);
5024 			nr_done++;
5025 		}
5026 		done_mask &= ~(1 << tag);
5027 	}
5028 
5029 	return nr_done;
5030 }
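
/*
 * A sketch of the intended call site (the register accessor is
 * hypothetical): an NCQ-capable LLD's interrupt handler reads back
 * which tags the controller still considers active and passes that
 * mask straight in; everything that dropped out of ap->qc_active gets
 * completed.
 *
 *	u32 qc_active = my_read_active_tags(ap);
 *	int rc = ata_qc_complete_multiple(ap, qc_active);
 *
 * A negative rc means the controller and libata disagree about what
 * is in flight; the usual response is to freeze the port and let EH
 * sort it out.
 */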
5031 
5032 /**
5033  *	ata_qc_issue - issue taskfile to device
5034  *	@qc: command to issue to device
5035  *
5036  *	Prepare an ATA command for submission to the device.
5037  *	This includes mapping the data into a DMA-able
5038  *	area, filling in the S/G table, and finally
5039  *	writing the taskfile to hardware, starting the command.
5040  *
5041  *	LOCKING:
5042  *	spin_lock_irqsave(host lock)
5043  */
5044 void ata_qc_issue(struct ata_queued_cmd *qc)
5045 {
5046 	struct ata_port *ap = qc->ap;
5047 	struct ata_link *link = qc->dev->link;
5048 	u8 prot = qc->tf.protocol;
5049 
5050 	/* Make sure only one non-NCQ command is outstanding.  The
5051 	 * check is skipped for old EH because it reuses the active qc
5052 	 * to request ATAPI sense.
5053 	 */
5054 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5055 
5056 	if (ata_is_ncq(prot)) {
5057 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5058 
5059 		if (!link->sactive)
5060 			ap->nr_active_links++;
5061 		link->sactive |= 1 << qc->tag;
5062 	} else {
5063 		WARN_ON_ONCE(link->sactive);
5064 
5065 		ap->nr_active_links++;
5066 		link->active_tag = qc->tag;
5067 	}
5068 
5069 	qc->flags |= ATA_QCFLAG_ACTIVE;
5070 	ap->qc_active |= 1 << qc->tag;
5071 
5072 	/*
5073 	 * We guarantee to LLDs that they will have at least one
5074 	 * non-zero sg if the command is a data command.
5075 	 */
5076 	if (WARN_ON_ONCE(ata_is_data(prot) &&
5077 			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5078 		goto sys_err;
5079 
5080 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5081 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5082 		if (ata_sg_setup(qc))
5083 			goto sys_err;
5084 
5085 	/* if device is sleeping, schedule reset and abort the link */
5086 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5087 		link->eh_info.action |= ATA_EH_RESET;
5088 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5089 		ata_link_abort(link);
5090 		return;
5091 	}
5092 
5093 	ap->ops->qc_prep(qc);
5094 	trace_ata_qc_issue(qc);
5095 	qc->err_mask |= ap->ops->qc_issue(qc);
5096 	if (unlikely(qc->err_mask))
5097 		goto err;
5098 	return;
5099 
5100 sys_err:
5101 	qc->err_mask |= AC_ERR_SYSTEM;
5102 err:
5103 	ata_qc_complete(qc);
5104 }
5105 
5106 /**
5107  *	sata_scr_valid - test whether SCRs are accessible
5108  *	@link: ATA link to test SCR accessibility for
5109  *
5110  *	Test whether SCRs are accessible for @link.
5111  *
5112  *	LOCKING:
5113  *	None.
5114  *
5115  *	RETURNS:
5116  *	1 if SCRs are accessible, 0 otherwise.
5117  */
5118 int sata_scr_valid(struct ata_link *link)
5119 {
5120 	struct ata_port *ap = link->ap;
5121 
5122 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5123 }
5124 
5125 /**
5126  *	sata_scr_read - read SCR register of the specified port
5127  *	@link: ATA link to read SCR for
5128  *	@reg: SCR to read
5129  *	@val: Place to store read value
5130  *
5131  *	Read SCR register @reg of @link into *@val.  This function is
5132  *	guaranteed to succeed if @link is ap->link, the cable type of
5133  *	the port is SATA and the port implements ->scr_read.
5134  *
5135  *	LOCKING:
5136  *	None if @link is ap->link.  Kernel thread context otherwise.
5137  *
5138  *	RETURNS:
5139  *	0 on success, negative errno on failure.
5140  */
5141 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5142 {
5143 	if (ata_is_host_link(link)) {
5144 		if (sata_scr_valid(link))
5145 			return link->ap->ops->scr_read(link, reg, val);
5146 		return -EOPNOTSUPP;
5147 	}
5148 
5149 	return sata_pmp_scr_read(link, reg, val);
5150 }
5151 
5152 /**
5153  *	sata_scr_write - write SCR register of the specified port
5154  *	@link: ATA link to write SCR for
5155  *	@reg: SCR to write
5156  *	@val: value to write
5157  *
5158  *	Write @val to SCR register @reg of @link.  This function is
5159  *	guaranteed to succeed if @link is ap->link, the cable type of
5160  *	the port is SATA and the port implements ->scr_write.
5161  *
5162  *	LOCKING:
5163  *	None if @link is ap->link.  Kernel thread context otherwise.
5164  *
5165  *	RETURNS:
5166  *	0 on success, negative errno on failure.
5167  */
5168 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5169 {
5170 	if (ata_is_host_link(link)) {
5171 		if (sata_scr_valid(link))
5172 			return link->ap->ops->scr_write(link, reg, val);
5173 		return -EOPNOTSUPP;
5174 	}
5175 
5176 	return sata_pmp_scr_write(link, reg, val);
5177 }
5178 
5179 /**
5180  *	sata_scr_write_flush - write SCR register of the specified port and flush
5181  *	@link: ATA link to write SCR for
5182  *	@reg: SCR to write
5183  *	@val: value to write
5184  *
5185  *	This function is identical to sata_scr_write() except that it
5186  *	performs a flush after writing to the register.
5187  *
5188  *	LOCKING:
5189  *	None if @link is ap->link.  Kernel thread context otherwise.
5190  *
5191  *	RETURNS:
5192  *	0 on success, negative errno on failure.
5193  */
5194 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5195 {
5196 	if (ata_is_host_link(link)) {
5197 		int rc;
5198 
5199 		if (sata_scr_valid(link)) {
5200 			rc = link->ap->ops->scr_write(link, reg, val);
5201 			if (rc == 0)
5202 				rc = link->ap->ops->scr_read(link, reg, &val);
5203 			return rc;
5204 		}
5205 		return -EOPNOTSUPP;
5206 	}
5207 
5208 	return sata_pmp_scr_write(link, reg, val);
5209 }
5210 
5211 /**
5212  *	ata_phys_link_online - test whether the given link is online
5213  *	@link: ATA link to test
5214  *
5215  *	Test whether @link is online.  Note that this function returns
5216  *	false if the online status of @link cannot be obtained, so
5217  *	ata_link_online(link) != !ata_link_offline(link).
5218  *
5219  *	LOCKING:
5220  *	None.
5221  *
5222  *	RETURNS:
5223  *	True if the port online status is available and online.
5224  */
5225 bool ata_phys_link_online(struct ata_link *link)
5226 {
5227 	u32 sstatus;
5228 
5229 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5230 	    ata_sstatus_online(sstatus))
5231 		return true;
5232 	return false;
5233 }
5234 
5235 /**
5236  *	ata_phys_link_offline - test whether the given link is offline
5237  *	@link: ATA link to test
5238  *
5239  *	Test whether @link is offline.  Note that this function
5240  *	returns false if the offline status of @link cannot be obtained, so
5241  *	ata_link_online(link) != !ata_link_offline(link).
5242  *
5243  *	LOCKING:
5244  *	None.
5245  *
5246  *	RETURNS:
5247  *	True if the port offline status is available and offline.
5248  */
5249 bool ata_phys_link_offline(struct ata_link *link)
5250 {
5251 	u32 sstatus;
5252 
5253 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5254 	    !ata_sstatus_online(sstatus))
5255 		return true;
5256 	return false;
5257 }
5258 
5259 /**
5260  *	ata_link_online - test whether the given link is online
5261  *	@link: ATA link to test
5262  *
5263  *	Test whether @link is online.  This is identical to
5264  *	ata_phys_link_online() when there's no slave link.  When
5265  *	there's a slave link, this function should only be called on
5266  *	the master link and will return true if any of M/S links is
5267  *	online.
5268  *
5269  *	LOCKING:
5270  *	None.
5271  *
5272  *	RETURNS:
5273  *	True if the port online status is available and online.
5274  */
5275 bool ata_link_online(struct ata_link *link)
5276 {
5277 	struct ata_link *slave = link->ap->slave_link;
5278 
5279 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5280 
5281 	return ata_phys_link_online(link) ||
5282 		(slave && ata_phys_link_online(slave));
5283 }
5284 
5285 /**
5286  *	ata_link_offline - test whether the given link is offline
5287  *	@link: ATA link to test
5288  *
5289  *	Test whether @link is offline.  This is identical to
5290  *	ata_phys_link_offline() when there's no slave link.  When
5291  *	there's a slave link, this function should only be called on
5292  *	the master link and will return true if both M/S links are
5293  *	offline.
5294  *
5295  *	LOCKING:
5296  *	None.
5297  *
5298  *	RETURNS:
5299  *	True if the port offline status is available and offline.
5300  */
5301 bool ata_link_offline(struct ata_link *link)
5302 {
5303 	struct ata_link *slave = link->ap->slave_link;
5304 
5305 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5306 
5307 	return ata_phys_link_offline(link) &&
5308 		(!slave || ata_phys_link_offline(slave));
5309 }
5310 
5311 #ifdef CONFIG_PM
5312 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5313 				unsigned int action, unsigned int ehi_flags,
5314 				bool async)
5315 {
5316 	struct ata_link *link;
5317 	unsigned long flags;
5318 
5319 	/* Previous resume operation might still be in
5320 	 * progress.  Wait for PM_PENDING to clear.
5321 	 */
5322 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5323 		ata_port_wait_eh(ap);
5324 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5325 	}
5326 
5327 	/* request PM ops to EH */
5328 	spin_lock_irqsave(ap->lock, flags);
5329 
5330 	ap->pm_mesg = mesg;
5331 	ap->pflags |= ATA_PFLAG_PM_PENDING;
5332 	ata_for_each_link(link, ap, HOST_FIRST) {
5333 		link->eh_info.action |= action;
5334 		link->eh_info.flags |= ehi_flags;
5335 	}
5336 
5337 	ata_port_schedule_eh(ap);
5338 
5339 	spin_unlock_irqrestore(ap->lock, flags);
5340 
5341 	if (!async) {
5342 		ata_port_wait_eh(ap);
5343 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5344 	}
5345 }
5346 
5347 /*
5348  * On some hardware, the device fails to respond after being spun down for
5349  * suspend.  As the device won't be used before being resumed, we don't need
5350  * to touch it.  Ask EH to skip the usual stuff and proceed directly to suspend.
5351  *
5352  * http://thread.gmane.org/gmane.linux.ide/46764
5353  */
5354 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5355 						 | ATA_EHI_NO_AUTOPSY
5356 						 | ATA_EHI_NO_RECOVERY;
5357 
5358 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5359 {
5360 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5361 }
5362 
5363 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5364 {
5365 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5366 }
5367 
5368 static int ata_port_pm_suspend(struct device *dev)
5369 {
5370 	struct ata_port *ap = to_ata_port(dev);
5371 
5372 	if (pm_runtime_suspended(dev))
5373 		return 0;
5374 
5375 	ata_port_suspend(ap, PMSG_SUSPEND);
5376 	return 0;
5377 }
5378 
5379 static int ata_port_pm_freeze(struct device *dev)
5380 {
5381 	struct ata_port *ap = to_ata_port(dev);
5382 
5383 	if (pm_runtime_suspended(dev))
5384 		return 0;
5385 
5386 	ata_port_suspend(ap, PMSG_FREEZE);
5387 	return 0;
5388 }
5389 
5390 static int ata_port_pm_poweroff(struct device *dev)
5391 {
5392 	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5393 	return 0;
5394 }
5395 
5396 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5397 						| ATA_EHI_QUIET;
5398 
5399 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5400 {
5401 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5402 }
5403 
5404 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5405 {
5406 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5407 }
5408 
5409 static int ata_port_pm_resume(struct device *dev)
5410 {
5411 	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5412 	pm_runtime_disable(dev);
5413 	pm_runtime_set_active(dev);
5414 	pm_runtime_enable(dev);
5415 	return 0;
5416 }
5417 
5418 /*
5419  * For ODDs, the upper layer will poll for media change every few seconds,
5420  * which would make the port enter and leave the suspend state every few
5421  * seconds.  Since each suspend causes a hard/soft reset, runtime suspend
5422  * gains very little and the ODD may malfunction after being reset
5423  * repeatedly.  The idle callback below therefore does not proceed to
5424  * suspend if a non-ZPODD capable ODD is attached to the port.
5425  */
5426 static int ata_port_runtime_idle(struct device *dev)
5427 {
5428 	struct ata_port *ap = to_ata_port(dev);
5429 	struct ata_link *link;
5430 	struct ata_device *adev;
5431 
5432 	ata_for_each_link(link, ap, HOST_FIRST) {
5433 		ata_for_each_dev(adev, link, ENABLED)
5434 			if (adev->class == ATA_DEV_ATAPI &&
5435 			    !zpodd_dev_enabled(adev))
5436 				return -EBUSY;
5437 	}
5438 
5439 	return 0;
5440 }
5441 
5442 static int ata_port_runtime_suspend(struct device *dev)
5443 {
5444 	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5445 	return 0;
5446 }
5447 
5448 static int ata_port_runtime_resume(struct device *dev)
5449 {
5450 	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5451 	return 0;
5452 }
5453 
5454 static const struct dev_pm_ops ata_port_pm_ops = {
5455 	.suspend = ata_port_pm_suspend,
5456 	.resume = ata_port_pm_resume,
5457 	.freeze = ata_port_pm_freeze,
5458 	.thaw = ata_port_pm_resume,
5459 	.poweroff = ata_port_pm_poweroff,
5460 	.restore = ata_port_pm_resume,
5461 
5462 	.runtime_suspend = ata_port_runtime_suspend,
5463 	.runtime_resume = ata_port_runtime_resume,
5464 	.runtime_idle = ata_port_runtime_idle,
5465 };
5466 
5467 /* sas ports don't participate in pm runtime management of ata_ports,
5468  * and need to resume ata devices at the domain level, not the per-port
5469  * level. sas suspend/resume is async to allow parallel port recovery
5470  * since sas has multiple ata_port instances per Scsi_Host.
5471  */
5472 void ata_sas_port_suspend(struct ata_port *ap)
5473 {
5474 	ata_port_suspend_async(ap, PMSG_SUSPEND);
5475 }
5476 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5477 
5478 void ata_sas_port_resume(struct ata_port *ap)
5479 {
5480 	ata_port_resume_async(ap, PMSG_RESUME);
5481 }
5482 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5483 
5484 /**
5485  *	ata_host_suspend - suspend host
5486  *	@host: host to suspend
5487  *	@mesg: PM message
5488  *
5489  *	Suspend @host.  Actual operation is performed by port suspend.
5490  */
5491 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5492 {
5493 	host->dev->power.power_state = mesg;
5494 	return 0;
5495 }
5496 
5497 /**
5498  *	ata_host_resume - resume host
5499  *	@host: host to resume
5500  *
5501  *	Resume @host.  Actual operation is performed by port resume.
5502  */
5503 void ata_host_resume(struct ata_host *host)
5504 {
5505 	host->dev->power.power_state = PMSG_ON;
5506 }
5507 #endif
5508 
5509 struct device_type ata_port_type = {
5510 	.name = "ata_port",
5511 #ifdef CONFIG_PM
5512 	.pm = &ata_port_pm_ops,
5513 #endif
5514 };
5515 
5516 /**
5517  *	ata_dev_init - Initialize an ata_device structure
5518  *	@dev: Device structure to initialize
5519  *
5520  *	Initialize @dev in preparation for probing.
5521  *
5522  *	LOCKING:
5523  *	Inherited from caller.
5524  */
5525 void ata_dev_init(struct ata_device *dev)
5526 {
5527 	struct ata_link *link = ata_dev_phys_link(dev);
5528 	struct ata_port *ap = link->ap;
5529 	unsigned long flags;
5530 
5531 	/* SATA spd limit is bound to the attached device, reset together */
5532 	link->sata_spd_limit = link->hw_sata_spd_limit;
5533 	link->sata_spd = 0;
5534 
5535 	/* High bits of dev->flags are used to record warm plug
5536 	 * requests which occur asynchronously.  Synchronize using
5537 	 * host lock.
5538 	 */
5539 	spin_lock_irqsave(ap->lock, flags);
5540 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5541 	dev->horkage = 0;
5542 	spin_unlock_irqrestore(ap->lock, flags);
5543 
5544 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5545 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5546 	dev->pio_mask = UINT_MAX;
5547 	dev->mwdma_mask = UINT_MAX;
5548 	dev->udma_mask = UINT_MAX;
5549 }
5550 
5551 /**
5552  *	ata_link_init - Initialize an ata_link structure
5553  *	@ap: ATA port link is attached to
5554  *	@link: Link structure to initialize
5555  *	@pmp: Port multiplier port number
5556  *
5557  *	Initialize @link.
5558  *
5559  *	LOCKING:
5560  *	Kernel thread context (may sleep)
5561  */
5562 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5563 {
5564 	int i;
5565 
5566 	/* clear everything except for devices */
5567 	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5568 	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5569 
5570 	link->ap = ap;
5571 	link->pmp = pmp;
5572 	link->active_tag = ATA_TAG_POISON;
5573 	link->hw_sata_spd_limit = UINT_MAX;
5574 
5575 	/* can't use iterator, ap isn't initialized yet */
5576 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5577 		struct ata_device *dev = &link->device[i];
5578 
5579 		dev->link = link;
5580 		dev->devno = dev - link->device;
5581 #ifdef CONFIG_ATA_ACPI
5582 		dev->gtf_filter = ata_acpi_gtf_filter;
5583 #endif
5584 		ata_dev_init(dev);
5585 	}
5586 }
5587 
5588 /**
5589  *	sata_link_init_spd - Initialize link->sata_spd_limit
5590  *	@link: Link to configure sata_spd_limit for
5591  *
5592  *	Initialize @link->[hw_]sata_spd_limit to the currently
5593  *	configured value.
5594  *
5595  *	LOCKING:
5596  *	Kernel thread context (may sleep).
5597  *
5598  *	RETURNS:
5599  *	0 on success, -errno on failure.
5600  */
5601 int sata_link_init_spd(struct ata_link *link)
5602 {
5603 	u8 spd;
5604 	int rc;
5605 
5606 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5607 	if (rc)
5608 		return rc;
5609 
5610 	spd = (link->saved_scontrol >> 4) & 0xf;
5611 	if (spd)
5612 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5613 
5614 	ata_force_link_limits(link);
5615 
5616 	link->sata_spd_limit = link->hw_sata_spd_limit;
5617 
5618 	return 0;
5619 }
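
/*
 * Worked example of the limit computation above (illustrative only): if the
 * SControl SPD field reads 2 (limit to 3.0 Gbps), then (1 << 2) - 1 == 0x3,
 * so hw_sata_spd_limit keeps only the 1.5 Gbps and 3.0 Gbps bits.  An SPD
 * field of 0 means "no limit" and the mask is left untouched.
 */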
5620 
5621 /**
5622  *	ata_port_alloc - allocate and initialize basic ATA port resources
5623  *	@host: ATA host this allocated port belongs to
5624  *
5625  *	Allocate and initialize basic ATA port resources.
5626  *
5627  *	RETURNS:
5628  *	Allocated ATA port on success, NULL on failure.
5629  *
5630  *	LOCKING:
5631  *	Inherited from calling layer (may sleep).
5632  */
5633 struct ata_port *ata_port_alloc(struct ata_host *host)
5634 {
5635 	struct ata_port *ap;
5636 
5637 	DPRINTK("ENTER\n");
5638 
5639 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5640 	if (!ap)
5641 		return NULL;
5642 
5643 	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5644 	ap->lock = &host->lock;
5645 	ap->print_id = -1;
5646 	ap->local_port_no = -1;
5647 	ap->host = host;
5648 	ap->dev = host->dev;
5649 
5650 #if defined(ATA_VERBOSE_DEBUG)
5651 	/* turn on all debugging levels */
5652 	ap->msg_enable = 0x00FF;
5653 #elif defined(ATA_DEBUG)
5654 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5655 #else
5656 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5657 #endif
5658 
5659 	mutex_init(&ap->scsi_scan_mutex);
5660 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5661 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5662 	INIT_LIST_HEAD(&ap->eh_done_q);
5663 	init_waitqueue_head(&ap->eh_wait_q);
5664 	init_completion(&ap->park_req_pending);
5665 	init_timer_deferrable(&ap->fastdrain_timer);
5666 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5667 	ap->fastdrain_timer.data = (unsigned long)ap;
5668 
5669 	ap->cbl = ATA_CBL_NONE;
5670 
5671 	ata_link_init(ap, &ap->link, 0);
5672 
5673 #ifdef ATA_IRQ_TRAP
5674 	ap->stats.unhandled_irq = 1;
5675 	ap->stats.idle_irq = 1;
5676 #endif
5677 	ata_sff_port_init(ap);
5678 
5679 	return ap;
5680 }
5681 
5682 static void ata_host_release(struct device *gendev, void *res)
5683 {
5684 	struct ata_host *host = dev_get_drvdata(gendev);
5685 	int i;
5686 
5687 	for (i = 0; i < host->n_ports; i++) {
5688 		struct ata_port *ap = host->ports[i];
5689 
5690 		if (!ap)
5691 			continue;
5692 
5693 		if (ap->scsi_host)
5694 			scsi_host_put(ap->scsi_host);
5695 
5696 		kfree(ap->pmp_link);
5697 		kfree(ap->slave_link);
5698 		kfree(ap);
5699 		host->ports[i] = NULL;
5700 	}
5701 
5702 	dev_set_drvdata(gendev, NULL);
5703 }
5704 
5705 /**
5706  *	ata_host_alloc - allocate and init basic ATA host resources
5707  *	@dev: generic device this host is associated with
5708  *	@max_ports: maximum number of ATA ports associated with this host
5709  *
5710  *	Allocate and initialize basic ATA host resources.  An LLD calls
5711  *	this function to allocate a host, then fully initializes it and
5712  *	attaches it using ata_host_register().
5713  *
5714  *	@max_ports ports are allocated and host->n_ports is
5715  *	initialized to @max_ports.  The caller is allowed to decrease
5716  *	host->n_ports before calling ata_host_register().  The unused
5717  *	ports will be automatically freed on registration.
5718  *
5719  *	RETURNS:
5720  *	Allocated ATA host on success, NULL on failure.
5721  *
5722  *	LOCKING:
5723  *	Inherited from calling layer (may sleep).
5724  */
5725 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5726 {
5727 	struct ata_host *host;
5728 	size_t sz;
5729 	int i;
5730 
5731 	DPRINTK("ENTER\n");
5732 
5733 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5734 		return NULL;
5735 
5736 	/* alloc a container for our list of ATA ports (buses) */
5737 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5739 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5740 	if (!host)
5741 		goto err_out;
5742 
5743 	devres_add(dev, host);
5744 	dev_set_drvdata(dev, host);
5745 
5746 	spin_lock_init(&host->lock);
5747 	mutex_init(&host->eh_mutex);
5748 	host->dev = dev;
5749 	host->n_ports = max_ports;
5750 
5751 	/* allocate ports bound to this host */
5752 	for (i = 0; i < max_ports; i++) {
5753 		struct ata_port *ap;
5754 
5755 		ap = ata_port_alloc(host);
5756 		if (!ap)
5757 			goto err_out;
5758 
5759 		ap->port_no = i;
5760 		host->ports[i] = ap;
5761 	}
5762 
5763 	devres_remove_group(dev, NULL);
5764 	return host;
5765 
5766  err_out:
5767 	devres_release_group(dev, NULL);
5768 	return NULL;
5769 }
5770 
5771 /**
5772  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5773  *	@dev: generic device this host is associated with
5774  *	@ppi: array of ATA port_info to initialize host with
5775  *	@n_ports: number of ATA ports attached to this host
5776  *
5777  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5778  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5779  *	last entry will be used for the remaining ports.
5780  *
5781  *	RETURNS:
5782  *	Allocated ATA host on success, NULL on failure.
5783  *
5784  *	LOCKING:
5785  *	Inherited from calling layer (may sleep).
5786  */
5787 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5788 				      const struct ata_port_info * const * ppi,
5789 				      int n_ports)
5790 {
5791 	const struct ata_port_info *pi;
5792 	struct ata_host *host;
5793 	int i, j;
5794 
5795 	host = ata_host_alloc(dev, n_ports);
5796 	if (!host)
5797 		return NULL;
5798 
5799 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5800 		struct ata_port *ap = host->ports[i];
5801 
5802 		if (ppi[j])
5803 			pi = ppi[j++];
5804 
5805 		ap->pio_mask = pi->pio_mask;
5806 		ap->mwdma_mask = pi->mwdma_mask;
5807 		ap->udma_mask = pi->udma_mask;
5808 		ap->flags |= pi->flags;
5809 		ap->link.flags |= pi->link_flags;
5810 		ap->ops = pi->port_ops;
5811 
5812 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5813 			host->ops = pi->port_ops;
5814 	}
5815 
5816 	return host;
5817 }
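
/*
 * Illustrative sketch (not from a real driver): a PCI LLD would typically
 * build its host from a NULL terminated port_info array; "foo_port_info"
 * and the port count below are hypothetical.
 *
 *	static int foo_init_one(struct pci_dev *pdev)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *		...
 *	}
 */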
5818 
5819 /**
5820  *	ata_slave_link_init - initialize slave link
5821  *	@ap: port to initialize slave link for
5822  *
5823  *	Create and initialize slave link for @ap.  This enables slave
5824  *	link handling on the port.
5825  *
5826  *	In libata, a port contains links and a link contains devices.
5827  *	There is a single host link but if a PMP is attached to it,
5828  *	there can be multiple fan-out links.  On SATA, there's usually
5829  *	a single device connected to a link but PATA and SATA
5830  *	controllers emulating a TF-based interface can have two - master
5831  *	and slave.
5832  *
5833  *	However, there are a few controllers which don't fit into this
5834  *	abstraction too well - SATA controllers which emulate TF
5835  *	interface with both master and slave devices but also have
5836  *	separate SCR register sets for each device.  These controllers
5837  *	need separate links for physical link handling
5838  *	(e.g. onlineness, link speed) but should be treated like a
5839  *	traditional M/S controller for everything else (e.g. command
5840  *	issue, softreset).
5841  *
5842  *	slave_link is libata's way of handling this class of
5843  *	controllers without impacting core layer too much.  For
5844  *	anything other than physical link handling, the default host
5845  *	link is used for both master and slave.  For physical link
5846  *	handling, separate @ap->slave_link is used.  All dirty details
5847  *	are implemented inside libata core layer.  From LLD's POV, the
5848  *	only difference is that prereset, hardreset and postreset are
5849  *	called once more for the slave link, so the reset sequence
5850  *	looks like the following.
5851  *
5852  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5853  *	softreset(M) -> postreset(M) -> postreset(S)
5854  *
5855  *	Note that softreset is called only for the master.  Softreset
5856  *	resets both M/S by definition, so SRST on master should handle
5857  *	both (the standard method will work just fine).
5858  *
5859  *	LOCKING:
5860  *	Should be called before host is registered.
5861  *
5862  *	RETURNS:
5863  *	0 on success, -errno on failure.
5864  */
5865 int ata_slave_link_init(struct ata_port *ap)
5866 {
5867 	struct ata_link *link;
5868 
5869 	WARN_ON(ap->slave_link);
5870 	WARN_ON(ap->flags & ATA_FLAG_PMP);
5871 
5872 	link = kzalloc(sizeof(*link), GFP_KERNEL);
5873 	if (!link)
5874 		return -ENOMEM;
5875 
5876 	ata_link_init(ap, link, 1);
5877 	ap->slave_link = link;
5878 	return 0;
5879 }
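
/*
 * Illustrative sketch: an LLD for a controller with per-device SCR sets
 * would enable slave links on each port before registering the host
 * (error handling shortened):
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 */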
5880 
5881 static void ata_host_stop(struct device *gendev, void *res)
5882 {
5883 	struct ata_host *host = dev_get_drvdata(gendev);
5884 	int i;
5885 
5886 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5887 
5888 	for (i = 0; i < host->n_ports; i++) {
5889 		struct ata_port *ap = host->ports[i];
5890 
5891 		if (ap->ops->port_stop)
5892 			ap->ops->port_stop(ap);
5893 	}
5894 
5895 	if (host->ops->host_stop)
5896 		host->ops->host_stop(host);
5897 }
5898 
5899 /**
5900  *	ata_finalize_port_ops - finalize ata_port_operations
5901  *	@ops: ata_port_operations to finalize
5902  *
5903  *	An ata_port_operations can inherit from another ops and that
5904  *	ops can again inherit from another.  This can go on as many
5905  *	times as necessary as long as there is no loop in the
5906  *	inheritance chain.
5907  *
5908  *	Ops tables are finalized when the host is started.  NULL or
5909  *	unspecified entries are inherited from the closest ancestor
5910  *	which has the method and the entry is populated with it.
5911  *	After finalization, the ops table directly points to all the
5912  *	methods and ->inherits is no longer necessary and cleared.
5913  *
5914  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5915  *
5916  *	LOCKING:
5917  *	None.
5918  */
5919 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5920 {
5921 	static DEFINE_SPINLOCK(lock);
5922 	const struct ata_port_operations *cur;
5923 	void **begin = (void **)ops;
5924 	void **end = (void **)&ops->inherits;
5925 	void **pp;
5926 
5927 	if (!ops || !ops->inherits)
5928 		return;
5929 
5930 	spin_lock(&lock);
5931 
5932 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5933 		void **inherit = (void **)cur;
5934 
5935 		for (pp = begin; pp < end; pp++, inherit++)
5936 			if (!*pp)
5937 				*pp = *inherit;
5938 	}
5939 
5940 	for (pp = begin; pp < end; pp++)
5941 		if (IS_ERR(*pp))
5942 			*pp = NULL;
5943 
5944 	ops->inherits = NULL;
5945 
5946 	spin_unlock(&lock);
5947 }
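
/*
 * Illustrative sketch of the inheritance this function resolves; the
 * "foo_*" names are hypothetical.  Unset methods are filled in from
 * sata_port_ops (and its ancestors), while ATA_OP_NULL forces a slot
 * back to NULL after finalization.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.qc_prep	= foo_qc_prep,
 *		.softreset	= ATA_OP_NULL,
 *	};
 */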
5948 
5949 /**
5950  *	ata_host_start - start and freeze ports of an ATA host
5951  *	@host: ATA host to start ports for
5952  *
5953  *	Start and then freeze ports of @host.  Started status is
5954  *	recorded in host->flags, so this function can be called
5955  *	multiple times.  Ports are guaranteed to get started only
5956  *	once.  If host->ops isn't initialized yet, it's set to the
5957  *	first non-dummy port ops.
5958  *
5959  *	LOCKING:
5960  *	Inherited from calling layer (may sleep).
5961  *
5962  *	RETURNS:
5963  *	0 if all ports are started successfully, -errno otherwise.
5964  */
5965 int ata_host_start(struct ata_host *host)
5966 {
5967 	int have_stop = 0;
5968 	void *start_dr = NULL;
5969 	int i, rc;
5970 
5971 	if (host->flags & ATA_HOST_STARTED)
5972 		return 0;
5973 
5974 	ata_finalize_port_ops(host->ops);
5975 
5976 	for (i = 0; i < host->n_ports; i++) {
5977 		struct ata_port *ap = host->ports[i];
5978 
5979 		ata_finalize_port_ops(ap->ops);
5980 
5981 		if (!host->ops && !ata_port_is_dummy(ap))
5982 			host->ops = ap->ops;
5983 
5984 		if (ap->ops->port_stop)
5985 			have_stop = 1;
5986 	}
5987 
5988 	if (host->ops->host_stop)
5989 		have_stop = 1;
5990 
5991 	if (have_stop) {
5992 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5993 		if (!start_dr)
5994 			return -ENOMEM;
5995 	}
5996 
5997 	for (i = 0; i < host->n_ports; i++) {
5998 		struct ata_port *ap = host->ports[i];
5999 
6000 		if (ap->ops->port_start) {
6001 			rc = ap->ops->port_start(ap);
6002 			if (rc) {
6003 				if (rc != -ENODEV)
6004 					dev_err(host->dev,
6005 						"failed to start port %d (errno=%d)\n",
6006 						i, rc);
6007 				goto err_out;
6008 			}
6009 		}
6010 		ata_eh_freeze_port(ap);
6011 	}
6012 
6013 	if (start_dr)
6014 		devres_add(host->dev, start_dr);
6015 	host->flags |= ATA_HOST_STARTED;
6016 	return 0;
6017 
6018  err_out:
6019 	while (--i >= 0) {
6020 		struct ata_port *ap = host->ports[i];
6021 
6022 		if (ap->ops->port_stop)
6023 			ap->ops->port_stop(ap);
6024 	}
6025 	devres_free(start_dr);
6026 	return rc;
6027 }
6028 
6029 /**
6030  *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
6031  *	@host:	host to initialize
6032  *	@dev:	device host is attached to
6033  *	@ops:	port_ops
6034  *
6035  */
6036 void ata_host_init(struct ata_host *host, struct device *dev,
6037 		   struct ata_port_operations *ops)
6038 {
6039 	spin_lock_init(&host->lock);
6040 	mutex_init(&host->eh_mutex);
6041 	host->n_tags = ATA_MAX_QUEUE - 1;
6042 	host->dev = dev;
6043 	host->ops = ops;
6044 }
6045 
6046 void __ata_port_probe(struct ata_port *ap)
6047 {
6048 	struct ata_eh_info *ehi = &ap->link.eh_info;
6049 	unsigned long flags;
6050 
6051 	/* kick EH for boot probing */
6052 	spin_lock_irqsave(ap->lock, flags);
6053 
6054 	ehi->probe_mask |= ATA_ALL_DEVICES;
6055 	ehi->action |= ATA_EH_RESET;
6056 	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6057 
6058 	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6059 	ap->pflags |= ATA_PFLAG_LOADING;
6060 	ata_port_schedule_eh(ap);
6061 
6062 	spin_unlock_irqrestore(ap->lock, flags);
6063 }
6064 
6065 int ata_port_probe(struct ata_port *ap)
6066 {
6067 	int rc = 0;
6068 
6069 	if (ap->ops->error_handler) {
6070 		__ata_port_probe(ap);
6071 		ata_port_wait_eh(ap);
6072 	} else {
6073 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6074 		rc = ata_bus_probe(ap);
6075 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6076 	}
6077 	return rc;
6078 }
6079 
6080 
6081 static void async_port_probe(void *data, async_cookie_t cookie)
6082 {
6083 	struct ata_port *ap = data;
6084 
6085 	/*
6086 	 * If we're not allowed to scan this host in parallel,
6087 	 * we need to wait until all previous scans have completed
6088 	 * before going further.
6089 	 * Jeff Garzik says this is only within a controller, so we
6090 	 * don't need to wait for port 0, only for later ports.
6091 	 */
6092 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6093 		async_synchronize_cookie(cookie);
6094 
6095 	(void)ata_port_probe(ap);
6096 
6097 	/* in order to keep device order, we need to synchronize at this point */
6098 	async_synchronize_cookie(cookie);
6099 
6100 	ata_scsi_scan_host(ap, 1);
6101 }
6102 
6103 /**
6104  *	ata_host_register - register initialized ATA host
6105  *	@host: ATA host to register
6106  *	@sht: template for SCSI host
6107  *
6108  *	Register initialized ATA host.  @host is allocated using
6109  *	ata_host_alloc() and fully initialized by LLD.  This function
6110  *	starts ports, registers @host with ATA and SCSI layers and
6111  *	probes registered devices.
6112  *
6113  *	LOCKING:
6114  *	Inherited from calling layer (may sleep).
6115  *
6116  *	RETURNS:
6117  *	0 on success, -errno otherwise.
6118  */
6119 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6120 {
6121 	int i, rc;
6122 
6123 	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6124 
6125 	/* host must have been started */
6126 	if (!(host->flags & ATA_HOST_STARTED)) {
6127 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6128 		WARN_ON(1);
6129 		return -EINVAL;
6130 	}
6131 
6132 	/* Blow away unused ports.  This happens when LLD can't
6133 	 * determine the exact number of ports to allocate at
6134 	 * allocation time.
6135 	 */
6136 	for (i = host->n_ports; host->ports[i]; i++)
6137 		kfree(host->ports[i]);
6138 
6139 	/* give ports names and add SCSI hosts */
6140 	for (i = 0; i < host->n_ports; i++) {
6141 		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6142 		host->ports[i]->local_port_no = i + 1;
6143 	}
6144 
6145 	/* Create associated sysfs transport objects  */
6146 	for (i = 0; i < host->n_ports; i++) {
6147 		rc = ata_tport_add(host->dev,host->ports[i]);
6148 		if (rc) {
6149 			goto err_tadd;
6150 		}
6151 	}
6152 
6153 	rc = ata_scsi_add_hosts(host, sht);
6154 	if (rc)
6155 		goto err_tadd;
6156 
6157 	/* set cable, sata_spd_limit and report */
6158 	for (i = 0; i < host->n_ports; i++) {
6159 		struct ata_port *ap = host->ports[i];
6160 		unsigned long xfer_mask;
6161 
6162 		/* set SATA cable type if still unset */
6163 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6164 			ap->cbl = ATA_CBL_SATA;
6165 
6166 		/* init sata_spd_limit to the current value */
6167 		sata_link_init_spd(&ap->link);
6168 		if (ap->slave_link)
6169 			sata_link_init_spd(ap->slave_link);
6170 
6171 		/* print per-port info to dmesg */
6172 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6173 					      ap->udma_mask);
6174 
6175 		if (!ata_port_is_dummy(ap)) {
6176 			ata_port_info(ap, "%cATA max %s %s\n",
6177 				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6178 				      ata_mode_string(xfer_mask),
6179 				      ap->link.eh_info.desc);
6180 			ata_ehi_clear_desc(&ap->link.eh_info);
6181 		} else
6182 			ata_port_info(ap, "DUMMY\n");
6183 	}
6184 
6185 	/* perform each probe asynchronously */
6186 	for (i = 0; i < host->n_ports; i++) {
6187 		struct ata_port *ap = host->ports[i];
6188 		async_schedule(async_port_probe, ap);
6189 	}
6190 
6191 	return 0;
6192 
6193  err_tadd:
6194 	while (--i >= 0) {
6195 		ata_tport_delete(host->ports[i]);
6196 	}
6197 	return rc;
6198 
6199 }
6200 
6201 /**
6202  *	ata_host_activate - start host, request IRQ and register it
6203  *	@host: target ATA host
6204  *	@irq: IRQ to request
6205  *	@irq_handler: irq_handler used when requesting IRQ
6206  *	@irq_flags: irq_flags used when requesting IRQ
6207  *	@sht: scsi_host_template to use when registering the host
6208  *
6209  *	After allocating an ATA host and initializing it, most libata
6210  *	LLDs perform three steps to activate the host - start host,
6211  *	request IRQ and register it.  This helper takes necessasry
6212  *	arguments and performs the three steps in one go.
6213  *
6214  *	An invalid IRQ skips the IRQ registration and expects the host to
6215  *	have set polling mode on the port. In this case, @irq_handler
6216  *	should be NULL.
6217  *
6218  *	LOCKING:
6219  *	Inherited from calling layer (may sleep).
6220  *
6221  *	RETURNS:
6222  *	0 on success, -errno otherwise.
6223  */
6224 int ata_host_activate(struct ata_host *host, int irq,
6225 		      irq_handler_t irq_handler, unsigned long irq_flags,
6226 		      struct scsi_host_template *sht)
6227 {
6228 	int i, rc;
6229 	char *irq_desc;
6230 
6231 	rc = ata_host_start(host);
6232 	if (rc)
6233 		return rc;
6234 
6235 	/* Special case for polling mode */
6236 	if (!irq) {
6237 		WARN_ON(irq_handler);
6238 		return ata_host_register(host, sht);
6239 	}
6240 
6241 	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6242 				  dev_driver_string(host->dev),
6243 				  dev_name(host->dev));
6244 	if (!irq_desc)
6245 		return -ENOMEM;
6246 
6247 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6248 			      irq_desc, host);
6249 	if (rc)
6250 		return rc;
6251 
6252 	for (i = 0; i < host->n_ports; i++)
6253 		ata_port_desc(host->ports[i], "irq %d", irq);
6254 
6255 	rc = ata_host_register(host, sht);
6256 	/* if failed, just free the IRQ and leave ports alone */
6257 	if (rc)
6258 		devm_free_irq(host->dev, irq, host);
6259 
6260 	return rc;
6261 }
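
/*
 * Illustrative sketch of the usual LLD activation path; "foo_interrupt"
 * and "foo_sht" are hypothetical.  Hosts that run in polling mode pass
 * irq == 0 and a NULL handler instead.
 *
 *	rc = ata_host_activate(host, pdev->irq, foo_interrupt,
 *			       IRQF_SHARED, &foo_sht);
 *	if (rc)
 *		return rc;
 */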
6262 
6263 /**
6264  *	ata_port_detach - Detach ATA port in preparation for device removal
6265  *	@ap: ATA port to be detached
6266  *
6267  *	Detach all ATA devices and the associated SCSI devices of @ap;
6268  *	then, remove the associated SCSI host.  @ap is guaranteed to
6269  *	be quiescent on return from this function.
6270  *
6271  *	LOCKING:
6272  *	Kernel thread context (may sleep).
6273  */
6274 static void ata_port_detach(struct ata_port *ap)
6275 {
6276 	unsigned long flags;
6277 	struct ata_link *link;
6278 	struct ata_device *dev;
6279 
6280 	if (!ap->ops->error_handler)
6281 		goto skip_eh;
6282 
6283 	/* tell EH we're leaving & flush EH */
6284 	spin_lock_irqsave(ap->lock, flags);
6285 	ap->pflags |= ATA_PFLAG_UNLOADING;
6286 	ata_port_schedule_eh(ap);
6287 	spin_unlock_irqrestore(ap->lock, flags);
6288 
6289 	/* wait till EH commits suicide */
6290 	ata_port_wait_eh(ap);
6291 
6292 	/* it better be dead now */
6293 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6294 
6295 	cancel_delayed_work_sync(&ap->hotplug_task);
6296 
6297  skip_eh:
6298 	/* clean up zpodd on port removal */
6299 	ata_for_each_link(link, ap, HOST_FIRST) {
6300 		ata_for_each_dev(dev, link, ALL) {
6301 			if (zpodd_dev_enabled(dev))
6302 				zpodd_exit(dev);
6303 		}
6304 	}
6305 	if (ap->pmp_link) {
6306 		int i;
6307 		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6308 			ata_tlink_delete(&ap->pmp_link[i]);
6309 	}
6310 	/* remove the associated SCSI host */
6311 	scsi_remove_host(ap->scsi_host);
6312 	ata_tport_delete(ap);
6313 }
6314 
6315 /**
6316  *	ata_host_detach - Detach all ports of an ATA host
6317  *	@host: Host to detach
6318  *
6319  *	Detach all ports of @host.
6320  *
6321  *	LOCKING:
6322  *	Kernel thread context (may sleep).
6323  */
6324 void ata_host_detach(struct ata_host *host)
6325 {
6326 	int i;
6327 
6328 	for (i = 0; i < host->n_ports; i++)
6329 		ata_port_detach(host->ports[i]);
6330 
6331 	/* the host is dead now, dissociate ACPI */
6332 	ata_acpi_dissociate(host);
6333 }
6334 
6335 #ifdef CONFIG_PCI
6336 
6337 /**
6338  *	ata_pci_remove_one - PCI layer callback for device removal
6339  *	@pdev: PCI device that was removed
6340  *
6341  *	The PCI layer indicates to libata via this hook that a hot-unplug or
6342  *	module unload event has occurred.  Detach all ports.  Resource
6343  *	release is handled via devres.
6344  *
6345  *	LOCKING:
6346  *	Inherited from PCI layer (may sleep).
6347  */
6348 void ata_pci_remove_one(struct pci_dev *pdev)
6349 {
6350 	struct ata_host *host = pci_get_drvdata(pdev);
6351 
6352 	ata_host_detach(host);
6353 }
6354 
6355 /* move to PCI subsystem */
6356 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6357 {
6358 	unsigned long tmp = 0;
6359 
6360 	switch (bits->width) {
6361 	case 1: {
6362 		u8 tmp8 = 0;
6363 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6364 		tmp = tmp8;
6365 		break;
6366 	}
6367 	case 2: {
6368 		u16 tmp16 = 0;
6369 		pci_read_config_word(pdev, bits->reg, &tmp16);
6370 		tmp = tmp16;
6371 		break;
6372 	}
6373 	case 4: {
6374 		u32 tmp32 = 0;
6375 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6376 		tmp = tmp32;
6377 		break;
6378 	}
6379 
6380 	default:
6381 		return -EINVAL;
6382 	}
6383 
6384 	tmp &= bits->mask;
6385 
6386 	return (tmp == bits->val) ? 1 : 0;
6387 }
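
/*
 * Illustrative sketch: PATA PCI drivers commonly use this helper to test
 * whether a channel is enabled before probing it.  The register offset,
 * mask and value below are made up, not taken from a real controller.
 * Each entry is { reg, width (bytes), mask, val }.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;
 */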
6388 
6389 #ifdef CONFIG_PM
6390 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6391 {
6392 	pci_save_state(pdev);
6393 	pci_disable_device(pdev);
6394 
6395 	if (mesg.event & PM_EVENT_SLEEP)
6396 		pci_set_power_state(pdev, PCI_D3hot);
6397 }
6398 
6399 int ata_pci_device_do_resume(struct pci_dev *pdev)
6400 {
6401 	int rc;
6402 
6403 	pci_set_power_state(pdev, PCI_D0);
6404 	pci_restore_state(pdev);
6405 
6406 	rc = pcim_enable_device(pdev);
6407 	if (rc) {
6408 		dev_err(&pdev->dev,
6409 			"failed to enable device after resume (%d)\n", rc);
6410 		return rc;
6411 	}
6412 
6413 	pci_set_master(pdev);
6414 	return 0;
6415 }
6416 
6417 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6418 {
6419 	struct ata_host *host = pci_get_drvdata(pdev);
6420 	int rc = 0;
6421 
6422 	rc = ata_host_suspend(host, mesg);
6423 	if (rc)
6424 		return rc;
6425 
6426 	ata_pci_device_do_suspend(pdev, mesg);
6427 
6428 	return 0;
6429 }
6430 
6431 int ata_pci_device_resume(struct pci_dev *pdev)
6432 {
6433 	struct ata_host *host = pci_get_drvdata(pdev);
6434 	int rc;
6435 
6436 	rc = ata_pci_device_do_resume(pdev);
6437 	if (rc == 0)
6438 		ata_host_resume(host);
6439 	return rc;
6440 }
6441 #endif /* CONFIG_PM */
6442 
6443 #endif /* CONFIG_PCI */
6444 
6445 /**
6446  *	ata_platform_remove_one - Platform layer callback for device removal
6447  *	@pdev: Platform device that was removed
6448  *
6449  *	The platform layer indicates to libata via this hook that a hot-unplug
6450  *	or module unload event has occurred.  Detach all ports.  Resource
6451  *	release is handled via devres.
6452  *
6453  *	LOCKING:
6454  *	Inherited from platform layer (may sleep).
6455  */
6456 int ata_platform_remove_one(struct platform_device *pdev)
6457 {
6458 	struct ata_host *host = platform_get_drvdata(pdev);
6459 
6460 	ata_host_detach(host);
6461 
6462 	return 0;
6463 }
6464 
6465 static int __init ata_parse_force_one(char **cur,
6466 				      struct ata_force_ent *force_ent,
6467 				      const char **reason)
6468 {
6469 	static const struct ata_force_param force_tbl[] __initconst = {
6470 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6471 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6472 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6473 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6474 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6475 		{ "sata",	.cbl		= ATA_CBL_SATA },
6476 		{ "1.5Gbps",	.spd_limit	= 1 },
6477 		{ "3.0Gbps",	.spd_limit	= 2 },
6478 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6479 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6480 		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
6481 		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
6482 		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6483 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6484 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6485 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6486 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6487 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6488 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6489 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6490 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6491 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6492 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6493 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6494 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6495 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6496 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6497 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6498 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6499 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6500 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6501 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6502 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6503 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6504 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6505 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6506 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6507 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6508 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6509 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6510 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6511 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6512 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6513 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6514 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6515 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6516 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6517 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6518 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6519 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6520 		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6521 		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6522 		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6523 	};
6524 	char *start = *cur, *p = *cur;
6525 	char *id, *val, *endp;
6526 	const struct ata_force_param *match_fp = NULL;
6527 	int nr_matches = 0, i;
6528 
6529 	/* find where this param ends and update *cur */
6530 	while (*p != '\0' && *p != ',')
6531 		p++;
6532 
6533 	if (*p == '\0')
6534 		*cur = p;
6535 	else
6536 		*cur = p + 1;
6537 
6538 	*p = '\0';
6539 
6540 	/* parse */
6541 	p = strchr(start, ':');
6542 	if (!p) {
6543 		val = strstrip(start);
6544 		goto parse_val;
6545 	}
6546 	*p = '\0';
6547 
6548 	id = strstrip(start);
6549 	val = strstrip(p + 1);
6550 
6551 	/* parse id */
6552 	p = strchr(id, '.');
6553 	if (p) {
6554 		*p++ = '\0';
6555 		force_ent->device = simple_strtoul(p, &endp, 10);
6556 		if (p == endp || *endp != '\0') {
6557 			*reason = "invalid device";
6558 			return -EINVAL;
6559 		}
6560 	}
6561 
6562 	force_ent->port = simple_strtoul(id, &endp, 10);
6563 	if (id == endp || *endp != '\0') {
6564 		*reason = "invalid port/link";
6565 		return -EINVAL;
6566 	}
6567 
6568  parse_val:
6569 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6570 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6571 		const struct ata_force_param *fp = &force_tbl[i];
6572 
6573 		if (strncasecmp(val, fp->name, strlen(val)))
6574 			continue;
6575 
6576 		nr_matches++;
6577 		match_fp = fp;
6578 
6579 		if (strcasecmp(val, fp->name) == 0) {
6580 			nr_matches = 1;
6581 			break;
6582 		}
6583 	}
6584 
6585 	if (!nr_matches) {
6586 		*reason = "unknown value";
6587 		return -EINVAL;
6588 	}
6589 	if (nr_matches > 1) {
6590 		*reason = "ambigious value";
6591 		return -EINVAL;
6592 	}
6593 
6594 	force_ent->param = *match_fp;
6595 
6596 	return 0;
6597 }
6598 
6599 static void __init ata_parse_force_param(void)
6600 {
6601 	int idx = 0, size = 1;
6602 	int last_port = -1, last_device = -1;
6603 	char *p, *cur, *next;
6604 
6605 	/* calculate maximum number of params and allocate force_tbl */
6606 	for (p = ata_force_param_buf; *p; p++)
6607 		if (*p == ',')
6608 			size++;
6609 
6610 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6611 	if (!ata_force_tbl) {
6612 		printk(KERN_WARNING "ata: failed to extend force table, "
6613 		       "libata.force ignored\n");
6614 		return;
6615 	}
6616 
6617 	/* parse and populate the table */
6618 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6619 		const char *reason = "";
6620 		struct ata_force_ent te = { .port = -1, .device = -1 };
6621 
6622 		next = cur;
6623 		if (ata_parse_force_one(&next, &te, &reason)) {
6624 			printk(KERN_WARNING "ata: failed to parse force "
6625 			       "parameter \"%s\" (%s)\n",
6626 			       cur, reason);
6627 			continue;
6628 		}
6629 
6630 		if (te.port == -1) {
6631 			te.port = last_port;
6632 			te.device = last_device;
6633 		}
6634 
6635 		ata_force_tbl[idx++] = te;
6636 
6637 		last_port = te.port;
6638 		last_device = te.device;
6639 	}
6640 
6641 	ata_force_tbl_size = idx;
6642 }
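
/*
 * Illustrative examples of the libata.force strings parsed above (the
 * values are only samples):
 *
 *	libata.force=3.0Gbps		limit every port to 3.0 Gbps
 *	libata.force=2:noncq		port 2, all devices
 *	libata.force=1.00:udma/33,norst	port 1 device 0; the second entry
 *					has no ID and inherits that device
 */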
6643 
6644 static int __init ata_init(void)
6645 {
6646 	int rc;
6647 
6648 	ata_parse_force_param();
6649 
6650 	rc = ata_sff_init();
6651 	if (rc) {
6652 		kfree(ata_force_tbl);
6653 		return rc;
6654 	}
6655 
6656 	libata_transport_init();
6657 	ata_scsi_transport_template = ata_attach_transport();
6658 	if (!ata_scsi_transport_template) {
6659 		ata_sff_exit();
6660 		rc = -ENOMEM;
6661 		goto err_out;
6662 	}
6663 
6664 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6665 	return 0;
6666 
6667 err_out:
6668 	return rc;
6669 }
6670 
6671 static void __exit ata_exit(void)
6672 {
6673 	ata_release_transport(ata_scsi_transport_template);
6674 	libata_transport_exit();
6675 	ata_sff_exit();
6676 	kfree(ata_force_tbl);
6677 }
6678 
6679 subsys_initcall(ata_init);
6680 module_exit(ata_exit);
6681 
6682 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6683 
6684 int ata_ratelimit(void)
6685 {
6686 	return __ratelimit(&ratelimit);
6687 }
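
/*
 * Illustrative sketch: ata_ratelimit() is normally used to throttle
 * messages emitted from hot paths such as interrupt handlers, e.g.
 *
 *	if (unlikely(status & FOO_ERR) && ata_ratelimit())
 *		ata_port_warn(ap, "spurious error bit set\n");
 *
 * where FOO_ERR is a hypothetical status bit.
 */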
6688 
6689 /**
6690  *	ata_msleep - ATA EH owner aware msleep
6691  *	@ap: ATA port to attribute the sleep to
6692  *	@msecs: duration to sleep in milliseconds
6693  *
6694  *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6695  *	ownership is released before going to sleep and reacquired
6696  *	after the sleep is complete.  IOW, other ports sharing the
6697  *	@ap->host will be allowed to own the EH while this task is
6698  *	sleeping.
6699  *
6700  *	LOCKING:
6701  *	Might sleep.
6702  */
6703 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6704 {
6705 	bool owns_eh = ap && ap->host->eh_owner == current;
6706 
6707 	if (owns_eh)
6708 		ata_eh_release(ap);
6709 
6710 	if (msecs < 20) {
6711 		unsigned long usecs = msecs * USEC_PER_MSEC;
6712 		usleep_range(usecs, usecs + 50);
6713 	} else {
6714 		msleep(msecs);
6715 	}
6716 
6717 	if (owns_eh)
6718 		ata_eh_acquire(ap);
6719 }
6720 
6721 /**
6722  *	ata_wait_register - wait until register value changes
6723  *	@ap: ATA port to wait register for, can be NULL
6724  *	@reg: IO-mapped register
6725  *	@mask: Mask to apply to read register value
6726  *	@val: Wait condition
6727  *	@interval: polling interval in milliseconds
6728  *	@timeout: timeout in milliseconds
6729  *
6730  *	Waiting for some bits of a register to change is a common
6731  *	operation for ATA controllers.  This function reads 32bit LE
6732  *	IO-mapped register @reg and tests for the following condition.
6733  *
6734  *	(*@reg & mask) != val
6735  *
6736  *	If the condition is met, it returns; otherwise, the process is
6737  *	repeated after @interval until timeout.
6738  *
6739  *	LOCKING:
6740  *	Kernel thread context (may sleep)
6741  *
6742  *	RETURNS:
6743  *	The final register value.
6744  */
6745 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6746 		      unsigned long interval, unsigned long timeout)
6747 {
6748 	unsigned long deadline;
6749 	u32 tmp;
6750 
6751 	tmp = ioread32(reg);
6752 
6753 	/* Calculate timeout _after_ the first read to make sure
6754 	 * preceding writes reach the controller before starting to
6755 	 * eat away the timeout.
6756 	 */
6757 	deadline = ata_deadline(jiffies, timeout);
6758 
6759 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6760 		ata_msleep(ap, interval);
6761 		tmp = ioread32(reg);
6762 	}
6763 
6764 	return tmp;
6765 }
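
/*
 * Illustrative sketch: poll a hypothetical MMIO status register every
 * 10 ms for up to 1000 ms, waiting for its BUSY bit (0x80 here) to clear.
 *
 *	status = ata_wait_register(ap, mmio + FOO_STATUS, 0x80, 0x80,
 *				   10, 1000);
 *	if (status & 0x80)
 *		return -EBUSY;
 */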
6766 
6767 /**
6768  *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
6769  *	@link: Link receiving the event
6770  *
6771  *	Test whether the received PHY event has to be ignored or not.
6772  *
6773  *	LOCKING:
6774  *	None.
6775  *
6776  *	RETURNS:
6777  *	True if the event has to be ignored.
6778  */
6779 bool sata_lpm_ignore_phy_events(struct ata_link *link)
6780 {
6781 	unsigned long lpm_timeout = link->last_lpm_change +
6782 				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6783 
6784 	/* if LPM is enabled, PHYRDY doesn't mean anything */
6785 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
6786 		return true;
6787 
6788 	/* ignore the first PHY event after the LPM policy changed
6789 	 * as it is might be spurious
6790 	 */
6791 	if ((link->flags & ATA_LFLAG_CHANGED) &&
6792 	    time_before(jiffies, lpm_timeout))
6793 		return true;
6794 
6795 	return false;
6796 }
6797 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
6798 
6799 /*
6800  * Dummy port_ops
6801  */
6802 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6803 {
6804 	return AC_ERR_SYSTEM;
6805 }
6806 
6807 static void ata_dummy_error_handler(struct ata_port *ap)
6808 {
6809 	/* truly dummy */
6810 }
6811 
6812 struct ata_port_operations ata_dummy_port_ops = {
6813 	.qc_prep		= ata_noop_qc_prep,
6814 	.qc_issue		= ata_dummy_qc_issue,
6815 	.error_handler		= ata_dummy_error_handler,
6816 	.sched_eh		= ata_std_sched_eh,
6817 	.end_eh			= ata_std_end_eh,
6818 };
6819 
6820 const struct ata_port_info ata_dummy_port_info = {
6821 	.port_ops		= &ata_dummy_port_ops,
6822 };
6823 
6824 /*
6825  * Utility print functions
6826  */
6827 void ata_port_printk(const struct ata_port *ap, const char *level,
6828 		     const char *fmt, ...)
6829 {
6830 	struct va_format vaf;
6831 	va_list args;
6832 
6833 	va_start(args, fmt);
6834 
6835 	vaf.fmt = fmt;
6836 	vaf.va = &args;
6837 
6838 	printk("%sata%u: %pV", level, ap->print_id, &vaf);
6839 
6840 	va_end(args);
6841 }
6842 EXPORT_SYMBOL(ata_port_printk);
6843 
6844 void ata_link_printk(const struct ata_link *link, const char *level,
6845 		     const char *fmt, ...)
6846 {
6847 	struct va_format vaf;
6848 	va_list args;
6849 
6850 	va_start(args, fmt);
6851 
6852 	vaf.fmt = fmt;
6853 	vaf.va = &args;
6854 
6855 	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6856 		printk("%sata%u.%02u: %pV",
6857 		       level, link->ap->print_id, link->pmp, &vaf);
6858 	else
6859 		printk("%sata%u: %pV",
6860 		       level, link->ap->print_id, &vaf);
6861 
6862 	va_end(args);
6863 }
6864 EXPORT_SYMBOL(ata_link_printk);
6865 
6866 void ata_dev_printk(const struct ata_device *dev, const char *level,
6867 		    const char *fmt, ...)
6868 {
6869 	struct va_format vaf;
6870 	va_list args;
6871 
6872 	va_start(args, fmt);
6873 
6874 	vaf.fmt = fmt;
6875 	vaf.va = &args;
6876 
6877 	printk("%sata%u.%02u: %pV",
6878 	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6879 	       &vaf);
6880 
6881 	va_end(args);
6882 }
6883 EXPORT_SYMBOL(ata_dev_printk);
6884 
6885 void ata_print_version(const struct device *dev, const char *version)
6886 {
6887 	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6888 }
6889 EXPORT_SYMBOL(ata_print_version);
6890 
6891 /*
6892  * libata is essentially a library of internal helper functions for
6893  * low-level ATA host controller drivers.  As such, the API/ABI is
6894  * likely to change as new drivers are added and updated.
6895  * Do not depend on ABI/API stability.
6896  */
6897 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6898 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6899 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6900 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6901 EXPORT_SYMBOL_GPL(sata_port_ops);
6902 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6903 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6904 EXPORT_SYMBOL_GPL(ata_link_next);
6905 EXPORT_SYMBOL_GPL(ata_dev_next);
6906 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6907 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6908 EXPORT_SYMBOL_GPL(ata_host_init);
6909 EXPORT_SYMBOL_GPL(ata_host_alloc);
6910 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6911 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6912 EXPORT_SYMBOL_GPL(ata_host_start);
6913 EXPORT_SYMBOL_GPL(ata_host_register);
6914 EXPORT_SYMBOL_GPL(ata_host_activate);
6915 EXPORT_SYMBOL_GPL(ata_host_detach);
6916 EXPORT_SYMBOL_GPL(ata_sg_init);
6917 EXPORT_SYMBOL_GPL(ata_qc_complete);
6918 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6919 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6920 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6921 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6922 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6923 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6924 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6925 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6926 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6927 EXPORT_SYMBOL_GPL(ata_mode_string);
6928 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6929 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6930 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6931 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6932 EXPORT_SYMBOL_GPL(ata_dev_disable);
6933 EXPORT_SYMBOL_GPL(sata_set_spd);
6934 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6935 EXPORT_SYMBOL_GPL(sata_link_debounce);
6936 EXPORT_SYMBOL_GPL(sata_link_resume);
6937 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6938 EXPORT_SYMBOL_GPL(ata_std_prereset);
6939 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6940 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6941 EXPORT_SYMBOL_GPL(ata_std_postreset);
6942 EXPORT_SYMBOL_GPL(ata_dev_classify);
6943 EXPORT_SYMBOL_GPL(ata_dev_pair);
6944 EXPORT_SYMBOL_GPL(ata_ratelimit);
6945 EXPORT_SYMBOL_GPL(ata_msleep);
6946 EXPORT_SYMBOL_GPL(ata_wait_register);
6947 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6948 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6949 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6950 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6951 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6952 EXPORT_SYMBOL_GPL(sata_scr_valid);
6953 EXPORT_SYMBOL_GPL(sata_scr_read);
6954 EXPORT_SYMBOL_GPL(sata_scr_write);
6955 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6956 EXPORT_SYMBOL_GPL(ata_link_online);
6957 EXPORT_SYMBOL_GPL(ata_link_offline);
6958 #ifdef CONFIG_PM
6959 EXPORT_SYMBOL_GPL(ata_host_suspend);
6960 EXPORT_SYMBOL_GPL(ata_host_resume);
6961 #endif /* CONFIG_PM */
6962 EXPORT_SYMBOL_GPL(ata_id_string);
6963 EXPORT_SYMBOL_GPL(ata_id_c_string);
6964 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6965 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6966 
6967 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6968 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6969 EXPORT_SYMBOL_GPL(ata_timing_compute);
6970 EXPORT_SYMBOL_GPL(ata_timing_merge);
6971 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6972 
6973 #ifdef CONFIG_PCI
6974 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6975 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6976 #ifdef CONFIG_PM
6977 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6978 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6979 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6980 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6981 #endif /* CONFIG_PM */
6982 #endif /* CONFIG_PCI */
6983 
6984 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6985 
6986 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6987 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6988 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6989 EXPORT_SYMBOL_GPL(ata_port_desc);
6990 #ifdef CONFIG_PCI
6991 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6992 #endif /* CONFIG_PCI */
6993 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6994 EXPORT_SYMBOL_GPL(ata_link_abort);
6995 EXPORT_SYMBOL_GPL(ata_port_abort);
6996 EXPORT_SYMBOL_GPL(ata_port_freeze);
6997 EXPORT_SYMBOL_GPL(sata_async_notification);
6998 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6999 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7000 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7001 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7002 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7003 EXPORT_SYMBOL_GPL(ata_do_eh);
7004 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7005 
7006 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7007 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7008 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7009 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7010 EXPORT_SYMBOL_GPL(ata_cable_sata);
7011