xref: /openbmc/linux/drivers/ata/libata-core.c (revision d8bcaabe)
1 /*
2  *  libata-core.c - helper library for ATA
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9  *  Copyright 2003-2004 Jeff Garzik
10  *
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2, or (at your option)
15  *  any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; see the file COPYING.  If not, write to
24  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/driver-api/libata.rst
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  *  Standards documents from:
34  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
35  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
36  *	http://www.sata-io.org (SATA)
37  *	http://www.compactflash.org (CF)
38  *	http://www.qic.org (QIC157 - Tape and DSC)
39  *	http://www.ce-ata.org (CE-ATA: not supported)
40  *
41  */
42 
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/time.h>
54 #include <linux/interrupt.h>
55 #include <linux/completion.h>
56 #include <linux/suspend.h>
57 #include <linux/workqueue.h>
58 #include <linux/scatterlist.h>
59 #include <linux/io.h>
60 #include <linux/async.h>
61 #include <linux/log2.h>
62 #include <linux/slab.h>
63 #include <linux/glob.h>
64 #include <scsi/scsi.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_host.h>
67 #include <linux/libata.h>
68 #include <asm/byteorder.h>
69 #include <asm/unaligned.h>
70 #include <linux/cdrom.h>
71 #include <linux/ratelimit.h>
72 #include <linux/leds.h>
73 #include <linux/pm_runtime.h>
74 #include <linux/platform_device.h>
75 
76 #define CREATE_TRACE_POINTS
77 #include <trace/events/libata.h>
78 
79 #include "libata.h"
80 #include "libata-transport.h"
81 
82 /* debounce timing parameters in msecs { interval, duration, timeout } */
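/*
 * These tables are consumed by sata_link_debounce(): SStatus is polled
 * every "interval" msecs and must hold a stable value for "duration"
 * msecs before the link is considered settled; "timeout" bounds the
 * whole wait.
 */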
83 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
84 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
85 const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
86 
87 const struct ata_port_operations ata_base_port_ops = {
88 	.prereset		= ata_std_prereset,
89 	.postreset		= ata_std_postreset,
90 	.error_handler		= ata_std_error_handler,
91 	.sched_eh		= ata_std_sched_eh,
92 	.end_eh			= ata_std_end_eh,
93 };
94 
95 const struct ata_port_operations sata_port_ops = {
96 	.inherits		= &ata_base_port_ops,
97 
98 	.qc_defer		= ata_std_qc_defer,
99 	.hardreset		= sata_std_hardreset,
100 };
101 
102 static unsigned int ata_dev_init_params(struct ata_device *dev,
103 					u16 heads, u16 sectors);
104 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
105 static void ata_dev_xfermask(struct ata_device *dev);
106 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
107 
108 atomic_t ata_print_id = ATOMIC_INIT(0);
109 
110 struct ata_force_param {
111 	const char	*name;
112 	unsigned int	cbl;
113 	int		spd_limit;
114 	unsigned long	xfer_mask;
115 	unsigned int	horkage_on;
116 	unsigned int	horkage_off;
117 	unsigned int	lflags;
118 };
119 
120 struct ata_force_ent {
121 	int			port;
122 	int			device;
123 	struct ata_force_param	param;
124 };
125 
126 static struct ata_force_ent *ata_force_tbl;
127 static int ata_force_tbl_size;
128 
129 static char ata_force_param_buf[PAGE_SIZE] __initdata;
130 /* param_buf is thrown away after initialization, disallow read */
131 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
132 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
133 
134 static int atapi_enabled = 1;
135 module_param(atapi_enabled, int, 0444);
136 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
137 
138 static int atapi_dmadir = 0;
139 module_param(atapi_dmadir, int, 0444);
140 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
141 
142 int atapi_passthru16 = 1;
143 module_param(atapi_passthru16, int, 0444);
144 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
145 
146 int libata_fua = 0;
147 module_param_named(fua, libata_fua, int, 0444);
148 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
149 
150 static int ata_ignore_hpa;
151 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
152 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
153 
154 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
155 module_param_named(dma, libata_dma_mask, int, 0444);
156 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
157 
158 static int ata_probe_timeout;
159 module_param(ata_probe_timeout, int, 0444);
160 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
161 
162 int libata_noacpi = 0;
163 module_param_named(noacpi, libata_noacpi, int, 0444);
164 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
165 
166 int libata_allow_tpm = 0;
167 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
168 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
169 
170 static int atapi_an;
171 module_param(atapi_an, int, 0444);
172 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
173 
174 MODULE_AUTHOR("Jeff Garzik");
175 MODULE_DESCRIPTION("Library module for ATA devices");
176 MODULE_LICENSE("GPL");
177 MODULE_VERSION(DRV_VERSION);
178 
179 
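/*
 * An SStatus DET field (bits 3:0) of 0x3 means device presence detected
 * and Phy communication established, i.e. the link is online.
 */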
180 static bool ata_sstatus_online(u32 sstatus)
181 {
182 	return (sstatus & 0xf) == 0x3;
183 }
184 
185 /**
186  *	ata_link_next - link iteration helper
187  *	@link: the previous link, NULL to start
188  *	@ap: ATA port containing links to iterate
189  *	@mode: iteration mode, one of ATA_LITER_*
190  *
191  *	LOCKING:
192  *	Host lock or EH context.
193  *
194  *	RETURNS:
195  *	Pointer to the next link.
196  */
197 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
198 			       enum ata_link_iter_mode mode)
199 {
200 	BUG_ON(mode != ATA_LITER_EDGE &&
201 	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
202 
203 	/* NULL link indicates start of iteration */
204 	if (!link)
205 		switch (mode) {
206 		case ATA_LITER_EDGE:
207 		case ATA_LITER_PMP_FIRST:
208 			if (sata_pmp_attached(ap))
209 				return ap->pmp_link;
210 			/* fall through */
211 		case ATA_LITER_HOST_FIRST:
212 			return &ap->link;
213 		}
214 
215 	/* we just iterated over the host link, what's next? */
216 	if (link == &ap->link)
217 		switch (mode) {
218 		case ATA_LITER_HOST_FIRST:
219 			if (sata_pmp_attached(ap))
220 				return ap->pmp_link;
221 			/* fall through */
222 		case ATA_LITER_PMP_FIRST:
223 			if (unlikely(ap->slave_link))
224 				return ap->slave_link;
225 			/* fall through */
226 		case ATA_LITER_EDGE:
227 			return NULL;
228 		}
229 
230 	/* slave_link excludes PMP */
231 	if (unlikely(link == ap->slave_link))
232 		return NULL;
233 
234 	/* we were over a PMP link */
235 	if (++link < ap->pmp_link + ap->nr_pmp_links)
236 		return link;
237 
238 	if (mode == ATA_LITER_PMP_FIRST)
239 		return &ap->link;
240 
241 	return NULL;
242 }
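
/*
 * Callers normally iterate links through the ata_for_each_link() helper
 * in <linux/libata.h>, which wraps ata_link_next(), e.g.:
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_link_notice(link, "...\n");
 */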
243 
244 /**
245  *	ata_dev_next - device iteration helper
246  *	@dev: the previous device, NULL to start
247  *	@link: ATA link containing devices to iterate
248  *	@mode: iteration mode, one of ATA_DITER_*
249  *
250  *	LOCKING:
251  *	Host lock or EH context.
252  *
253  *	RETURNS:
254  *	Pointer to the next device.
255  */
256 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
257 				enum ata_dev_iter_mode mode)
258 {
259 	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
260 	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
261 
262 	/* NULL dev indicates start of iteration */
263 	if (!dev)
264 		switch (mode) {
265 		case ATA_DITER_ENABLED:
266 		case ATA_DITER_ALL:
267 			dev = link->device;
268 			goto check;
269 		case ATA_DITER_ENABLED_REVERSE:
270 		case ATA_DITER_ALL_REVERSE:
271 			dev = link->device + ata_link_max_devices(link) - 1;
272 			goto check;
273 		}
274 
275  next:
276 	/* move to the next one */
277 	switch (mode) {
278 	case ATA_DITER_ENABLED:
279 	case ATA_DITER_ALL:
280 		if (++dev < link->device + ata_link_max_devices(link))
281 			goto check;
282 		return NULL;
283 	case ATA_DITER_ENABLED_REVERSE:
284 	case ATA_DITER_ALL_REVERSE:
285 		if (--dev >= link->device)
286 			goto check;
287 		return NULL;
288 	}
289 
290  check:
291 	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
292 	    !ata_dev_enabled(dev))
293 		goto next;
294 	return dev;
295 }
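
/*
 * As with links, devices are normally iterated with the
 * ata_for_each_dev() helper in <linux/libata.h> rather than by calling
 * ata_dev_next() directly, e.g.:
 *
 *	ata_for_each_dev(dev, link, ENABLED)
 *		ata_dev_info(dev, "...\n");
 */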
296 
297 /**
298  *	ata_dev_phys_link - find physical link for a device
299  *	@dev: ATA device to look up physical link for
300  *
301  *	Look up physical link which @dev is attached to.  Note that
302  *	this is different from @dev->link only when @dev is on slave
303  *	link.  For all other cases, it's the same as @dev->link.
304  *
305  *	LOCKING:
306  *	Don't care.
307  *
308  *	RETURNS:
309  *	Pointer to the found physical link.
310  */
311 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
312 {
313 	struct ata_port *ap = dev->link->ap;
314 
315 	if (!ap->slave_link)
316 		return dev->link;
317 	if (!dev->devno)
318 		return &ap->link;
319 	return ap->slave_link;
320 }
321 
322 /**
323  *	ata_force_cbl - force cable type according to libata.force
324  *	@ap: ATA port of interest
325  *
326  *	Force cable type according to libata.force and whine about it.
327  *	The last entry which has matching port number is used, so it
328  *	can be specified as part of device force parameters.  For
329  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
330  *	same effect.
331  *
332  *	LOCKING:
333  *	EH context.
334  */
335 void ata_force_cbl(struct ata_port *ap)
336 {
337 	int i;
338 
339 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
340 		const struct ata_force_ent *fe = &ata_force_tbl[i];
341 
342 		if (fe->port != -1 && fe->port != ap->print_id)
343 			continue;
344 
345 		if (fe->param.cbl == ATA_CBL_NONE)
346 			continue;
347 
348 		ap->cbl = fe->param.cbl;
349 		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
350 		return;
351 	}
352 }
353 
354 /**
355  *	ata_force_link_limits - force link limits according to libata.force
356  *	@link: ATA link of interest
357  *
358  *	Force link flags and SATA spd limit according to libata.force
359  *	and whine about it.  When only the port part is specified
360  *	(e.g. 1:), the limit applies to all links connected to both
361  *	the host link and all fan-out ports connected via PMP.  If the
362  *	device part is specified as 0 (e.g. 1.00:), it specifies the
363  *	first fan-out link not the host link.  Device number 15 always
364  *	first fan-out link, not the host link.  Device number 15 always
365  *	controller has slave link, device number 16 points to it.
366  *	controller has a slave link, device number 16 points to it.
367  *	LOCKING:
368  *	EH context.
369  */
370 static void ata_force_link_limits(struct ata_link *link)
371 {
372 	bool did_spd = false;
373 	int linkno = link->pmp;
374 	int i;
375 
376 	if (ata_is_host_link(link))
377 		linkno += 15;
378 
379 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
380 		const struct ata_force_ent *fe = &ata_force_tbl[i];
381 
382 		if (fe->port != -1 && fe->port != link->ap->print_id)
383 			continue;
384 
385 		if (fe->device != -1 && fe->device != linkno)
386 			continue;
387 
388 		/* only honor the first spd limit */
389 		if (!did_spd && fe->param.spd_limit) {
390 			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
391 			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
392 					fe->param.name);
393 			did_spd = true;
394 		}
395 
396 		/* let lflags stack */
397 		if (fe->param.lflags) {
398 			link->flags |= fe->param.lflags;
399 			ata_link_notice(link,
400 					"FORCE: link flag 0x%x forced -> 0x%x\n",
401 					fe->param.lflags, link->flags);
402 		}
403 	}
404 }
405 
406 /**
407  *	ata_force_xfermask - force xfermask according to libata.force
408  *	@dev: ATA device of interest
409  *
410  *	Force xfer_mask according to libata.force and whine about it.
411  *	For consistency with link selection, device number 15 selects
412  *	the first device connected to the host link.
413  *
414  *	LOCKING:
415  *	EH context.
416  */
417 static void ata_force_xfermask(struct ata_device *dev)
418 {
419 	int devno = dev->link->pmp + dev->devno;
420 	int alt_devno = devno;
421 	int i;
422 
423 	/* allow n.15/16 for devices attached to host port */
424 	if (ata_is_host_link(dev->link))
425 		alt_devno += 15;
426 
427 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
428 		const struct ata_force_ent *fe = &ata_force_tbl[i];
429 		unsigned long pio_mask, mwdma_mask, udma_mask;
430 
431 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
432 			continue;
433 
434 		if (fe->device != -1 && fe->device != devno &&
435 		    fe->device != alt_devno)
436 			continue;
437 
438 		if (!fe->param.xfer_mask)
439 			continue;
440 
441 		ata_unpack_xfermask(fe->param.xfer_mask,
442 				    &pio_mask, &mwdma_mask, &udma_mask);
443 		if (udma_mask)
444 			dev->udma_mask = udma_mask;
445 		else if (mwdma_mask) {
446 			dev->udma_mask = 0;
447 			dev->mwdma_mask = mwdma_mask;
448 		} else {
449 			dev->udma_mask = 0;
450 			dev->mwdma_mask = 0;
451 			dev->pio_mask = pio_mask;
452 		}
453 
454 		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
455 			       fe->param.name);
456 		return;
457 	}
458 }
459 
460 /**
461  *	ata_force_horkage - force horkage according to libata.force
462  *	@dev: ATA device of interest
463  *
464  *	Force horkage according to libata.force and whine about it.
465  *	For consistency with link selection, device number 15 selects
466  *	the first device connected to the host link.
467  *
468  *	LOCKING:
469  *	EH context.
470  */
471 static void ata_force_horkage(struct ata_device *dev)
472 {
473 	int devno = dev->link->pmp + dev->devno;
474 	int alt_devno = devno;
475 	int i;
476 
477 	/* allow n.15/16 for devices attached to host port */
478 	if (ata_is_host_link(dev->link))
479 		alt_devno += 15;
480 
481 	for (i = 0; i < ata_force_tbl_size; i++) {
482 		const struct ata_force_ent *fe = &ata_force_tbl[i];
483 
484 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
485 			continue;
486 
487 		if (fe->device != -1 && fe->device != devno &&
488 		    fe->device != alt_devno)
489 			continue;
490 
491 		if (!(~dev->horkage & fe->param.horkage_on) &&
492 		    !(dev->horkage & fe->param.horkage_off))
493 			continue;
494 
495 		dev->horkage |= fe->param.horkage_on;
496 		dev->horkage &= ~fe->param.horkage_off;
497 
498 		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
499 			       fe->param.name);
500 	}
501 }
502 
503 /**
504  *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
505  *	@opcode: SCSI opcode
506  *
507  *	Determine ATAPI command type from @opcode.
508  *
509  *	LOCKING:
510  *	None.
511  *
512  *	RETURNS:
513  *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
514  */
515 int atapi_cmd_type(u8 opcode)
516 {
517 	switch (opcode) {
518 	case GPCMD_READ_10:
519 	case GPCMD_READ_12:
520 		return ATAPI_READ;
521 
522 	case GPCMD_WRITE_10:
523 	case GPCMD_WRITE_12:
524 	case GPCMD_WRITE_AND_VERIFY_10:
525 		return ATAPI_WRITE;
526 
527 	case GPCMD_READ_CD:
528 	case GPCMD_READ_CD_MSF:
529 		return ATAPI_READ_CD;
530 
531 	case ATA_16:
532 	case ATA_12:
533 		if (atapi_passthru16)
534 			return ATAPI_PASS_THRU;
535 		/* fall through */
536 	default:
537 		return ATAPI_MISC;
538 	}
539 }
540 
541 /**
542  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
543  *	@tf: Taskfile to convert
544  *	@pmp: Port multiplier port
545  *	@is_cmd: This FIS is for command
546  *	@fis: Buffer into which data will output
547  *
548  *	Converts a standard ATA taskfile to a Serial ATA
549  *	FIS structure (Register - Host to Device).
550  *
551  *	LOCKING:
552  *	Inherited from caller.
553  */
554 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
555 {
556 	fis[0] = 0x27;			/* Register - Host to Device FIS */
557 	fis[1] = pmp & 0xf;		/* Port multiplier number */
558 	if (is_cmd)
559 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
560 
561 	fis[2] = tf->command;
562 	fis[3] = tf->feature;
563 
564 	fis[4] = tf->lbal;
565 	fis[5] = tf->lbam;
566 	fis[6] = tf->lbah;
567 	fis[7] = tf->device;
568 
569 	fis[8] = tf->hob_lbal;
570 	fis[9] = tf->hob_lbam;
571 	fis[10] = tf->hob_lbah;
572 	fis[11] = tf->hob_feature;
573 
574 	fis[12] = tf->nsect;
575 	fis[13] = tf->hob_nsect;
576 	fis[14] = 0;
577 	fis[15] = tf->ctl;
578 
579 	fis[16] = tf->auxiliary & 0xff;
580 	fis[17] = (tf->auxiliary >> 8) & 0xff;
581 	fis[18] = (tf->auxiliary >> 16) & 0xff;
582 	fis[19] = (tf->auxiliary >> 24) & 0xff;
583 }
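
/*
 * The FIS built above is the 20-byte (five dword) Register - Host to
 * Device FIS; bytes 16-19 carry the auxiliary field.
 */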
584 
585 /**
586  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
587  *	@fis: Buffer from which data will be input
588  *	@tf: Taskfile to output
589  *
590  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
591  *
592  *	LOCKING:
593  *	Inherited from caller.
594  */
595 
596 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
597 {
598 	tf->command	= fis[2];	/* status */
599 	tf->feature	= fis[3];	/* error */
600 
601 	tf->lbal	= fis[4];
602 	tf->lbam	= fis[5];
603 	tf->lbah	= fis[6];
604 	tf->device	= fis[7];
605 
606 	tf->hob_lbal	= fis[8];
607 	tf->hob_lbam	= fis[9];
608 	tf->hob_lbah	= fis[10];
609 
610 	tf->nsect	= fis[12];
611 	tf->hob_nsect	= fis[13];
612 }
613 
614 static const u8 ata_rw_cmds[] = {
615 	/* pio multi */
616 	ATA_CMD_READ_MULTI,
617 	ATA_CMD_WRITE_MULTI,
618 	ATA_CMD_READ_MULTI_EXT,
619 	ATA_CMD_WRITE_MULTI_EXT,
620 	0,
621 	0,
622 	0,
623 	ATA_CMD_WRITE_MULTI_FUA_EXT,
624 	/* pio */
625 	ATA_CMD_PIO_READ,
626 	ATA_CMD_PIO_WRITE,
627 	ATA_CMD_PIO_READ_EXT,
628 	ATA_CMD_PIO_WRITE_EXT,
629 	0,
630 	0,
631 	0,
632 	0,
633 	/* dma */
634 	ATA_CMD_READ,
635 	ATA_CMD_WRITE,
636 	ATA_CMD_READ_EXT,
637 	ATA_CMD_WRITE_EXT,
638 	0,
639 	0,
640 	0,
641 	ATA_CMD_WRITE_FUA_EXT
642 };
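
/*
 * ata_rw_cmds[] above is indexed by ata_rwcmd_protocol() below as
 * (8 * class) + fua + lba48 + write, where class is 0 for multi-sector
 * PIO, 1 for plain PIO and 2 for DMA, and fua/lba48/write contribute
 * 4/2/1 respectively.  Zero entries mark invalid combinations, e.g.
 * FUA without LBA48.
 */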
643 
644 /**
645  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
646  *	@tf: command to examine and configure
647  *	@dev: device tf belongs to
648  *
649  *	Examine the device configuration and tf->flags to calculate
650  *	the proper read/write commands and protocol to use.
651  *
652  *	LOCKING:
653  *	caller.
654  */
655 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
656 {
657 	u8 cmd;
658 
659 	int index, fua, lba48, write;
660 
661 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
662 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
663 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
664 
665 	if (dev->flags & ATA_DFLAG_PIO) {
666 		tf->protocol = ATA_PROT_PIO;
667 		index = dev->multi_count ? 0 : 8;
668 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
669 		/* Unable to use DMA due to host limitation */
670 		tf->protocol = ATA_PROT_PIO;
671 		index = dev->multi_count ? 0 : 8;
672 	} else {
673 		tf->protocol = ATA_PROT_DMA;
674 		index = 16;
675 	}
676 
677 	cmd = ata_rw_cmds[index + fua + lba48 + write];
678 	if (cmd) {
679 		tf->command = cmd;
680 		return 0;
681 	}
682 	return -1;
683 }
684 
685 /**
686  *	ata_tf_read_block - Read block address from ATA taskfile
687  *	@tf: ATA taskfile of interest
688  *	@dev: ATA device @tf belongs to
689  *
690  *	LOCKING:
691  *	None.
692  *
693  *	Read block address from @tf.  This function can handle all
694  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
695  *	flags select the address format to use.
696  *
697  *	RETURNS:
698  *	Block address read from @tf.
699  */
700 u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
701 {
702 	u64 block = 0;
703 
704 	if (tf->flags & ATA_TFLAG_LBA) {
705 		if (tf->flags & ATA_TFLAG_LBA48) {
706 			block |= (u64)tf->hob_lbah << 40;
707 			block |= (u64)tf->hob_lbam << 32;
708 			block |= (u64)tf->hob_lbal << 24;
709 		} else
710 			block |= (tf->device & 0xf) << 24;
711 
712 		block |= tf->lbah << 16;
713 		block |= tf->lbam << 8;
714 		block |= tf->lbal;
715 	} else {
716 		u32 cyl, head, sect;
717 
718 		cyl = tf->lbam | (tf->lbah << 8);
719 		head = tf->device & 0xf;
720 		sect = tf->lbal;
721 
722 		if (!sect) {
723 			ata_dev_warn(dev,
724 				     "device reported invalid CHS sector 0\n");
725 			return U64_MAX;
726 		}
727 
728 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
729 	}
730 
731 	return block;
732 }
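
/*
 * For example, with a hypothetical 16-head, 63-sectors-per-track
 * geometry, cyl=2/head=5/sect=10 maps back to
 * (2 * 16 + 5) * 63 + 10 - 1 = LBA 2340.
 */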
733 
734 /**
735  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
736  *	@tf: Target ATA taskfile
737  *	@dev: ATA device @tf belongs to
738  *	@block: Block address
739  *	@n_block: Number of blocks
740  *	@tf_flags: RW/FUA etc...
741  *	@tag: tag
742  *	@class: IO priority class
743  *
744  *	LOCKING:
745  *	None.
746  *
747  *	Build ATA taskfile @tf for read/write request described by
748  *	@block, @n_block, @tf_flags and @tag on @dev.
749  *
750  *	RETURNS:
751  *
752  *	0 on success, -ERANGE if the request is too large for @dev,
753  *	-EINVAL if the request is invalid.
754  */
755 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
756 		    u64 block, u32 n_block, unsigned int tf_flags,
757 		    unsigned int tag, int class)
758 {
759 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
760 	tf->flags |= tf_flags;
761 
762 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
763 		/* yay, NCQ */
764 		if (!lba_48_ok(block, n_block))
765 			return -ERANGE;
766 
767 		tf->protocol = ATA_PROT_NCQ;
768 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
769 
770 		if (tf->flags & ATA_TFLAG_WRITE)
771 			tf->command = ATA_CMD_FPDMA_WRITE;
772 		else
773 			tf->command = ATA_CMD_FPDMA_READ;
774 
775 		tf->nsect = tag << 3;
776 		tf->hob_feature = (n_block >> 8) & 0xff;
777 		tf->feature = n_block & 0xff;
778 
779 		tf->hob_lbah = (block >> 40) & 0xff;
780 		tf->hob_lbam = (block >> 32) & 0xff;
781 		tf->hob_lbal = (block >> 24) & 0xff;
782 		tf->lbah = (block >> 16) & 0xff;
783 		tf->lbam = (block >> 8) & 0xff;
784 		tf->lbal = block & 0xff;
785 
786 		tf->device = ATA_LBA;
787 		if (tf->flags & ATA_TFLAG_FUA)
788 			tf->device |= 1 << 7;
789 
790 		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
791 			if (class == IOPRIO_CLASS_RT)
792 				tf->hob_nsect |= ATA_PRIO_HIGH <<
793 						 ATA_SHIFT_PRIO;
794 		}
795 	} else if (dev->flags & ATA_DFLAG_LBA) {
796 		tf->flags |= ATA_TFLAG_LBA;
797 
798 		if (lba_28_ok(block, n_block)) {
799 			/* use LBA28 */
800 			tf->device |= (block >> 24) & 0xf;
801 		} else if (lba_48_ok(block, n_block)) {
802 			if (!(dev->flags & ATA_DFLAG_LBA48))
803 				return -ERANGE;
804 
805 			/* use LBA48 */
806 			tf->flags |= ATA_TFLAG_LBA48;
807 
808 			tf->hob_nsect = (n_block >> 8) & 0xff;
809 
810 			tf->hob_lbah = (block >> 40) & 0xff;
811 			tf->hob_lbam = (block >> 32) & 0xff;
812 			tf->hob_lbal = (block >> 24) & 0xff;
813 		} else
814 			/* request too large even for LBA48 */
815 			return -ERANGE;
816 
817 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
818 			return -EINVAL;
819 
820 		tf->nsect = n_block & 0xff;
821 
822 		tf->lbah = (block >> 16) & 0xff;
823 		tf->lbam = (block >> 8) & 0xff;
824 		tf->lbal = block & 0xff;
825 
826 		tf->device |= ATA_LBA;
827 	} else {
828 		/* CHS */
829 		u32 sect, head, cyl, track;
830 
831 		/* The request -may- be too large for CHS addressing. */
832 		if (!lba_28_ok(block, n_block))
833 			return -ERANGE;
834 
835 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
836 			return -EINVAL;
837 
838 		/* Convert LBA to CHS */
839 		track = (u32)block / dev->sectors;
840 		cyl   = track / dev->heads;
841 		head  = track % dev->heads;
842 		sect  = (u32)block % dev->sectors + 1;
843 
844 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
845 			(u32)block, track, cyl, head, sect);
846 
847 		/* Check whether the converted CHS can fit.
848 		   Cylinder: 0-65535
849 		   Head: 0-15
850 		   Sector: 1-255 */
851 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
852 			return -ERANGE;
853 
854 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
855 		tf->lbal = sect;
856 		tf->lbam = cyl;
857 		tf->lbah = cyl >> 8;
858 		tf->device |= head;
859 	}
860 
861 	return 0;
862 }
863 
864 /**
865  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
866  *	@pio_mask: pio_mask
867  *	@mwdma_mask: mwdma_mask
868  *	@udma_mask: udma_mask
869  *
870  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
871  *	unsigned int xfer_mask.
872  *
873  *	LOCKING:
874  *	None.
875  *
876  *	RETURNS:
877  *	Packed xfer_mask.
878  */
879 unsigned long ata_pack_xfermask(unsigned long pio_mask,
880 				unsigned long mwdma_mask,
881 				unsigned long udma_mask)
882 {
883 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
884 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
885 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
886 }
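
/*
 * For example, ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6)
 * folds the PIO0-4, MWDMA0-2 and UDMA0-6 mode masks into a single
 * xfer_mask word; ata_unpack_xfermask() below performs the reverse.
 */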
887 
888 /**
889  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
890  *	@xfer_mask: xfer_mask to unpack
891  *	@pio_mask: resulting pio_mask
892  *	@mwdma_mask: resulting mwdma_mask
893  *	@udma_mask: resulting udma_mask
894  *
895  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
896  *	Any NULL destination masks will be ignored.
897  */
898 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
899 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
900 {
901 	if (pio_mask)
902 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
903 	if (mwdma_mask)
904 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
905 	if (udma_mask)
906 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
907 }
908 
909 static const struct ata_xfer_ent {
910 	int shift, bits;
911 	u8 base;
912 } ata_xfer_tbl[] = {
913 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
914 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
915 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
916 	{ -1, },
917 };
918 
919 /**
920  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
921  *	@xfer_mask: xfer_mask of interest
922  *
923  *	Return matching XFER_* value for @xfer_mask.  Only the highest
924  *	bit of @xfer_mask is considered.
925  *
926  *	LOCKING:
927  *	None.
928  *
929  *	RETURNS:
930  *	Matching XFER_* value, 0xff if no match found.
931  */
932 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
933 {
934 	int highbit = fls(xfer_mask) - 1;
935 	const struct ata_xfer_ent *ent;
936 
937 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
938 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
939 			return ent->base + highbit - ent->shift;
940 	return 0xff;
941 }
942 
943 /**
944  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
945  *	@xfer_mode: XFER_* of interest
946  *
947  *	Return matching xfer_mask for @xfer_mode.
948  *
949  *	LOCKING:
950  *	None.
951  *
952  *	RETURNS:
953  *	Matching xfer_mask, 0 if no match found.
954  */
955 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
956 {
957 	const struct ata_xfer_ent *ent;
958 
959 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
960 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
961 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
962 				& ~((1 << ent->shift) - 1);
963 	return 0;
964 }
965 
966 /**
967  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
968  *	@xfer_mode: XFER_* of interest
969  *
970  *	Return matching xfer_shift for @xfer_mode.
971  *
972  *	LOCKING:
973  *	None.
974  *
975  *	RETURNS:
976  *	Matching xfer_shift, -1 if no match found.
977  */
978 int ata_xfer_mode2shift(unsigned long xfer_mode)
979 {
980 	const struct ata_xfer_ent *ent;
981 
982 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
983 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
984 			return ent->shift;
985 	return -1;
986 }
987 
988 /**
989  *	ata_mode_string - convert xfer_mask to string
990  *	@xfer_mask: mask of bits supported; only highest bit counts.
991  *
992  *	Determine string which represents the highest speed
993  *	(highest bit in @xfer_mask).
994  *
995  *	LOCKING:
996  *	None.
997  *
998  *	RETURNS:
999  *	Constant C string representing highest speed listed in
1000  *	@xfer_mask, or the constant C string "<n/a>".
1001  */
1002 const char *ata_mode_string(unsigned long xfer_mask)
1003 {
1004 	static const char * const xfer_mode_str[] = {
1005 		"PIO0",
1006 		"PIO1",
1007 		"PIO2",
1008 		"PIO3",
1009 		"PIO4",
1010 		"PIO5",
1011 		"PIO6",
1012 		"MWDMA0",
1013 		"MWDMA1",
1014 		"MWDMA2",
1015 		"MWDMA3",
1016 		"MWDMA4",
1017 		"UDMA/16",
1018 		"UDMA/25",
1019 		"UDMA/33",
1020 		"UDMA/44",
1021 		"UDMA/66",
1022 		"UDMA/100",
1023 		"UDMA/133",
1024 		"UDMA7",
1025 	};
1026 	int highbit;
1027 
1028 	highbit = fls(xfer_mask) - 1;
1029 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1030 		return xfer_mode_str[highbit];
1031 	return "<n/a>";
1032 }
1033 
1034 const char *sata_spd_string(unsigned int spd)
1035 {
1036 	static const char * const spd_str[] = {
1037 		"1.5 Gbps",
1038 		"3.0 Gbps",
1039 		"6.0 Gbps",
1040 	};
1041 
1042 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1043 		return "<unknown>";
1044 	return spd_str[spd - 1];
1045 }
1046 
1047 /**
1048  *	ata_dev_classify - determine device type based on ATA-spec signature
1049  *	@tf: ATA taskfile register set for device to be identified
1050  *
1051  *	Determine from taskfile register contents whether a device is
1052  *	ATA or ATAPI, as per "Signature and persistence" section
1053  *	of ATA/PI spec (volume 1, sect 5.14).
1054  *
1055  *	LOCKING:
1056  *	None.
1057  *
1058  *	RETURNS:
1059  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1060  *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1061  */
1062 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1063 {
1064 	/* Apple's open source Darwin code hints that some devices only
1065 	 * put a proper signature into the LBA mid/high registers,
1066 	 * so we only check those.  It's sufficient for uniqueness.
1067 	 *
1068 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1069 	 * signatures for ATA and ATAPI devices attached on SerialATA,
1070 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1071 	 * spec has never mentioned using different signatures
1072 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1073 	 * Multiplier specification began to use 0x69/0x96 to identify
1074 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1075 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1076 	 * 0x69/0x96 and described them as reserved for
1077 	 * SerialATA.
1078 	 *
1079 	 * We follow the current spec and consider that 0x69/0x96
1080 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1081 	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1082 	 * SEMB signature.  This is worked around in
1083 	 * ata_dev_read_id().
1084 	 */
1085 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1086 		DPRINTK("found ATA device by sig\n");
1087 		return ATA_DEV_ATA;
1088 	}
1089 
1090 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1091 		DPRINTK("found ATAPI device by sig\n");
1092 		return ATA_DEV_ATAPI;
1093 	}
1094 
1095 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1096 		DPRINTK("found PMP device by sig\n");
1097 		return ATA_DEV_PMP;
1098 	}
1099 
1100 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1101 		DPRINTK("found SEMB device by sig (could be ATA device)\n");
1102 		return ATA_DEV_SEMB;
1103 	}
1104 
1105 	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1106 		DPRINTK("found ZAC device by sig\n");
1107 		return ATA_DEV_ZAC;
1108 	}
1109 
1110 	DPRINTK("unknown device\n");
1111 	return ATA_DEV_UNKNOWN;
1112 }
1113 
1114 /**
1115  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1116  *	@id: IDENTIFY DEVICE results we will examine
1117  *	@s: string into which data is output
1118  *	@ofs: offset into identify device page
1119  *	@len: length of string to return. must be an even number.
1120  *
1121  *	The strings in the IDENTIFY DEVICE page are broken up into
1122  *	16-bit chunks.  Run through the string, and output each
1123  *	8-bit chunk linearly, regardless of platform.
1124  *
1125  *	LOCKING:
1126  *	caller.
1127  */
1128 
1129 void ata_id_string(const u16 *id, unsigned char *s,
1130 		   unsigned int ofs, unsigned int len)
1131 {
1132 	unsigned int c;
1133 
1134 	BUG_ON(len & 1);
1135 
1136 	while (len > 0) {
1137 		c = id[ofs] >> 8;
1138 		*s = c;
1139 		s++;
1140 
1141 		c = id[ofs] & 0xff;
1142 		*s = c;
1143 		s++;
1144 
1145 		ofs++;
1146 		len -= 2;
1147 	}
1148 }
1149 
1150 /**
1151  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1152  *	@id: IDENTIFY DEVICE results we will examine
1153  *	@s: string into which data is output
1154  *	@ofs: offset into identify device page
1155  *	@len: length of string to return. must be an odd number.
1156  *
1157  *	This function is identical to ata_id_string except that it
1158  *	trims trailing spaces and terminates the resulting string with
1159  *	null.  @len must be actual maximum length (even number) + 1.
1160  *
1161  *	LOCKING:
1162  *	caller.
1163  */
1164 void ata_id_c_string(const u16 *id, unsigned char *s,
1165 		     unsigned int ofs, unsigned int len)
1166 {
1167 	unsigned char *p;
1168 
1169 	ata_id_string(id, s, ofs, len - 1);
1170 
1171 	p = s + strnlen(s, len - 1);
1172 	while (p > s && p[-1] == ' ')
1173 		p--;
1174 	*p = '\0';
1175 }
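
/*
 * A typical use is pulling the model string out of IDENTIFY data:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */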
1176 
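/* Device capacity in sectors from IDENTIFY data: LBA48, LBA28 or CHS. */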
1177 static u64 ata_id_n_sectors(const u16 *id)
1178 {
1179 	if (ata_id_has_lba(id)) {
1180 		if (ata_id_has_lba48(id))
1181 			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1182 		else
1183 			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1184 	} else {
1185 		if (ata_id_current_chs_valid(id))
1186 			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1187 			       id[ATA_ID_CUR_SECTORS];
1188 		else
1189 			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1190 			       id[ATA_ID_SECTORS];
1191 	}
1192 }
1193 
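/*
 * Assemble the 48-bit (ata_tf_to_lba48) or 28-bit (ata_tf_to_lba) LBA
 * carried in a result taskfile, e.g. from READ NATIVE MAX ADDRESS below.
 */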
1194 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1195 {
1196 	u64 sectors = 0;
1197 
1198 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1199 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1200 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1201 	sectors |= (tf->lbah & 0xff) << 16;
1202 	sectors |= (tf->lbam & 0xff) << 8;
1203 	sectors |= (tf->lbal & 0xff);
1204 
1205 	return sectors;
1206 }
1207 
1208 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1209 {
1210 	u64 sectors = 0;
1211 
1212 	sectors |= (tf->device & 0x0f) << 24;
1213 	sectors |= (tf->lbah & 0xff) << 16;
1214 	sectors |= (tf->lbam & 0xff) << 8;
1215 	sectors |= (tf->lbal & 0xff);
1216 
1217 	return sectors;
1218 }
1219 
1220 /**
1221  *	ata_read_native_max_address - Read native max address
1222  *	@dev: target device
1223  *	@max_sectors: out parameter for the result native max address
1224  *
1225  *	Perform an LBA48 or LBA28 native size query upon the device in
1226  *	question.
1227  *
1228  *	RETURNS:
1229  *	0 on success, -EACCES if command is aborted by the drive.
1230  *	-EIO on other errors.
1231  */
1232 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1233 {
1234 	unsigned int err_mask;
1235 	struct ata_taskfile tf;
1236 	int lba48 = ata_id_has_lba48(dev->id);
1237 
1238 	ata_tf_init(dev, &tf);
1239 
1240 	/* always clear all address registers */
1241 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1242 
1243 	if (lba48) {
1244 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1245 		tf.flags |= ATA_TFLAG_LBA48;
1246 	} else
1247 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1248 
1249 	tf.protocol = ATA_PROT_NODATA;
1250 	tf.device |= ATA_LBA;
1251 
1252 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1253 	if (err_mask) {
1254 		ata_dev_warn(dev,
1255 			     "failed to read native max address (err_mask=0x%x)\n",
1256 			     err_mask);
1257 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1258 			return -EACCES;
1259 		return -EIO;
1260 	}
1261 
1262 	if (lba48)
1263 		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1264 	else
1265 		*max_sectors = ata_tf_to_lba(&tf) + 1;
1266 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1267 		(*max_sectors)--;
1268 	return 0;
1269 }
1270 
1271 /**
1272  *	ata_set_max_sectors - Set max sectors
1273  *	@dev: target device
1274  *	@new_sectors: new max sectors value to set for the device
1275  *
1276  *	Set max sectors of @dev to @new_sectors.
1277  *
1278  *	RETURNS:
1279  *	0 on success, -EACCES if command is aborted or denied (due to
1280  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1281  *	errors.
1282  */
1283 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1284 {
1285 	unsigned int err_mask;
1286 	struct ata_taskfile tf;
1287 	int lba48 = ata_id_has_lba48(dev->id);
1288 
1289 	new_sectors--;
1290 
1291 	ata_tf_init(dev, &tf);
1292 
1293 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1294 
1295 	if (lba48) {
1296 		tf.command = ATA_CMD_SET_MAX_EXT;
1297 		tf.flags |= ATA_TFLAG_LBA48;
1298 
1299 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1300 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1301 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1302 	} else {
1303 		tf.command = ATA_CMD_SET_MAX;
1304 
1305 		tf.device |= (new_sectors >> 24) & 0xf;
1306 	}
1307 
1308 	tf.protocol = ATA_PROT_NODATA;
1309 	tf.device |= ATA_LBA;
1310 
1311 	tf.lbal = (new_sectors >> 0) & 0xff;
1312 	tf.lbam = (new_sectors >> 8) & 0xff;
1313 	tf.lbah = (new_sectors >> 16) & 0xff;
1314 
1315 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1316 	if (err_mask) {
1317 		ata_dev_warn(dev,
1318 			     "failed to set max address (err_mask=0x%x)\n",
1319 			     err_mask);
1320 		if (err_mask == AC_ERR_DEV &&
1321 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1322 			return -EACCES;
1323 		return -EIO;
1324 	}
1325 
1326 	return 0;
1327 }
1328 
1329 /**
1330  *	ata_hpa_resize		-	Resize a device with an HPA set
1331  *	@dev: Device to resize
1332  *
1333  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1334  *	it if required to the full size of the media. The caller must check
1335  *	the drive has the HPA feature set enabled.
1336  *
1337  *	RETURNS:
1338  *	0 on success, -errno on failure.
1339  */
1340 static int ata_hpa_resize(struct ata_device *dev)
1341 {
1342 	struct ata_eh_context *ehc = &dev->link->eh_context;
1343 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1344 	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1345 	u64 sectors = ata_id_n_sectors(dev->id);
1346 	u64 native_sectors;
1347 	int rc;
1348 
1349 	/* do we need to do it? */
1350 	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1351 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1352 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1353 		return 0;
1354 
1355 	/* read native max address */
1356 	rc = ata_read_native_max_address(dev, &native_sectors);
1357 	if (rc) {
1358 		/* If device aborted the command or HPA isn't going to
1359 		 * be unlocked, skip HPA resizing.
1360 		 */
1361 		if (rc == -EACCES || !unlock_hpa) {
1362 			ata_dev_warn(dev,
1363 				     "HPA support seems broken, skipping HPA handling\n");
1364 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1365 
1366 			/* we can continue if device aborted the command */
1367 			if (rc == -EACCES)
1368 				rc = 0;
1369 		}
1370 
1371 		return rc;
1372 	}
1373 	dev->n_native_sectors = native_sectors;
1374 
1375 	/* nothing to do? */
1376 	if (native_sectors <= sectors || !unlock_hpa) {
1377 		if (!print_info || native_sectors == sectors)
1378 			return 0;
1379 
1380 		if (native_sectors > sectors)
1381 			ata_dev_info(dev,
1382 				"HPA detected: current %llu, native %llu\n",
1383 				(unsigned long long)sectors,
1384 				(unsigned long long)native_sectors);
1385 		else if (native_sectors < sectors)
1386 			ata_dev_warn(dev,
1387 				"native sectors (%llu) is smaller than sectors (%llu)\n",
1388 				(unsigned long long)native_sectors,
1389 				(unsigned long long)sectors);
1390 		return 0;
1391 	}
1392 
1393 	/* let's unlock HPA */
1394 	rc = ata_set_max_sectors(dev, native_sectors);
1395 	if (rc == -EACCES) {
1396 		/* if device aborted the command, skip HPA resizing */
1397 		ata_dev_warn(dev,
1398 			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1399 			     (unsigned long long)sectors,
1400 			     (unsigned long long)native_sectors);
1401 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1402 		return 0;
1403 	} else if (rc)
1404 		return rc;
1405 
1406 	/* re-read IDENTIFY data */
1407 	rc = ata_dev_reread_id(dev, 0);
1408 	if (rc) {
1409 		ata_dev_err(dev,
1410 			    "failed to re-read IDENTIFY data after HPA resizing\n");
1411 		return rc;
1412 	}
1413 
1414 	if (print_info) {
1415 		u64 new_sectors = ata_id_n_sectors(dev->id);
1416 		ata_dev_info(dev,
1417 			"HPA unlocked: %llu -> %llu, native %llu\n",
1418 			(unsigned long long)sectors,
1419 			(unsigned long long)new_sectors,
1420 			(unsigned long long)native_sectors);
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 /**
1427  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1428  *	@id: IDENTIFY DEVICE page to dump
1429  *
1430  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1431  *	page.
1432  *
1433  *	LOCKING:
1434  *	caller.
1435  */
1436 
1437 static inline void ata_dump_id(const u16 *id)
1438 {
1439 	DPRINTK("49==0x%04x  "
1440 		"53==0x%04x  "
1441 		"63==0x%04x  "
1442 		"64==0x%04x  "
1443 		"75==0x%04x  \n",
1444 		id[49],
1445 		id[53],
1446 		id[63],
1447 		id[64],
1448 		id[75]);
1449 	DPRINTK("80==0x%04x  "
1450 		"81==0x%04x  "
1451 		"82==0x%04x  "
1452 		"83==0x%04x  "
1453 		"84==0x%04x  \n",
1454 		id[80],
1455 		id[81],
1456 		id[82],
1457 		id[83],
1458 		id[84]);
1459 	DPRINTK("88==0x%04x  "
1460 		"93==0x%04x\n",
1461 		id[88],
1462 		id[93]);
1463 }
1464 
1465 /**
1466  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1467  *	@id: IDENTIFY data to compute xfer mask from
1468  *
1469  *	Compute the xfermask for this device. This is not as trivial
1470  *	as it seems if we must consider early devices correctly.
1471  *
1472  *	FIXME: pre IDE drive timing (do we care ?).
1473  *
1474  *	LOCKING:
1475  *	None.
1476  *
1477  *	RETURNS:
1478  *	Computed xfermask
1479  */
1480 unsigned long ata_id_xfermask(const u16 *id)
1481 {
1482 	unsigned long pio_mask, mwdma_mask, udma_mask;
1483 
1484 	/* Usual case. Word 53 indicates word 64 is valid */
1485 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1486 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1487 		pio_mask <<= 3;
1488 		pio_mask |= 0x7;
1489 	} else {
1490 		/* If word 64 isn't valid then Word 51 high byte holds
1491 		 * the PIO timing number for the maximum. Turn it into
1492 		 * a mask.
1493 		 */
1494 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1495 		if (mode < 5)	/* Valid PIO range */
1496 			pio_mask = (2 << mode) - 1;
1497 		else
1498 			pio_mask = 1;
1499 
1500 		/* But wait.. there's more. Design your standards by
1501 		 * committee and you too can get a free iordy field to
1502 		 * process. However it's the speeds, not the modes, that
1503 		 * are supported... Note drivers using the timing API
1504 		 * will get this right anyway
1505 		 */
1506 	}
1507 
1508 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1509 
1510 	if (ata_id_is_cfa(id)) {
1511 		/*
1512 		 *	Process compact flash extended modes
1513 		 */
1514 		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1515 		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1516 
1517 		if (pio)
1518 			pio_mask |= (1 << 5);
1519 		if (pio > 1)
1520 			pio_mask |= (1 << 6);
1521 		if (dma)
1522 			mwdma_mask |= (1 << 3);
1523 		if (dma > 1)
1524 			mwdma_mask |= (1 << 4);
1525 	}
1526 
1527 	udma_mask = 0;
1528 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1529 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1530 
1531 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1532 }
1533 
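/* Completion callback for internal commands: wake up the waiting issuer. */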
1534 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1535 {
1536 	struct completion *waiting = qc->private_data;
1537 
1538 	complete(waiting);
1539 }
1540 
1541 /**
1542  *	ata_exec_internal_sg - execute libata internal command
1543  *	@dev: Device to which the command is sent
1544  *	@tf: Taskfile registers for the command and the result
1545  *	@cdb: CDB for packet command
1546  *	@dma_dir: Data transfer direction of the command
1547  *	@sgl: sg list for the data buffer of the command
1548  *	@n_elem: Number of sg entries
1549  *	@timeout: Timeout in msecs (0 for default)
1550  *
1551  *	Executes libata internal command with timeout.  @tf contains
1552  *	command on entry and result on return.  Timeout and error
1553  *	conditions are reported via return value.  No recovery action
1554  *	is taken after a command times out.  It's the caller's duty to
1555  *	clean up after timeout.
1556  *
1557  *	LOCKING:
1558  *	None.  Should be called with kernel context, might sleep.
1559  *
1560  *	RETURNS:
1561  *	Zero on success, AC_ERR_* mask on failure
1562  */
1563 unsigned ata_exec_internal_sg(struct ata_device *dev,
1564 			      struct ata_taskfile *tf, const u8 *cdb,
1565 			      int dma_dir, struct scatterlist *sgl,
1566 			      unsigned int n_elem, unsigned long timeout)
1567 {
1568 	struct ata_link *link = dev->link;
1569 	struct ata_port *ap = link->ap;
1570 	u8 command = tf->command;
1571 	int auto_timeout = 0;
1572 	struct ata_queued_cmd *qc;
1573 	unsigned int tag, preempted_tag;
1574 	u32 preempted_sactive, preempted_qc_active;
1575 	int preempted_nr_active_links;
1576 	DECLARE_COMPLETION_ONSTACK(wait);
1577 	unsigned long flags;
1578 	unsigned int err_mask;
1579 	int rc;
1580 
1581 	spin_lock_irqsave(ap->lock, flags);
1582 
1583 	/* no internal command while frozen */
1584 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1585 		spin_unlock_irqrestore(ap->lock, flags);
1586 		return AC_ERR_SYSTEM;
1587 	}
1588 
1589 	/* initialize internal qc */
1590 
1591 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1592 	 * drivers choke if any other tag is given.  This breaks
1593 	 * ata_tag_internal() test for those drivers.  Don't use new
1594 	 * EH stuff without converting to it.
1595 	 */
1596 	if (ap->ops->error_handler)
1597 		tag = ATA_TAG_INTERNAL;
1598 	else
1599 		tag = 0;
1600 
1601 	qc = __ata_qc_from_tag(ap, tag);
1602 
1603 	qc->tag = tag;
1604 	qc->scsicmd = NULL;
1605 	qc->ap = ap;
1606 	qc->dev = dev;
1607 	ata_qc_reinit(qc);
1608 
1609 	preempted_tag = link->active_tag;
1610 	preempted_sactive = link->sactive;
1611 	preempted_qc_active = ap->qc_active;
1612 	preempted_nr_active_links = ap->nr_active_links;
1613 	link->active_tag = ATA_TAG_POISON;
1614 	link->sactive = 0;
1615 	ap->qc_active = 0;
1616 	ap->nr_active_links = 0;
1617 
1618 	/* prepare & issue qc */
1619 	qc->tf = *tf;
1620 	if (cdb)
1621 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1622 
1623 	/* some SATA bridges need us to indicate data xfer direction */
1624 	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1625 	    dma_dir == DMA_FROM_DEVICE)
1626 		qc->tf.feature |= ATAPI_DMADIR;
1627 
1628 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1629 	qc->dma_dir = dma_dir;
1630 	if (dma_dir != DMA_NONE) {
1631 		unsigned int i, buflen = 0;
1632 		struct scatterlist *sg;
1633 
1634 		for_each_sg(sgl, sg, n_elem, i)
1635 			buflen += sg->length;
1636 
1637 		ata_sg_init(qc, sgl, n_elem);
1638 		qc->nbytes = buflen;
1639 	}
1640 
1641 	qc->private_data = &wait;
1642 	qc->complete_fn = ata_qc_complete_internal;
1643 
1644 	ata_qc_issue(qc);
1645 
1646 	spin_unlock_irqrestore(ap->lock, flags);
1647 
1648 	if (!timeout) {
1649 		if (ata_probe_timeout)
1650 			timeout = ata_probe_timeout * 1000;
1651 		else {
1652 			timeout = ata_internal_cmd_timeout(dev, command);
1653 			auto_timeout = 1;
1654 		}
1655 	}
1656 
1657 	if (ap->ops->error_handler)
1658 		ata_eh_release(ap);
1659 
1660 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1661 
1662 	if (ap->ops->error_handler)
1663 		ata_eh_acquire(ap);
1664 
1665 	ata_sff_flush_pio_task(ap);
1666 
1667 	if (!rc) {
1668 		spin_lock_irqsave(ap->lock, flags);
1669 
1670 		/* We're racing with irq here.  If we lose, the
1671 		 * following test prevents us from completing the qc
1672 		 * twice.  If we win, the port is frozen and will be
1673 		 * cleaned up by ->post_internal_cmd().
1674 		 */
1675 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1676 			qc->err_mask |= AC_ERR_TIMEOUT;
1677 
1678 			if (ap->ops->error_handler)
1679 				ata_port_freeze(ap);
1680 			else
1681 				ata_qc_complete(qc);
1682 
1683 			if (ata_msg_warn(ap))
1684 				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1685 					     command);
1686 		}
1687 
1688 		spin_unlock_irqrestore(ap->lock, flags);
1689 	}
1690 
1691 	/* do post_internal_cmd */
1692 	if (ap->ops->post_internal_cmd)
1693 		ap->ops->post_internal_cmd(qc);
1694 
1695 	/* perform minimal error analysis */
1696 	if (qc->flags & ATA_QCFLAG_FAILED) {
1697 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1698 			qc->err_mask |= AC_ERR_DEV;
1699 
1700 		if (!qc->err_mask)
1701 			qc->err_mask |= AC_ERR_OTHER;
1702 
1703 		if (qc->err_mask & ~AC_ERR_OTHER)
1704 			qc->err_mask &= ~AC_ERR_OTHER;
1705 	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1706 		qc->result_tf.command |= ATA_SENSE;
1707 	}
1708 
1709 	/* finish up */
1710 	spin_lock_irqsave(ap->lock, flags);
1711 
1712 	*tf = qc->result_tf;
1713 	err_mask = qc->err_mask;
1714 
1715 	ata_qc_free(qc);
1716 	link->active_tag = preempted_tag;
1717 	link->sactive = preempted_sactive;
1718 	ap->qc_active = preempted_qc_active;
1719 	ap->nr_active_links = preempted_nr_active_links;
1720 
1721 	spin_unlock_irqrestore(ap->lock, flags);
1722 
1723 	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1724 		ata_internal_cmd_timed_out(dev, command);
1725 
1726 	return err_mask;
1727 }
1728 
1729 /**
1730  *	ata_exec_internal - execute libata internal command
1731  *	@dev: Device to which the command is sent
1732  *	@tf: Taskfile registers for the command and the result
1733  *	@cdb: CDB for packet command
1734  *	@dma_dir: Data transfer direction of the command
1735  *	@buf: Data buffer of the command
1736  *	@buflen: Length of data buffer
1737  *	@timeout: Timeout in msecs (0 for default)
1738  *
1739  *	Wrapper around ata_exec_internal_sg() which takes simple
1740  *	buffer instead of sg list.
1741  *
1742  *	LOCKING:
1743  *	None.  Should be called with kernel context, might sleep.
1744  *
1745  *	RETURNS:
1746  *	Zero on success, AC_ERR_* mask on failure
1747  */
1748 unsigned ata_exec_internal(struct ata_device *dev,
1749 			   struct ata_taskfile *tf, const u8 *cdb,
1750 			   int dma_dir, void *buf, unsigned int buflen,
1751 			   unsigned long timeout)
1752 {
1753 	struct scatterlist *psg = NULL, sg;
1754 	unsigned int n_elem = 0;
1755 
1756 	if (dma_dir != DMA_NONE) {
1757 		WARN_ON(!buf);
1758 		sg_init_one(&sg, buf, buflen);
1759 		psg = &sg;
1760 		n_elem++;
1761 	}
1762 
1763 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1764 				    timeout);
1765 }
1766 
1767 /**
1768  *	ata_pio_need_iordy	-	check if iordy needed
1769  *	@adev: ATA device
1770  *
1771  *	Check if the current speed of the device requires IORDY. Used
1772  *	by various controllers for chip configuration.
1773  */
1774 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1775 {
1776 	/* Don't set IORDY if we're preparing for reset.  IORDY may
1777 	 * lead to controller lock up on certain controllers if the
1778 	 * port is not occupied.  See bko#11703 for details.
1779 	 */
1780 	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1781 		return 0;
1782 	/* Controller doesn't support IORDY.  Probably a pointless
1783 	 * check as the caller should know this.
1784 	 */
1785 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1786 		return 0;
1787 	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1788 	if (ata_id_is_cfa(adev->id)
1789 	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1790 		return 0;
1791 	/* For PIO3 and higher, IORDY is mandatory */
1792 	if (adev->pio_mode > XFER_PIO_2)
1793 		return 1;
1794 	/* We turn it on when possible */
1795 	if (ata_id_has_iordy(adev->id))
1796 		return 1;
1797 	return 0;
1798 }
1799 
1800 /**
1801  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1802  *	@adev: ATA device
1803  *
1804  *	Compute the highest mode possible if we are not using iordy. Return
1805  *	-1 if no iordy mode is available.
1806  */
1807 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1808 {
1809 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1810 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1811 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1812 		/* Is the speed faster than the drive allows non IORDY ? */
1813 		if (pio) {
1814 			/* This is cycle times not frequency - watch the logic! */
1815 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1816 				return 3 << ATA_SHIFT_PIO;
1817 			return 7 << ATA_SHIFT_PIO;
1818 		}
1819 	}
1820 	return 3 << ATA_SHIFT_PIO;
1821 }
1822 
1823 /**
1824  *	ata_do_dev_read_id		-	default ID read method
1825  *	@dev: device
1826  *	@tf: proposed taskfile
1827  *	@id: data buffer
1828  *
1829  *	Issue the identify taskfile and hand back the buffer containing
1830  *	identify data. For some RAID controllers and for pre ATA devices
1831  *	this function is wrapped or replaced by the driver
1832  */
1833 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1834 					struct ata_taskfile *tf, u16 *id)
1835 {
1836 	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1837 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1838 }
1839 
1840 /**
1841  *	ata_dev_read_id - Read ID data from the specified device
1842  *	@dev: target device
1843  *	@p_class: pointer to class of the target device (may be changed)
1844  *	@flags: ATA_READID_* flags
1845  *	@id: buffer to read IDENTIFY data into
1846  *
1847  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1848  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1849  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1850  *	for pre-ATA4 drives.
1851  *
1852  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1853  *	now we abort if we hit that case.
1854  *
1855  *	LOCKING:
1856  *	Kernel thread context (may sleep)
1857  *
1858  *	RETURNS:
1859  *	0 on success, -errno otherwise.
1860  */
1861 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1862 		    unsigned int flags, u16 *id)
1863 {
1864 	struct ata_port *ap = dev->link->ap;
1865 	unsigned int class = *p_class;
1866 	struct ata_taskfile tf;
1867 	unsigned int err_mask = 0;
1868 	const char *reason;
1869 	bool is_semb = class == ATA_DEV_SEMB;
1870 	int may_fallback = 1, tried_spinup = 0;
1871 	int rc;
1872 
1873 	if (ata_msg_ctl(ap))
1874 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1875 
1876 retry:
1877 	ata_tf_init(dev, &tf);
1878 
1879 	switch (class) {
1880 	case ATA_DEV_SEMB:
1881 		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
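		/* fall through */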
1882 	case ATA_DEV_ATA:
1883 	case ATA_DEV_ZAC:
1884 		tf.command = ATA_CMD_ID_ATA;
1885 		break;
1886 	case ATA_DEV_ATAPI:
1887 		tf.command = ATA_CMD_ID_ATAPI;
1888 		break;
1889 	default:
1890 		rc = -ENODEV;
1891 		reason = "unsupported class";
1892 		goto err_out;
1893 	}
1894 
1895 	tf.protocol = ATA_PROT_PIO;
1896 
1897 	/* Some devices choke if TF registers contain garbage.  Make
1898 	 * sure those are properly initialized.
1899 	 */
1900 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1901 
1902 	/* Device presence detection is unreliable on some
1903 	 * controllers.  Always poll IDENTIFY if available.
1904 	 */
1905 	tf.flags |= ATA_TFLAG_POLLING;
1906 
1907 	if (ap->ops->read_id)
1908 		err_mask = ap->ops->read_id(dev, &tf, id);
1909 	else
1910 		err_mask = ata_do_dev_read_id(dev, &tf, id);
1911 
1912 	if (err_mask) {
1913 		if (err_mask & AC_ERR_NODEV_HINT) {
1914 			ata_dev_dbg(dev, "NODEV after polling detection\n");
1915 			return -ENOENT;
1916 		}
1917 
1918 		if (is_semb) {
1919 			ata_dev_info(dev,
1920 		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1921 			/* SEMB is not supported yet */
1922 			*p_class = ATA_DEV_SEMB_UNSUP;
1923 			return 0;
1924 		}
1925 
1926 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1927 			/* Device or controller might have reported
1928 			 * the wrong device class.  Give a shot at the
1929 			 * other IDENTIFY if the current one is
1930 			 * aborted by the device.
1931 			 */
1932 			if (may_fallback) {
1933 				may_fallback = 0;
1934 
1935 				if (class == ATA_DEV_ATA)
1936 					class = ATA_DEV_ATAPI;
1937 				else
1938 					class = ATA_DEV_ATA;
1939 				goto retry;
1940 			}
1941 
1942 			/* Control reaches here iff the device aborted
1943 			 * both flavors of IDENTIFYs, which happens
1944 			 * sometimes with phantom devices.
1945 			 */
1946 			ata_dev_dbg(dev,
1947 				    "both IDENTIFYs aborted, assuming NODEV\n");
1948 			return -ENOENT;
1949 		}
1950 
1951 		rc = -EIO;
1952 		reason = "I/O error";
1953 		goto err_out;
1954 	}
1955 
1956 	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1957 		ata_dev_dbg(dev, "dumping IDENTIFY data, "
1958 			    "class=%d may_fallback=%d tried_spinup=%d\n",
1959 			    class, may_fallback, tried_spinup);
1960 		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1961 			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1962 	}
1963 
1964 	/* Falling back doesn't make sense if ID data was read
1965 	 * successfully at least once.
1966 	 */
1967 	may_fallback = 0;
1968 
1969 	swap_buf_le16(id, ATA_ID_WORDS);
1970 
1971 	/* sanity check */
1972 	rc = -EINVAL;
1973 	reason = "device reports invalid type";
1974 
1975 	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1976 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1977 			goto err_out;
1978 		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1979 							ata_id_is_ata(id)) {
1980 			ata_dev_dbg(dev,
1981 				"host indicates ignore ATA devices, ignored\n");
1982 			return -ENOENT;
1983 		}
1984 	} else {
1985 		if (ata_id_is_ata(id))
1986 			goto err_out;
1987 	}
1988 
1989 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1990 		tried_spinup = 1;
1991 		/*
1992 		 * Drive powered-up in standby mode, and requires a specific
1993 		 * SET_FEATURES spin-up subcommand before it will accept
1994 		 * anything other than the original IDENTIFY command.
1995 		 */
1996 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1997 		if (err_mask && id[2] != 0x738c) {
1998 			rc = -EIO;
1999 			reason = "SPINUP failed";
2000 			goto err_out;
2001 		}
2002 		/*
2003 		 * If the drive initially returned incomplete IDENTIFY info,
2004 		 * we now must reissue the IDENTIFY command.
2005 		 */
2006 		if (id[2] == 0x37c8)
2007 			goto retry;
2008 	}
2009 
2010 	if ((flags & ATA_READID_POSTRESET) &&
2011 	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2012 		/*
2013 		 * The exact sequence expected by certain pre-ATA4 drives is:
2014 		 * SRST RESET
2015 		 * IDENTIFY (optional in early ATA)
2016 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2017 		 * anything else..
2018 		 * Some drives were very specific about that exact sequence.
2019 		 *
2020 		 * Note that ATA4 says lba is mandatory so the second check
2021 		 * should never trigger.
2022 		 */
2023 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2024 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2025 			if (err_mask) {
2026 				rc = -EIO;
2027 				reason = "INIT_DEV_PARAMS failed";
2028 				goto err_out;
2029 			}
2030 
2031 			/* current CHS translation info (id[53-58]) might be
2032 			 * changed. reread the identify device info.
2033 			 */
2034 			flags &= ~ATA_READID_POSTRESET;
2035 			goto retry;
2036 		}
2037 	}
2038 
2039 	*p_class = class;
2040 
2041 	return 0;
2042 
2043  err_out:
2044 	if (ata_msg_warn(ap))
2045 		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2046 			     reason, err_mask);
2047 	return rc;
2048 }
2049 
2050 /**
2051  *	ata_read_log_page - read a specific log page
2052  *	@dev: target device
2053  *	@log: log to read
2054  *	@page: page to read
2055  *	@buf: buffer to store read page
2056  *	@sectors: number of sectors to read
2057  *
2058  *	Read log page using READ_LOG_EXT command.
2059  *
2060  *	LOCKING:
2061  *	Kernel thread context (may sleep).
2062  *
2063  *	RETURNS:
2064  *	0 on success, AC_ERR_* mask otherwise.
2065  */
2066 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2067 			       u8 page, void *buf, unsigned int sectors)
2068 {
2069 	unsigned long ap_flags = dev->link->ap->flags;
2070 	struct ata_taskfile tf;
2071 	unsigned int err_mask;
2072 	bool dma = false;
2073 
2074 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
2075 
2076 	/*
2077 	 * Return error without actually issuing the command on controllers
2078 	 * which e.g. lock up on a read log page.
2079 	 */
2080 	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2081 		return AC_ERR_DEV;
2082 
2083 retry:
2084 	ata_tf_init(dev, &tf);
2085 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
2086 	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2087 		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2088 		tf.protocol = ATA_PROT_DMA;
2089 		dma = true;
2090 	} else {
2091 		tf.command = ATA_CMD_READ_LOG_EXT;
2092 		tf.protocol = ATA_PROT_PIO;
2093 		dma = false;
2094 	}
2095 	tf.lbal = log;
2096 	tf.lbam = page;
2097 	tf.nsect = sectors;
2098 	tf.hob_nsect = sectors >> 8;
2099 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2100 
2101 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2102 				     buf, sectors * ATA_SECT_SIZE, 0);
2103 
2104 	if (err_mask && dma) {
2105 		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2106 		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
2107 		goto retry;
2108 	}
2109 
2110 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
2111 	return err_mask;
2112 }
2113 
2114 static bool ata_log_supported(struct ata_device *dev, u8 log)
2115 {
2116 	struct ata_port *ap = dev->link->ap;
2117 
2118 	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2119 		return false;
2120 	return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2121 }
2122 
2123 static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2124 {
2125 	struct ata_port *ap = dev->link->ap;
2126 	unsigned int err, i;
2127 
2128 	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2129 		ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2130 		return false;
2131 	}
2132 
2133 	/*
2134 	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2135 	 * supported.
2136 	 */
2137 	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2138 				1);
2139 	if (err) {
2140 		ata_dev_info(dev,
2141 			     "failed to get Device Identify Log Emask 0x%x\n",
2142 			     err);
2143 		return false;
2144 	}
2145 
2146 	for (i = 0; i < ap->sector_buf[8]; i++) {
2147 		if (ap->sector_buf[9 + i] == page)
2148 			return true;
2149 	}
2150 
2151 	return false;
2152 }
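
/*
 * For reference, page 0 of the IDENTIFY DEVICE data log is the list of
 * supported pages: as read above, byte 8 holds the number of entries and
 * the page numbers follow from byte 9.  A drive supporting, for example,
 * pages 00h, 02h and 08h reports 03h at offset 8 followed by 00h, 02h
 * and 08h.
 */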
2153 
2154 static int ata_do_link_spd_horkage(struct ata_device *dev)
2155 {
2156 	struct ata_link *plink = ata_dev_phys_link(dev);
2157 	u32 target, target_limit;
2158 
2159 	if (!sata_scr_valid(plink))
2160 		return 0;
2161 
2162 	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2163 		target = 1;
2164 	else
2165 		return 0;
2166 
2167 	target_limit = (1 << target) - 1;
2168 
2169 	/* if already on stricter limit, no need to push further */
2170 	if (plink->sata_spd_limit <= target_limit)
2171 		return 0;
2172 
2173 	plink->sata_spd_limit = target_limit;
2174 
2175 	/* Request another EH round by returning -EAGAIN if link is
2176 	 * going faster than the target speed.  Forward progress is
2177 	 * guaranteed by setting sata_spd_limit to target_limit above.
2178 	 */
2179 	if (plink->sata_spd > target) {
2180 		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2181 			     sata_spd_string(target));
2182 		return -EAGAIN;
2183 	}
2184 	return 0;
2185 }
2186 
2187 static inline u8 ata_dev_knobble(struct ata_device *dev)
2188 {
2189 	struct ata_port *ap = dev->link->ap;
2190 
2191 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2192 		return 0;
2193 
2194 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2195 }
2196 
2197 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2198 {
2199 	struct ata_port *ap = dev->link->ap;
2200 	unsigned int err_mask;
2201 
2202 	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2203 		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2204 		return;
2205 	}
2206 	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2207 				     0, ap->sector_buf, 1);
2208 	if (err_mask) {
2209 		ata_dev_dbg(dev,
2210 			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2211 			    err_mask);
2212 	} else {
2213 		u8 *cmds = dev->ncq_send_recv_cmds;
2214 
2215 		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2216 		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2217 
2218 		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2219 			ata_dev_dbg(dev, "disabling queued TRIM support\n");
2220 			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2221 				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2222 		}
2223 	}
2224 }
2225 
2226 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2227 {
2228 	struct ata_port *ap = dev->link->ap;
2229 	unsigned int err_mask;
2230 
2231 	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2232 		ata_dev_warn(dev,
2233 			     "NCQ Non-Data Log not supported\n");
2234 		return;
2235 	}
2236 	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2237 				     0, ap->sector_buf, 1);
2238 	if (err_mask) {
2239 		ata_dev_dbg(dev,
2240 			    "failed to get NCQ Non-Data Log Emask 0x%x\n",
2241 			    err_mask);
2242 	} else {
2243 		u8 *cmds = dev->ncq_non_data_cmds;
2244 
2245 		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2246 	}
2247 }
2248 
2249 static void ata_dev_config_ncq_prio(struct ata_device *dev)
2250 {
2251 	struct ata_port *ap = dev->link->ap;
2252 	unsigned int err_mask;
2253 
2254 	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2255 		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2256 		return;
2257 	}
2258 
2259 	err_mask = ata_read_log_page(dev,
2260 				     ATA_LOG_IDENTIFY_DEVICE,
2261 				     ATA_LOG_SATA_SETTINGS,
2262 				     ap->sector_buf,
2263 				     1);
2264 	if (err_mask) {
2265 		ata_dev_dbg(dev,
2266 			    "failed to get Identify Device data, Emask 0x%x\n",
2267 			    err_mask);
2268 		return;
2269 	}
2270 
2271 	if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2272 		dev->flags |= ATA_DFLAG_NCQ_PRIO;
2273 	} else {
2274 		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2275 		ata_dev_dbg(dev, "SATA page does not support priority\n");
2276 	}
2277 
2278 }
2279 
2280 static int ata_dev_config_ncq(struct ata_device *dev,
2281 			       char *desc, size_t desc_sz)
2282 {
2283 	struct ata_port *ap = dev->link->ap;
2284 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2285 	unsigned int err_mask;
2286 	char *aa_desc = "";
2287 
2288 	if (!ata_id_has_ncq(dev->id)) {
2289 		desc[0] = '\0';
2290 		return 0;
2291 	}
2292 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2293 		snprintf(desc, desc_sz, "NCQ (not used)");
2294 		return 0;
2295 	}
2296 	if (ap->flags & ATA_FLAG_NCQ) {
2297 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2298 		dev->flags |= ATA_DFLAG_NCQ;
2299 	}
2300 
2301 	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2302 		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2303 		ata_id_has_fpdma_aa(dev->id)) {
2304 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2305 			SATA_FPDMA_AA);
2306 		if (err_mask) {
2307 			ata_dev_err(dev,
2308 				    "failed to enable AA (error_mask=0x%x)\n",
2309 				    err_mask);
2310 			if (err_mask != AC_ERR_DEV) {
2311 				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2312 				return -EIO;
2313 			}
2314 		} else
2315 			aa_desc = ", AA";
2316 	}
2317 
2318 	if (hdepth >= ddepth)
2319 		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2320 	else
2321 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2322 			ddepth, aa_desc);
2323 
2324 	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2325 		if (ata_id_has_ncq_send_and_recv(dev->id))
2326 			ata_dev_config_ncq_send_recv(dev);
2327 		if (ata_id_has_ncq_non_data(dev->id))
2328 			ata_dev_config_ncq_non_data(dev);
2329 		if (ata_id_has_ncq_prio(dev->id))
2330 			ata_dev_config_ncq_prio(dev);
2331 	}
2332 
2333 	return 0;
2334 }
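
/*
 * Example of the resulting description (illustrative values): with
 * ATA_MAX_QUEUE == 32, an NCQ-capable host whose scsi_host->can_queue is
 * 32 yields hdepth == 31, and a device advertising a queue depth of 32
 * yields ddepth == 32, so the string reads "NCQ (depth 31/32)", with
 * ", AA" appended if FPDMA AutoActivate was enabled successfully.
 */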
2335 
2336 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2337 {
2338 	unsigned int err_mask;
2339 
2340 	if (!ata_id_has_sense_reporting(dev->id))
2341 		return;
2342 
2343 	if (ata_id_sense_reporting_enabled(dev->id))
2344 		return;
2345 
2346 	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2347 	if (err_mask) {
2348 		ata_dev_dbg(dev,
2349 			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
2350 			    err_mask);
2351 	}
2352 }
2353 
2354 static void ata_dev_config_zac(struct ata_device *dev)
2355 {
2356 	struct ata_port *ap = dev->link->ap;
2357 	unsigned int err_mask;
2358 	u8 *identify_buf = ap->sector_buf;
2359 
2360 	dev->zac_zones_optimal_open = U32_MAX;
2361 	dev->zac_zones_optimal_nonseq = U32_MAX;
2362 	dev->zac_zones_max_open = U32_MAX;
2363 
2364 	/*
2365 	 * Always set the 'ZAC' flag for Host-managed devices.
2366 	 */
2367 	if (dev->class == ATA_DEV_ZAC)
2368 		dev->flags |= ATA_DFLAG_ZAC;
2369 	else if (ata_id_zoned_cap(dev->id) == 0x01)
2370 		/*
2371 		 * Check for host-aware devices.
2372 		 */
2373 		dev->flags |= ATA_DFLAG_ZAC;
2374 
2375 	if (!(dev->flags & ATA_DFLAG_ZAC))
2376 		return;
2377 
2378 	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2379 		ata_dev_warn(dev,
2380 			     "ATA Zoned Information Log not supported\n");
2381 		return;
2382 	}
2383 
2384 	/*
2385 	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2386 	 */
2387 	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2388 				     ATA_LOG_ZONED_INFORMATION,
2389 				     identify_buf, 1);
2390 	if (!err_mask) {
2391 		u64 zoned_cap, opt_open, opt_nonseq, max_open;
2392 
2393 		zoned_cap = get_unaligned_le64(&identify_buf[8]);
2394 		if ((zoned_cap >> 63))
2395 			dev->zac_zoned_cap = (zoned_cap & 1);
2396 		opt_open = get_unaligned_le64(&identify_buf[24]);
2397 		if ((opt_open >> 63))
2398 			dev->zac_zones_optimal_open = (u32)opt_open;
2399 		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2400 		if ((opt_nonseq >> 63))
2401 			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2402 		max_open = get_unaligned_le64(&identify_buf[40]);
2403 		if ((max_open >> 63))
2404 			dev->zac_zones_max_open = (u32)max_open;
2405 	}
2406 }
2407 
2408 static void ata_dev_config_trusted(struct ata_device *dev)
2409 {
2410 	struct ata_port *ap = dev->link->ap;
2411 	u64 trusted_cap;
2412 	unsigned int err;
2413 
2414 	if (!ata_id_has_trusted(dev->id))
2415 		return;
2416 
2417 	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2418 		ata_dev_warn(dev,
2419 			     "Security Log not supported\n");
2420 		return;
2421 	}
2422 
2423 	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2424 			ap->sector_buf, 1);
2425 	if (err) {
2426 		ata_dev_dbg(dev,
2427 			    "failed to read Security Log, Emask 0x%x\n", err);
2428 		return;
2429 	}
2430 
2431 	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2432 	if (!(trusted_cap & (1ULL << 63))) {
2433 		ata_dev_dbg(dev,
2434 			    "Trusted Computing capability qword not valid!\n");
2435 		return;
2436 	}
2437 
2438 	if (trusted_cap & (1 << 0))
2439 		dev->flags |= ATA_DFLAG_TRUSTED;
2440 }
2441 
2442 /**
2443  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2444  *	@dev: Target device to configure
2445  *
2446  *	Configure @dev according to @dev->id.  Generic and low-level
2447  *	driver specific fixups are also applied.
2448  *
2449  *	LOCKING:
2450  *	Kernel thread context (may sleep)
2451  *
2452  *	RETURNS:
2453  *	0 on success, -errno otherwise
2454  */
2455 int ata_dev_configure(struct ata_device *dev)
2456 {
2457 	struct ata_port *ap = dev->link->ap;
2458 	struct ata_eh_context *ehc = &dev->link->eh_context;
2459 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2460 	const u16 *id = dev->id;
2461 	unsigned long xfer_mask;
2462 	unsigned int err_mask;
2463 	char revbuf[7];		/* XYZ-99\0 */
2464 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2465 	char modelbuf[ATA_ID_PROD_LEN+1];
2466 	int rc;
2467 
2468 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2469 		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2470 		return 0;
2471 	}
2472 
2473 	if (ata_msg_probe(ap))
2474 		ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2475 
2476 	/* set horkage */
2477 	dev->horkage |= ata_dev_blacklisted(dev);
2478 	ata_force_horkage(dev);
2479 
2480 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2481 		ata_dev_info(dev, "unsupported device, disabling\n");
2482 		ata_dev_disable(dev);
2483 		return 0;
2484 	}
2485 
2486 	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2487 	    dev->class == ATA_DEV_ATAPI) {
2488 		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2489 			     atapi_enabled ? "not supported with this driver"
2490 			     : "disabled");
2491 		ata_dev_disable(dev);
2492 		return 0;
2493 	}
2494 
2495 	rc = ata_do_link_spd_horkage(dev);
2496 	if (rc)
2497 		return rc;
2498 
2499 	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2500 	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2501 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2502 		dev->horkage |= ATA_HORKAGE_NOLPM;
2503 
2504 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2505 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2506 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2507 	}
2508 
2509 	/* let ACPI work its magic */
2510 	rc = ata_acpi_on_devcfg(dev);
2511 	if (rc)
2512 		return rc;
2513 
2514 	/* massage HPA, do it early as it might change IDENTIFY data */
2515 	rc = ata_hpa_resize(dev);
2516 	if (rc)
2517 		return rc;
2518 
2519 	/* print device capabilities */
2520 	if (ata_msg_probe(ap))
2521 		ata_dev_dbg(dev,
2522 			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2523 			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2524 			    __func__,
2525 			    id[49], id[82], id[83], id[84],
2526 			    id[85], id[86], id[87], id[88]);
2527 
2528 	/* initialize to-be-configured parameters */
2529 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2530 	dev->max_sectors = 0;
2531 	dev->cdb_len = 0;
2532 	dev->n_sectors = 0;
2533 	dev->cylinders = 0;
2534 	dev->heads = 0;
2535 	dev->sectors = 0;
2536 	dev->multi_count = 0;
2537 
2538 	/*
2539 	 * common ATA, ATAPI feature tests
2540 	 */
2541 
2542 	/* find max transfer mode; for printk only */
2543 	xfer_mask = ata_id_xfermask(id);
2544 
2545 	if (ata_msg_probe(ap))
2546 		ata_dump_id(id);
2547 
2548 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2549 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2550 			sizeof(fwrevbuf));
2551 
2552 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2553 			sizeof(modelbuf));
2554 
2555 	/* ATA-specific feature tests */
2556 	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2557 		if (ata_id_is_cfa(id)) {
2558 			/* CPRM may make this media unusable */
2559 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2560 				ata_dev_warn(dev,
2561 	"supports DRM functions and may not be fully accessible\n");
2562 			snprintf(revbuf, 7, "CFA");
2563 		} else {
2564 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2565 			/* Warn the user if the device has TPM extensions */
2566 			if (ata_id_has_tpm(id))
2567 				ata_dev_warn(dev,
2568 	"supports DRM functions and may not be fully accessible\n");
2569 		}
2570 
2571 		dev->n_sectors = ata_id_n_sectors(id);
2572 
2573 		/* get current R/W Multiple count setting */
2574 		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2575 			unsigned int max = dev->id[47] & 0xff;
2576 			unsigned int cnt = dev->id[59] & 0xff;
2577 			/* only recognize/allow powers of two here */
2578 			if (is_power_of_2(max) && is_power_of_2(cnt))
2579 				if (cnt <= max)
2580 					dev->multi_count = cnt;
2581 		}
2582 
2583 		if (ata_id_has_lba(id)) {
2584 			const char *lba_desc;
2585 			char ncq_desc[24];
2586 
2587 			lba_desc = "LBA";
2588 			dev->flags |= ATA_DFLAG_LBA;
2589 			if (ata_id_has_lba48(id)) {
2590 				dev->flags |= ATA_DFLAG_LBA48;
2591 				lba_desc = "LBA48";
2592 
2593 				if (dev->n_sectors >= (1UL << 28) &&
2594 				    ata_id_has_flush_ext(id))
2595 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2596 			}
2597 
2598 			/* config NCQ */
2599 			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2600 			if (rc)
2601 				return rc;
2602 
2603 			/* print device info to dmesg */
2604 			if (ata_msg_drv(ap) && print_info) {
2605 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2606 					     revbuf, modelbuf, fwrevbuf,
2607 					     ata_mode_string(xfer_mask));
2608 				ata_dev_info(dev,
2609 					     "%llu sectors, multi %u: %s %s\n",
2610 					(unsigned long long)dev->n_sectors,
2611 					dev->multi_count, lba_desc, ncq_desc);
2612 			}
2613 		} else {
2614 			/* CHS */
2615 
2616 			/* Default translation */
2617 			dev->cylinders	= id[1];
2618 			dev->heads	= id[3];
2619 			dev->sectors	= id[6];
2620 
2621 			if (ata_id_current_chs_valid(id)) {
2622 				/* Current CHS translation is valid. */
2623 				dev->cylinders = id[54];
2624 				dev->heads     = id[55];
2625 				dev->sectors   = id[56];
2626 			}
2627 
2628 			/* print device info to dmesg */
2629 			if (ata_msg_drv(ap) && print_info) {
2630 				ata_dev_info(dev, "%s: %s, %s, max %s\n",
2631 					     revbuf,	modelbuf, fwrevbuf,
2632 					     ata_mode_string(xfer_mask));
2633 				ata_dev_info(dev,
2634 					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2635 					     (unsigned long long)dev->n_sectors,
2636 					     dev->multi_count, dev->cylinders,
2637 					     dev->heads, dev->sectors);
2638 			}
2639 		}
2640 
2641 		/* Check and mark DevSlp capability. Get DevSlp timing variables
2642 		 * from SATA Settings page of Identify Device Data Log.
2643 		 */
2644 		if (ata_id_has_devslp(dev->id)) {
2645 			u8 *sata_setting = ap->sector_buf;
2646 			int i, j;
2647 
2648 			dev->flags |= ATA_DFLAG_DEVSLP;
2649 			err_mask = ata_read_log_page(dev,
2650 						     ATA_LOG_IDENTIFY_DEVICE,
2651 						     ATA_LOG_SATA_SETTINGS,
2652 						     sata_setting,
2653 						     1);
2654 			if (err_mask)
2655 				ata_dev_dbg(dev,
2656 					    "failed to get Identify Device Data, Emask 0x%x\n",
2657 					    err_mask);
2658 			else
2659 				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2660 					j = ATA_LOG_DEVSLP_OFFSET + i;
2661 					dev->devslp_timing[i] = sata_setting[j];
2662 				}
2663 		}
2664 		ata_dev_config_sense_reporting(dev);
2665 		ata_dev_config_zac(dev);
2666 		ata_dev_config_trusted(dev);
2667 		dev->cdb_len = 32;
2668 	}
2669 
2670 	/* ATAPI-specific feature tests */
2671 	else if (dev->class == ATA_DEV_ATAPI) {
2672 		const char *cdb_intr_string = "";
2673 		const char *atapi_an_string = "";
2674 		const char *dma_dir_string = "";
2675 		u32 sntf;
2676 
2677 		rc = atapi_cdb_len(id);
2678 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2679 			if (ata_msg_warn(ap))
2680 				ata_dev_warn(dev, "unsupported CDB len\n");
2681 			rc = -EINVAL;
2682 			goto err_out_nosup;
2683 		}
2684 		dev->cdb_len = (unsigned int) rc;
2685 
2686 		/* Enable ATAPI AN if both the host and device have
2687 		 * the support.  If PMP is attached, SNTF is required
2688 		 * to enable ATAPI AN to discern between PHY status
2689 		 * changed notifications and ATAPI ANs.
2690 		 */
2691 		if (atapi_an &&
2692 		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2693 		    (!sata_pmp_attached(ap) ||
2694 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2695 			/* issue SET feature command to turn this on */
2696 			err_mask = ata_dev_set_feature(dev,
2697 					SETFEATURES_SATA_ENABLE, SATA_AN);
2698 			if (err_mask)
2699 				ata_dev_err(dev,
2700 					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
2701 					    err_mask);
2702 			else {
2703 				dev->flags |= ATA_DFLAG_AN;
2704 				atapi_an_string = ", ATAPI AN";
2705 			}
2706 		}
2707 
2708 		if (ata_id_cdb_intr(dev->id)) {
2709 			dev->flags |= ATA_DFLAG_CDB_INTR;
2710 			cdb_intr_string = ", CDB intr";
2711 		}
2712 
2713 		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2714 			dev->flags |= ATA_DFLAG_DMADIR;
2715 			dma_dir_string = ", DMADIR";
2716 		}
2717 
2718 		if (ata_id_has_da(dev->id)) {
2719 			dev->flags |= ATA_DFLAG_DA;
2720 			zpodd_init(dev);
2721 		}
2722 
2723 		/* print device info to dmesg */
2724 		if (ata_msg_drv(ap) && print_info)
2725 			ata_dev_info(dev,
2726 				     "ATAPI: %s, %s, max %s%s%s%s\n",
2727 				     modelbuf, fwrevbuf,
2728 				     ata_mode_string(xfer_mask),
2729 				     cdb_intr_string, atapi_an_string,
2730 				     dma_dir_string);
2731 	}
2732 
2733 	/* determine max_sectors */
2734 	dev->max_sectors = ATA_MAX_SECTORS;
2735 	if (dev->flags & ATA_DFLAG_LBA48)
2736 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2737 
2738 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2739 	   200 sectors */
2740 	if (ata_dev_knobble(dev)) {
2741 		if (ata_msg_drv(ap) && print_info)
2742 			ata_dev_info(dev, "applying bridge limits\n");
2743 		dev->udma_mask &= ATA_UDMA5;
2744 		dev->max_sectors = ATA_MAX_SECTORS;
2745 	}
2746 
2747 	if ((dev->class == ATA_DEV_ATAPI) &&
2748 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2749 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2750 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2751 	}
2752 
2753 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2754 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2755 					 dev->max_sectors);
2756 
2757 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2758 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2759 					 dev->max_sectors);
2760 
2761 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2762 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2763 
2764 	if (ap->ops->dev_config)
2765 		ap->ops->dev_config(dev);
2766 
2767 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2768 		/* Let the user know. We don't want to disallow opens for
2769 		   rescue purposes, or in case the vendor is just a blithering
2770 		   idiot. Do this after the dev_config call as some controllers
2771 		   with buggy firmware may want to avoid reporting false device
2772 		   bugs */
2773 
2774 		if (print_info) {
2775 			ata_dev_warn(dev,
2776 "Drive reports diagnostics failure. This may indicate a drive\n");
2777 			ata_dev_warn(dev,
2778 "fault or invalid emulation. Contact drive vendor for information.\n");
2779 		}
2780 	}
2781 
2782 	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2783 		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2784 		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
2785 	}
2786 
2787 	return 0;
2788 
2789 err_out_nosup:
2790 	if (ata_msg_probe(ap))
2791 		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2792 	return rc;
2793 }
2794 
2795 /**
2796  *	ata_cable_40wire	-	return 40 wire cable type
2797  *	@ap: port
2798  *
2799  *	Helper method for drivers which want to hardwire 40 wire cable
2800  *	detection.
2801  */
2802 
2803 int ata_cable_40wire(struct ata_port *ap)
2804 {
2805 	return ATA_CBL_PATA40;
2806 }
2807 
2808 /**
2809  *	ata_cable_80wire	-	return 80 wire cable type
2810  *	@ap: port
2811  *
2812  *	Helper method for drivers which want to hardwire 80 wire cable
2813  *	detection.
2814  */
2815 
2816 int ata_cable_80wire(struct ata_port *ap)
2817 {
2818 	return ATA_CBL_PATA80;
2819 }
2820 
2821 /**
2822  *	ata_cable_unknown	-	return unknown PATA cable.
2823  *	@ap: port
2824  *
2825  *	Helper method for drivers which have no PATA cable detection.
2826  */
2827 
2828 int ata_cable_unknown(struct ata_port *ap)
2829 {
2830 	return ATA_CBL_PATA_UNK;
2831 }
2832 
2833 /**
2834  *	ata_cable_ignore	-	return ignored PATA cable.
2835  *	@ap: port
2836  *
2837  *	Helper method for drivers which don't use cable type to limit
2838  *	transfer mode.
2839  */
2840 int ata_cable_ignore(struct ata_port *ap)
2841 {
2842 	return ATA_CBL_PATA_IGN;
2843 }
2844 
2845 /**
2846  *	ata_cable_sata	-	return SATA cable type
2847  *	@ap: port
2848  *
2849  *	Helper method for drivers which have SATA cables
2850  */
2851 
2852 int ata_cable_sata(struct ata_port *ap)
2853 {
2854 	return ATA_CBL_SATA;
2855 }
2856 
2857 /**
2858  *	ata_bus_probe - Reset and probe ATA bus
2859  *	@ap: Bus to probe
2860  *
2861  *	Master ATA bus probing function.  Initiates a hardware-dependent
2862  *	bus reset, then attempts to identify any devices found on
2863  *	the bus.
2864  *
2865  *	LOCKING:
2866  *	PCI/etc. bus probe sem.
2867  *
2868  *	RETURNS:
2869  *	Zero on success, negative errno otherwise.
2870  */
2871 
2872 int ata_bus_probe(struct ata_port *ap)
2873 {
2874 	unsigned int classes[ATA_MAX_DEVICES];
2875 	int tries[ATA_MAX_DEVICES];
2876 	int rc;
2877 	struct ata_device *dev;
2878 
2879 	ata_for_each_dev(dev, &ap->link, ALL)
2880 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2881 
2882  retry:
2883 	ata_for_each_dev(dev, &ap->link, ALL) {
2884 		/* If we issue an SRST then an ATA drive (not ATAPI)
2885 		 * may change configuration and be in PIO0 timing. If
2886 		 * we do a hard reset (or are coming from power on)
2887 		 * this is true for ATA or ATAPI. Until we've set a
2888 		 * suitable controller mode we should not touch the
2889 		 * bus as we may be talking too fast.
2890 		 */
2891 		dev->pio_mode = XFER_PIO_0;
2892 		dev->dma_mode = 0xff;
2893 
2894 		/* If the controller has a pio mode setup function
2895 		 * then use it to set the chipset to rights. Don't
2896 		 * touch the DMA setup as that will be dealt with when
2897 		 * configuring devices.
2898 		 */
2899 		if (ap->ops->set_piomode)
2900 			ap->ops->set_piomode(ap, dev);
2901 	}
2902 
2903 	/* reset and determine device classes */
2904 	ap->ops->phy_reset(ap);
2905 
2906 	ata_for_each_dev(dev, &ap->link, ALL) {
2907 		if (dev->class != ATA_DEV_UNKNOWN)
2908 			classes[dev->devno] = dev->class;
2909 		else
2910 			classes[dev->devno] = ATA_DEV_NONE;
2911 
2912 		dev->class = ATA_DEV_UNKNOWN;
2913 	}
2914 
2915 	/* read IDENTIFY page and configure devices. We have to do the identify
2916 	   specific sequence bass-ackwards so that PDIAG- is released by
2917 	   the slave device */
2918 
2919 	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2920 		if (tries[dev->devno])
2921 			dev->class = classes[dev->devno];
2922 
2923 		if (!ata_dev_enabled(dev))
2924 			continue;
2925 
2926 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2927 				     dev->id);
2928 		if (rc)
2929 			goto fail;
2930 	}
2931 
2932 	/* Now ask for the cable type as PDIAG- should have been released */
2933 	if (ap->ops->cable_detect)
2934 		ap->cbl = ap->ops->cable_detect(ap);
2935 
2936 	/* We may have SATA bridge glue hiding here irrespective of
2937 	 * the reported cable types and sensed types.  When SATA
2938 	 * drives indicate we have a bridge, we don't know which end
2939 	 * of the link the bridge is on, which is a problem.
2940 	 */
2941 	ata_for_each_dev(dev, &ap->link, ENABLED)
2942 		if (ata_id_is_sata(dev->id))
2943 			ap->cbl = ATA_CBL_SATA;
2944 
2945 	/* After the identify sequence we can now set up the devices. We do
2946 	   this in the normal order so that the user doesn't get confused */
2947 
2948 	ata_for_each_dev(dev, &ap->link, ENABLED) {
2949 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2950 		rc = ata_dev_configure(dev);
2951 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2952 		if (rc)
2953 			goto fail;
2954 	}
2955 
2956 	/* configure transfer mode */
2957 	rc = ata_set_mode(&ap->link, &dev);
2958 	if (rc)
2959 		goto fail;
2960 
2961 	ata_for_each_dev(dev, &ap->link, ENABLED)
2962 		return 0;
2963 
2964 	return -ENODEV;
2965 
2966  fail:
2967 	tries[dev->devno]--;
2968 
2969 	switch (rc) {
2970 	case -EINVAL:
2971 		/* eeek, something went very wrong, give up */
2972 		tries[dev->devno] = 0;
2973 		break;
2974 
2975 	case -ENODEV:
2976 		/* give it just one more chance */
2977 		tries[dev->devno] = min(tries[dev->devno], 1);
2978 	case -EIO:
2979 		if (tries[dev->devno] == 1) {
2980 			/* This is the last chance, better to slow
2981 			 * down than lose it.
2982 			 */
2983 			sata_down_spd_limit(&ap->link, 0);
2984 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2985 		}
2986 	}
2987 
2988 	if (!tries[dev->devno])
2989 		ata_dev_disable(dev);
2990 
2991 	goto retry;
2992 }
2993 
2994 /**
2995  *	sata_print_link_status - Print SATA link status
2996  *	@link: SATA link to printk link status about
2997  *
2998  *	This function prints link speed and status of a SATA link.
2999  *
3000  *	LOCKING:
3001  *	None.
3002  */
3003 static void sata_print_link_status(struct ata_link *link)
3004 {
3005 	u32 sstatus, scontrol, tmp;
3006 
3007 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
3008 		return;
3009 	sata_scr_read(link, SCR_CONTROL, &scontrol);
3010 
3011 	if (ata_phys_link_online(link)) {
3012 		tmp = (sstatus >> 4) & 0xf;
3013 		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3014 			      sata_spd_string(tmp), sstatus, scontrol);
3015 	} else {
3016 		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3017 			      sstatus, scontrol);
3018 	}
3019 }
3020 
3021 /**
3022  *	ata_dev_pair		-	return other device on cable
3023  *	@adev: device
3024  *
3025  *	Obtain the other device on the same cable; if none is
3026  *	present, NULL is returned.
3027  */
3028 
3029 struct ata_device *ata_dev_pair(struct ata_device *adev)
3030 {
3031 	struct ata_link *link = adev->link;
3032 	struct ata_device *pair = &link->device[1 - adev->devno];
3033 	if (!ata_dev_enabled(pair))
3034 		return NULL;
3035 	return pair;
3036 }
3037 
3038 /**
3039  *	sata_down_spd_limit - adjust SATA spd limit downward
3040  *	@link: Link to adjust SATA spd limit for
3041  *	@spd_limit: Additional limit
3042  *
3043  *	Adjust SATA spd limit of @link downward.  Note that this
3044  *	function only adjusts the limit.  The change must be applied
3045  *	using sata_set_spd().
3046  *
3047  *	If @spd_limit is non-zero, the speed is limited to equal to or
3048  *	lower than @spd_limit if such speed is supported.  If
3049  *	@spd_limit is slower than any supported speed, only the lowest
3050  *	supported speed is allowed.
3051  *
3052  *	LOCKING:
3053  *	Inherited from caller.
3054  *
3055  *	RETURNS:
3056  *	0 on success, negative errno on failure
3057  */
3058 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3059 {
3060 	u32 sstatus, spd, mask;
3061 	int rc, bit;
3062 
3063 	if (!sata_scr_valid(link))
3064 		return -EOPNOTSUPP;
3065 
3066 	/* If SCR can be read, use it to determine the current SPD.
3067 	 * If not, use cached value in link->sata_spd.
3068 	 */
3069 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3070 	if (rc == 0 && ata_sstatus_online(sstatus))
3071 		spd = (sstatus >> 4) & 0xf;
3072 	else
3073 		spd = link->sata_spd;
3074 
3075 	mask = link->sata_spd_limit;
3076 	if (mask <= 1)
3077 		return -EINVAL;
3078 
3079 	/* unconditionally mask off the highest bit */
3080 	bit = fls(mask) - 1;
3081 	mask &= ~(1 << bit);
3082 
3083 	/* Mask off all speeds higher than or equal to the current
3084 	 * one.  Force 1.5Gbps if current SPD is not available.
3085 	 */
3086 	if (spd > 1)
3087 		mask &= (1 << (spd - 1)) - 1;
3088 	else
3089 		mask &= 1;
3090 
3091 	/* were we already at the bottom? */
3092 	if (!mask)
3093 		return -EINVAL;
3094 
3095 	if (spd_limit) {
3096 		if (mask & ((1 << spd_limit) - 1))
3097 			mask &= (1 << spd_limit) - 1;
3098 		else {
3099 			bit = ffs(mask) - 1;
3100 			mask = 1 << bit;
3101 		}
3102 	}
3103 
3104 	link->sata_spd_limit = mask;
3105 
3106 	ata_link_warn(link, "limiting SATA link speed to %s\n",
3107 		      sata_spd_string(fls(mask)));
3108 
3109 	return 0;
3110 }
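
/*
 * Worked example (illustrative, with @spd_limit == 0): a link currently
 * running at 6.0 Gbps (spd == 3) with sata_spd_limit == 0x7.  Dropping the
 * highest bit gives 0x3, and masking off speeds >= the current one keeps
 * (1 << (3 - 1)) - 1 == 0x3, so the new limit is 0x3 (1.5 and 3.0 Gbps)
 * and the warning reports "limiting SATA link speed to 3.0 Gbps".
 */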
3111 
3112 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3113 {
3114 	struct ata_link *host_link = &link->ap->link;
3115 	u32 limit, target, spd;
3116 
3117 	limit = link->sata_spd_limit;
3118 
3119 	/* Don't configure downstream link faster than upstream link.
3120 	 * It doesn't speed up anything and some PMPs choke on such
3121 	 * configuration.
3122 	 */
3123 	if (!ata_is_host_link(link) && host_link->sata_spd)
3124 		limit &= (1 << host_link->sata_spd) - 1;
3125 
3126 	if (limit == UINT_MAX)
3127 		target = 0;
3128 	else
3129 		target = fls(limit);
3130 
3131 	spd = (*scontrol >> 4) & 0xf;
3132 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3133 
3134 	return spd != target;
3135 }
3136 
3137 /**
3138  *	sata_set_spd_needed - is SATA spd configuration needed
3139  *	@link: Link in question
3140  *
3141  *	Test whether the spd limit in SControl matches
3142  *	@link->sata_spd_limit.  This function is used to determine
3143  *	whether hardreset is necessary to apply SATA spd
3144  *	configuration.
3145  *
3146  *	LOCKING:
3147  *	Inherited from caller.
3148  *
3149  *	RETURNS:
3150  *	1 if SATA spd configuration is needed, 0 otherwise.
3151  */
3152 static int sata_set_spd_needed(struct ata_link *link)
3153 {
3154 	u32 scontrol;
3155 
3156 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3157 		return 1;
3158 
3159 	return __sata_set_spd_needed(link, &scontrol);
3160 }
3161 
3162 /**
3163  *	sata_set_spd - set SATA spd according to spd limit
3164  *	@link: Link to set SATA spd for
3165  *
3166  *	Set SATA spd of @link according to sata_spd_limit.
3167  *
3168  *	LOCKING:
3169  *	Inherited from caller.
3170  *
3171  *	RETURNS:
3172  *	0 if spd doesn't need to be changed, 1 if spd has been
3173  *	changed.  Negative errno if SCR registers are inaccessible.
3174  */
3175 int sata_set_spd(struct ata_link *link)
3176 {
3177 	u32 scontrol;
3178 	int rc;
3179 
3180 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3181 		return rc;
3182 
3183 	if (!__sata_set_spd_needed(link, &scontrol))
3184 		return 0;
3185 
3186 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3187 		return rc;
3188 
3189 	return 1;
3190 }
3191 
3192 /*
3193  * This mode timing computation functionality is ported over from
3194  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3195  */
3196 /*
3197  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3198  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3199  * for UDMA6, which is currently supported only by Maxtor drives.
3200  *
3201  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3202  */
3203 
3204 static const struct ata_timing ata_timing[] = {
3205 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
3206 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
3207 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
3208 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
3209 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
3210 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
3211 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
3212 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
3213 
3214 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
3215 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
3216 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
3217 
3218 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
3219 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
3220 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
3221 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
3222 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
3223 
3224 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
3225 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
3226 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
3227 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
3228 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
3229 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
3230 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
3231 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
3232 
3233 	{ 0xFF }
3234 };
3235 
3236 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3237 #define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)
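
/*
 * Quantization example (illustrative): ENOUGH() is a divide-and-round-up,
 * and EZ() first scales the nanosecond value by 1000, so with a clock
 * period passed in the matching finer unit (e.g. T == 30000 for a roughly
 * 33 MHz bus clock) a 70ns setup time becomes
 * EZ(70, 30000) == ENOUGH(70000, 30000) == 3 clock periods.
 */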
3238 
3239 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3240 {
3241 	q->setup	= EZ(t->setup,       T);
3242 	q->act8b	= EZ(t->act8b,       T);
3243 	q->rec8b	= EZ(t->rec8b,       T);
3244 	q->cyc8b	= EZ(t->cyc8b,       T);
3245 	q->active	= EZ(t->active,      T);
3246 	q->recover	= EZ(t->recover,     T);
3247 	q->dmack_hold	= EZ(t->dmack_hold,  T);
3248 	q->cycle	= EZ(t->cycle,       T);
3249 	q->udma		= EZ(t->udma,       UT);
3250 }
3251 
3252 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3253 		      struct ata_timing *m, unsigned int what)
3254 {
3255 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3256 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3257 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3258 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3259 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3260 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3261 	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3262 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3263 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3264 }
3265 
3266 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3267 {
3268 	const struct ata_timing *t = ata_timing;
3269 
3270 	while (xfer_mode > t->mode)
3271 		t++;
3272 
3273 	if (xfer_mode == t->mode)
3274 		return t;
3275 
3276 	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3277 			__func__, xfer_mode);
3278 
3279 	return NULL;
3280 }
3281 
3282 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3283 		       struct ata_timing *t, int T, int UT)
3284 {
3285 	const u16 *id = adev->id;
3286 	const struct ata_timing *s;
3287 	struct ata_timing p;
3288 
3289 	/*
3290 	 * Find the mode.
3291 	 */
3292 
3293 	if (!(s = ata_timing_find_mode(speed)))
3294 		return -EINVAL;
3295 
3296 	memcpy(t, s, sizeof(*s));
3297 
3298 	/*
3299 	 * If the drive is an EIDE drive, it can tell us it needs extended
3300 	 * PIO/MW_DMA cycle timing.
3301 	 */
3302 
3303 	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3304 		memset(&p, 0, sizeof(p));
3305 
3306 		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3307 			if (speed <= XFER_PIO_2)
3308 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3309 			else if ((speed <= XFER_PIO_4) ||
3310 				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3311 				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3312 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3313 			p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3314 
3315 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3316 	}
3317 
3318 	/*
3319 	 * Convert the timing to bus clock counts.
3320 	 */
3321 
3322 	ata_timing_quantize(t, t, T, UT);
3323 
3324 	/*
3325 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3326 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3327 	 * DMA cycle timing is no faster than the fastest PIO timing.
3328 	 */
3329 
3330 	if (speed > XFER_PIO_6) {
3331 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3332 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3333 	}
3334 
3335 	/*
3336 	 * Lengthen active & recovery time so that cycle time is correct.
3337 	 */
3338 
3339 	if (t->act8b + t->rec8b < t->cyc8b) {
3340 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3341 		t->rec8b = t->cyc8b - t->act8b;
3342 	}
3343 
3344 	if (t->active + t->recover < t->cycle) {
3345 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3346 		t->recover = t->cycle - t->active;
3347 	}
3348 
3349 	/* In a few cases quantisation may produce enough errors to
3350 	   leave t->cycle too low for the sum of active and recovery;
3351 	   if so we must correct this */
3352 	if (t->active + t->recover > t->cycle)
3353 		t->cycle = t->active + t->recover;
3354 
3355 	return 0;
3356 }
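
/*
 * Example of the lengthening step above (illustrative numbers): if
 * quantization leaves act8b == 80, rec8b == 70 and cyc8b == 180, then
 * act8b grows by (180 - 150) / 2 == 15 to 95 and rec8b becomes
 * 180 - 95 == 85, so active plus recovery again fills the whole cycle.
 */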
3357 
3358 /**
3359  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3360  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3361  *	@cycle: cycle duration in ns
3362  *
3363  *	Return matching xfer mode for @cycle.  The returned mode is of
3364  *	the transfer type specified by @xfer_shift.  If @cycle is too
3365  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3366  *	than the fastest known mode, the fasted mode is returned.
3367  *	than the fastest known mode, the fastest mode is returned.
3368  *	LOCKING:
3369  *	None.
3370  *
3371  *	RETURNS:
3372  *	Matching xfer_mode, 0xff if no match found.
3373  */
3374 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3375 {
3376 	u8 base_mode = 0xff, last_mode = 0xff;
3377 	const struct ata_xfer_ent *ent;
3378 	const struct ata_timing *t;
3379 
3380 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3381 		if (ent->shift == xfer_shift)
3382 			base_mode = ent->base;
3383 
3384 	for (t = ata_timing_find_mode(base_mode);
3385 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3386 		unsigned short this_cycle;
3387 
3388 		switch (xfer_shift) {
3389 		case ATA_SHIFT_PIO:
3390 		case ATA_SHIFT_MWDMA:
3391 			this_cycle = t->cycle;
3392 			break;
3393 		case ATA_SHIFT_UDMA:
3394 			this_cycle = t->udma;
3395 			break;
3396 		default:
3397 			return 0xff;
3398 		}
3399 
3400 		if (cycle > this_cycle)
3401 			break;
3402 
3403 		last_mode = t->mode;
3404 	}
3405 
3406 	return last_mode;
3407 }
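
/*
 * Worked example (illustrative): ata_timing_cycle2mode(ATA_SHIFT_UDMA, 50)
 * walks the UDMA entries in the table above; the cycle times 120, 80 and
 * 60 are all >= the requested 50ns while UDMA3's 45 is not, so
 * XFER_UDMA_2 is returned.  A request slower than UDMA0 (e.g. 150ns)
 * fails the very first comparison and yields 0xff.
 */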
3408 
3409 /**
3410  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3411  *	@dev: Device to adjust xfer masks
3412  *	@sel: ATA_DNXFER_* selector
3413  *
3414  *	Adjust xfer masks of @dev downward.  Note that this function
3415  *	does not apply the change.  Invoking ata_set_mode() afterwards
3416  *	will apply the limit.
3417  *
3418  *	LOCKING:
3419  *	Inherited from caller.
3420  *
3421  *	RETURNS:
3422  *	0 on success, negative errno on failure
3423  */
3424 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3425 {
3426 	char buf[32];
3427 	unsigned long orig_mask, xfer_mask;
3428 	unsigned long pio_mask, mwdma_mask, udma_mask;
3429 	int quiet, highbit;
3430 
3431 	quiet = !!(sel & ATA_DNXFER_QUIET);
3432 	sel &= ~ATA_DNXFER_QUIET;
3433 
3434 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3435 						  dev->mwdma_mask,
3436 						  dev->udma_mask);
3437 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3438 
3439 	switch (sel) {
3440 	case ATA_DNXFER_PIO:
3441 		highbit = fls(pio_mask) - 1;
3442 		pio_mask &= ~(1 << highbit);
3443 		break;
3444 
3445 	case ATA_DNXFER_DMA:
3446 		if (udma_mask) {
3447 			highbit = fls(udma_mask) - 1;
3448 			udma_mask &= ~(1 << highbit);
3449 			if (!udma_mask)
3450 				return -ENOENT;
3451 		} else if (mwdma_mask) {
3452 			highbit = fls(mwdma_mask) - 1;
3453 			mwdma_mask &= ~(1 << highbit);
3454 			if (!mwdma_mask)
3455 				return -ENOENT;
3456 		}
3457 		break;
3458 
3459 	case ATA_DNXFER_40C:
3460 		udma_mask &= ATA_UDMA_MASK_40C;
3461 		break;
3462 
3463 	case ATA_DNXFER_FORCE_PIO0:
3464 		pio_mask &= 1;
3465 	case ATA_DNXFER_FORCE_PIO:
3466 		mwdma_mask = 0;
3467 		udma_mask = 0;
3468 		break;
3469 
3470 	default:
3471 		BUG();
3472 	}
3473 
3474 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3475 
3476 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3477 		return -ENOENT;
3478 
3479 	if (!quiet) {
3480 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3481 			snprintf(buf, sizeof(buf), "%s:%s",
3482 				 ata_mode_string(xfer_mask),
3483 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3484 		else
3485 			snprintf(buf, sizeof(buf), "%s",
3486 				 ata_mode_string(xfer_mask));
3487 
3488 		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3489 	}
3490 
3491 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3492 			    &dev->udma_mask);
3493 
3494 	return 0;
3495 }
3496 
3497 static int ata_dev_set_mode(struct ata_device *dev)
3498 {
3499 	struct ata_port *ap = dev->link->ap;
3500 	struct ata_eh_context *ehc = &dev->link->eh_context;
3501 	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3502 	const char *dev_err_whine = "";
3503 	int ign_dev_err = 0;
3504 	unsigned int err_mask = 0;
3505 	int rc;
3506 
3507 	dev->flags &= ~ATA_DFLAG_PIO;
3508 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3509 		dev->flags |= ATA_DFLAG_PIO;
3510 
3511 	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3512 		dev_err_whine = " (SET_XFERMODE skipped)";
3513 	else {
3514 		if (nosetxfer)
3515 			ata_dev_warn(dev,
3516 				     "NOSETXFER but PATA detected - can't "
3517 				     "skip SETXFER, might malfunction\n");
3518 		err_mask = ata_dev_set_xfermode(dev);
3519 	}
3520 
3521 	if (err_mask & ~AC_ERR_DEV)
3522 		goto fail;
3523 
3524 	/* revalidate */
3525 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3526 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3527 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3528 	if (rc)
3529 		return rc;
3530 
3531 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3532 		/* Old CFA may refuse this command, which is just fine */
3533 		if (ata_id_is_cfa(dev->id))
3534 			ign_dev_err = 1;
3535 		/* Catch several broken garbage emulations plus some pre
3536 		   ATA devices */
3537 		if (ata_id_major_version(dev->id) == 0 &&
3538 					dev->pio_mode <= XFER_PIO_2)
3539 			ign_dev_err = 1;
3540 		/* Some very old devices and some bad newer ones fail
3541 		   any kind of SET_XFERMODE request but support PIO0-2
3542 		   timings and no IORDY */
3543 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3544 			ign_dev_err = 1;
3545 	}
3546 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3547 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3548 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3549 	    dev->dma_mode == XFER_MW_DMA_0 &&
3550 	    (dev->id[63] >> 8) & 1)
3551 		ign_dev_err = 1;
3552 
3553 	/* if the device is actually configured correctly, ignore dev err */
3554 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3555 		ign_dev_err = 1;
3556 
3557 	if (err_mask & AC_ERR_DEV) {
3558 		if (!ign_dev_err)
3559 			goto fail;
3560 		else
3561 			dev_err_whine = " (device error ignored)";
3562 	}
3563 
3564 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3565 		dev->xfer_shift, (int)dev->xfer_mode);
3566 
3567 	ata_dev_info(dev, "configured for %s%s\n",
3568 		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3569 		     dev_err_whine);
3570 
3571 	return 0;
3572 
3573  fail:
3574 	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3575 	return -EIO;
3576 }
3577 
3578 /**
3579  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3580  *	@link: link on which timings will be programmed
3581  *	@r_failed_dev: out parameter for failed device
3582  *
3583  *	Standard implementation of the function used to tune and set
3584  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3585  *	ata_dev_set_mode() fails, pointer to the failing device is
3586  *	returned in @r_failed_dev.
3587  *
3588  *	LOCKING:
3589  *	PCI/etc. bus probe sem.
3590  *
3591  *	RETURNS:
3592  *	0 on success, negative errno otherwise
3593  */
3594 
3595 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3596 {
3597 	struct ata_port *ap = link->ap;
3598 	struct ata_device *dev;
3599 	int rc = 0, used_dma = 0, found = 0;
3600 
3601 	/* step 1: calculate xfer_mask */
3602 	ata_for_each_dev(dev, link, ENABLED) {
3603 		unsigned long pio_mask, dma_mask;
3604 		unsigned int mode_mask;
3605 
3606 		mode_mask = ATA_DMA_MASK_ATA;
3607 		if (dev->class == ATA_DEV_ATAPI)
3608 			mode_mask = ATA_DMA_MASK_ATAPI;
3609 		else if (ata_id_is_cfa(dev->id))
3610 			mode_mask = ATA_DMA_MASK_CFA;
3611 
3612 		ata_dev_xfermask(dev);
3613 		ata_force_xfermask(dev);
3614 
3615 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3616 
3617 		if (libata_dma_mask & mode_mask)
3618 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3619 						     dev->udma_mask);
3620 		else
3621 			dma_mask = 0;
3622 
3623 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3624 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3625 
3626 		found = 1;
3627 		if (ata_dma_enabled(dev))
3628 			used_dma = 1;
3629 	}
3630 	if (!found)
3631 		goto out;
3632 
3633 	/* step 2: always set host PIO timings */
3634 	ata_for_each_dev(dev, link, ENABLED) {
3635 		if (dev->pio_mode == 0xff) {
3636 			ata_dev_warn(dev, "no PIO support\n");
3637 			rc = -EINVAL;
3638 			goto out;
3639 		}
3640 
3641 		dev->xfer_mode = dev->pio_mode;
3642 		dev->xfer_shift = ATA_SHIFT_PIO;
3643 		if (ap->ops->set_piomode)
3644 			ap->ops->set_piomode(ap, dev);
3645 	}
3646 
3647 	/* step 3: set host DMA timings */
3648 	ata_for_each_dev(dev, link, ENABLED) {
3649 		if (!ata_dma_enabled(dev))
3650 			continue;
3651 
3652 		dev->xfer_mode = dev->dma_mode;
3653 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3654 		if (ap->ops->set_dmamode)
3655 			ap->ops->set_dmamode(ap, dev);
3656 	}
3657 
3658 	/* step 4: update devices' xfer mode */
3659 	ata_for_each_dev(dev, link, ENABLED) {
3660 		rc = ata_dev_set_mode(dev);
3661 		if (rc)
3662 			goto out;
3663 	}
3664 
3665 	/* Record simplex status. If we selected DMA then the other
3666 	 * host channels are not permitted to do so.
3667 	 */
3668 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3669 		ap->host->simplex_claimed = ap;
3670 
3671  out:
3672 	if (rc)
3673 		*r_failed_dev = dev;
3674 	return rc;
3675 }
3676 
3677 /**
3678  *	ata_wait_ready - wait for link to become ready
3679  *	@link: link to be waited on
3680  *	@deadline: deadline jiffies for the operation
3681  *	@check_ready: callback to check link readiness
3682  *
3683  *	Wait for @link to become ready.  @check_ready should return
3684  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3685  *	link doesn't seem to be occupied, other errno for other error
3686  *	conditions.
3687  *
3688  *	Transient -ENODEV conditions are allowed for
3689  *	ATA_TMOUT_FF_WAIT.
3690  *
3691  *	LOCKING:
3692  *	EH context.
3693  *
3694  *	RETURNS:
3695  *	0 if @link is ready before @deadline; otherwise, -errno.
3696  */
3697 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3698 		   int (*check_ready)(struct ata_link *link))
3699 {
3700 	unsigned long start = jiffies;
3701 	unsigned long nodev_deadline;
3702 	int warned = 0;
3703 
3704 	/* choose which 0xff timeout to use, read comment in libata.h */
3705 	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3706 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3707 	else
3708 		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3709 
3710 	/* Slave readiness can't be tested separately from master.  On
3711 	 * M/S emulation configuration, this function should be called
3712 	 * only on the master and it will handle both master and slave.
3713 	 */
3714 	WARN_ON(link == link->ap->slave_link);
3715 
3716 	if (time_after(nodev_deadline, deadline))
3717 		nodev_deadline = deadline;
3718 
3719 	while (1) {
3720 		unsigned long now = jiffies;
3721 		int ready, tmp;
3722 
3723 		ready = tmp = check_ready(link);
3724 		if (ready > 0)
3725 			return 0;
3726 
3727 		/*
3728 		 * -ENODEV could be transient.  Ignore -ENODEV if link
3729 		 * is online.  Also, some SATA devices take a long
3730 		 * time to clear 0xff after reset.  Wait for
3731 		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3732 		 * offline.
3733 		 *
3734 		 * Note that some PATA controllers (pata_ali) explode
3735 		 * if status register is read more than once when
3736 		 * there's no device attached.
3737 		 */
3738 		if (ready == -ENODEV) {
3739 			if (ata_link_online(link))
3740 				ready = 0;
3741 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3742 				 !ata_link_offline(link) &&
3743 				 time_before(now, nodev_deadline))
3744 				ready = 0;
3745 		}
3746 
3747 		if (ready)
3748 			return ready;
3749 		if (time_after(now, deadline))
3750 			return -EBUSY;
3751 
3752 		if (!warned && time_after(now, start + 5 * HZ) &&
3753 		    (deadline - now > 3 * HZ)) {
3754 			ata_link_warn(link,
3755 				"link is slow to respond, please be patient "
3756 				"(ready=%d)\n", tmp);
3757 			warned = 1;
3758 		}
3759 
3760 		ata_msleep(link->ap, 50);
3761 	}
3762 }
3763 
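/*
 * Illustrative sketch (not part of this file): a minimal @check_ready
 * callback following the contract above -- return a positive value once
 * the link is ready, 0 while it is not, and -errno on errors.
 * my_controller_busy() is a hypothetical LLD helper standing in for a
 * hardware status check.
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		if (my_controller_busy(link->ap))
 *			return 0;
 *		return 1;
 *	}
 */
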
3764 /**
3765  *	ata_wait_after_reset - wait for link to become ready after reset
3766  *	@link: link to be waited on
3767  *	@deadline: deadline jiffies for the operation
3768  *	@check_ready: callback to check link readiness
3769  *
3770  *	Wait for @link to become ready after reset.
3771  *
3772  *	LOCKING:
3773  *	EH context.
3774  *
3775  *	RETURNS:
3776  *	0 if @link is ready before @deadline; otherwise, -errno.
3777  */
3778 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3779 				int (*check_ready)(struct ata_link *link))
3780 {
3781 	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3782 
3783 	return ata_wait_ready(link, deadline, check_ready);
3784 }
3785 
3786 /**
3787  *	sata_link_debounce - debounce SATA phy status
3788  *	@link: ATA link to debounce SATA phy status for
3789  *	@params: timing parameters { interval, duration, timeout } in msec
3790  *	@deadline: deadline jiffies for the operation
3791  *
3792  *	Make sure SStatus of @link reaches a stable state, determined by
3793  *	holding the same value where DET is not 1 for @duration polled
3794  *	every @interval, before @timeout.  The timeout constrains the
3795  *	beginning of the stable state.  Because DET gets stuck at 1 on
3796  *	some controllers after hot unplugging, this function waits
3797  *	until the timeout and then returns 0 if DET is stable at 1.
3798  *
3799  *	@timeout is further limited by @deadline.  The sooner of the
3800  *	two is used.
3801  *
3802  *	LOCKING:
3803  *	Kernel thread context (may sleep)
3804  *
3805  *	RETURNS:
3806  *	0 on success, -errno on failure.
3807  */
3808 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3809 		       unsigned long deadline)
3810 {
3811 	unsigned long interval = params[0];
3812 	unsigned long duration = params[1];
3813 	unsigned long last_jiffies, t;
3814 	u32 last, cur;
3815 	int rc;
3816 
3817 	t = ata_deadline(jiffies, params[2]);
3818 	if (time_before(t, deadline))
3819 		deadline = t;
3820 
3821 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3822 		return rc;
3823 	cur &= 0xf;
3824 
3825 	last = cur;
3826 	last_jiffies = jiffies;
3827 
3828 	while (1) {
3829 		ata_msleep(link->ap, interval);
3830 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3831 			return rc;
3832 		cur &= 0xf;
3833 
3834 		/* DET stable? */
3835 		if (cur == last) {
3836 			if (cur == 1 && time_before(jiffies, deadline))
3837 				continue;
3838 			if (time_after(jiffies,
3839 				       ata_deadline(last_jiffies, duration)))
3840 				return 0;
3841 			continue;
3842 		}
3843 
3844 		/* unstable, start over */
3845 		last = cur;
3846 		last_jiffies = jiffies;
3847 
3848 		/* Check deadline.  If debouncing failed, return
3849 		 * -EPIPE to tell upper layer to lower link speed.
3850 		 */
3851 		if (time_after(jiffies, deadline))
3852 			return -EPIPE;
3853 	}
3854 }
3855 
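/*
 * Illustrative sketch (not part of this file): debouncing with the stock
 * "normal" timings and a 5 second cap on the whole operation.  The params
 * array is { interval, duration, timeout } in msecs, matching the
 * sata_deb_timing_* tables defined near the top of this file.
 *
 *	unsigned long deadline = ata_deadline(jiffies, 5000);
 *	int rc = sata_link_debounce(link, sata_deb_timing_normal, deadline);
 */
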
3856 /**
3857  *	sata_link_resume - resume SATA link
3858  *	@link: ATA link to resume SATA
3859  *	@params: timing parameters { interval, duration, timeout } in msec
3860  *	@deadline: deadline jiffies for the operation
3861  *
3862  *	Resume SATA phy @link and debounce it.
3863  *
3864  *	LOCKING:
3865  *	Kernel thread context (may sleep)
3866  *
3867  *	RETURNS:
3868  *	0 on success, -errno on failure.
3869  */
3870 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3871 		     unsigned long deadline)
3872 {
3873 	int tries = ATA_LINK_RESUME_TRIES;
3874 	u32 scontrol, serror;
3875 	int rc;
3876 
3877 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3878 		return rc;
3879 
3880 	/*
3881 	 * Writes to SControl sometimes get ignored under certain
3882 	 * controllers (ata_piix SIDPR).  Make sure DET actually is
3883 	 * cleared.
3884 	 */
3885 	do {
3886 		scontrol = (scontrol & 0x0f0) | 0x300;
3887 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3888 			return rc;
3889 		/*
3890 		 * Some PHYs react badly if SStatus is pounded
3891 		 * immediately after resuming.  Delay 200ms before
3892 		 * debouncing.
3893 		 */
3894 		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3895 			ata_msleep(link->ap, 200);
3896 
3897 		/* is SControl restored correctly? */
3898 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3899 			return rc;
3900 	} while ((scontrol & 0xf0f) != 0x300 && --tries);
3901 
3902 	if ((scontrol & 0xf0f) != 0x300) {
3903 		ata_link_warn(link, "failed to resume link (SControl %X)\n",
3904 			     scontrol);
3905 		return 0;
3906 	}
3907 
3908 	if (tries < ATA_LINK_RESUME_TRIES)
3909 		ata_link_warn(link, "link resume succeeded after %d retries\n",
3910 			      ATA_LINK_RESUME_TRIES - tries);
3911 
3912 	if ((rc = sata_link_debounce(link, params, deadline)))
3913 		return rc;
3914 
3915 	/* clear SError, some PHYs require this even for SRST to work */
3916 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3917 		rc = sata_scr_write(link, SCR_ERROR, serror);
3918 
3919 	return rc != -EINVAL ? rc : 0;
3920 }
3921 
3922 /**
3923  *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3924  *	@link: ATA link to manipulate SControl for
3925  *	@policy: LPM policy to configure
3926  *	@spm_wakeup: initiate LPM transition to active state
3927  *
3928  *	Manipulate the IPM field of the SControl register of @link
3929  *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
3930  *	@spm_wakeup is %true, the SPM field is manipulated to wake up
3931  *	the link.  This function also clears PHYRDY_CHG before
3932  *	returning.
3933  *
3934  *	LOCKING:
3935  *	EH context.
3936  *
3937  *	RETURNS:
3938  *	0 on success, -errno otherwise.
3939  */
3940 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3941 		      bool spm_wakeup)
3942 {
3943 	struct ata_eh_context *ehc = &link->eh_context;
3944 	bool woken_up = false;
3945 	u32 scontrol;
3946 	int rc;
3947 
3948 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3949 	if (rc)
3950 		return rc;
3951 
3952 	switch (policy) {
3953 	case ATA_LPM_MAX_POWER:
3954 		/* disable all LPM transitions */
3955 		scontrol |= (0x7 << 8);
3956 		/* initiate transition to active state */
3957 		if (spm_wakeup) {
3958 			scontrol |= (0x4 << 12);
3959 			woken_up = true;
3960 		}
3961 		break;
3962 	case ATA_LPM_MED_POWER:
3963 		/* allow LPM to PARTIAL */
3964 		scontrol &= ~(0x1 << 8);
3965 		scontrol |= (0x6 << 8);
3966 		break;
3967 	case ATA_LPM_MIN_POWER:
3968 		if (ata_link_nr_enabled(link) > 0)
3969 			/* no restrictions on LPM transitions */
3970 			scontrol &= ~(0x7 << 8);
3971 		else {
3972 			/* empty port, power off */
3973 			scontrol &= ~0xf;
3974 			scontrol |= (0x1 << 2);
3975 		}
3976 		break;
3977 	default:
3978 		WARN_ON(1);
3979 	}
3980 
3981 	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3982 	if (rc)
3983 		return rc;
3984 
3985 	/* give the link time to transit out of LPM state */
3986 	if (woken_up)
3987 		msleep(10);
3988 
3989 	/* clear PHYRDY_CHG from SError */
3990 	ehc->i.serror &= ~SERR_PHYRDY_CHG;
3991 	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3992 }
3993 
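/*
 * Illustrative sketch (not part of this file): the IPM field manipulated
 * above occupies bits 8-11 of SControl and the SPM field bits 12-15.  An
 * LLD putting a link back into full-power mode, waking it through SPM as
 * well, would simply do:
 *
 *	rc = sata_link_scr_lpm(link, ATA_LPM_MAX_POWER, true);
 */
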
3994 /**
3995  *	ata_std_prereset - prepare for reset
3996  *	@link: ATA link to be reset
3997  *	@deadline: deadline jiffies for the operation
3998  *
3999  *	@link is about to be reset.  Initialize it.  Failure from
4000  *	prereset makes libata abort the whole reset sequence and give up
4001  *	that port, so prereset should be best-effort.  It does its
4002  *	best to prepare for the reset sequence, but if things go wrong,
4003  *	it should just whine, not fail.
4004  *
4005  *	LOCKING:
4006  *	Kernel thread context (may sleep)
4007  *
4008  *	RETURNS:
4009  *	0 on success, -errno otherwise.
4010  */
4011 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
4012 {
4013 	struct ata_port *ap = link->ap;
4014 	struct ata_eh_context *ehc = &link->eh_context;
4015 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
4016 	int rc;
4017 
4018 	/* if we're about to do hardreset, nothing more to do */
4019 	if (ehc->i.action & ATA_EH_HARDRESET)
4020 		return 0;
4021 
4022 	/* if SATA, resume link */
4023 	if (ap->flags & ATA_FLAG_SATA) {
4024 		rc = sata_link_resume(link, timing, deadline);
4025 		/* whine about phy resume failure but proceed */
4026 		if (rc && rc != -EOPNOTSUPP)
4027 			ata_link_warn(link,
4028 				      "failed to resume link for reset (errno=%d)\n",
4029 				      rc);
4030 	}
4031 
4032 	/* no point in trying softreset on offline link */
4033 	if (ata_phys_link_offline(link))
4034 		ehc->i.action &= ~ATA_EH_SOFTRESET;
4035 
4036 	return 0;
4037 }
4038 
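/*
 * Illustrative sketch (not part of this file): an LLD prereset method
 * typically does its controller-specific preparation and then falls back
 * to ata_std_prereset() for the common SATA resume handling.
 * my_quiesce_controller() is a hypothetical helper.
 *
 *	static int my_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		my_quiesce_controller(link->ap);
 *		return ata_std_prereset(link, deadline);
 *	}
 */
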
4039 /**
4040  *	sata_link_hardreset - reset link via SATA phy reset
4041  *	@link: link to reset
4042  *	@timing: timing parameters { interval, duration, timeout } in msec
4043  *	@deadline: deadline jiffies for the operation
4044  *	@online: optional out parameter indicating link onlineness
4045  *	@check_ready: optional callback to check link readiness
4046  *
4047  *	SATA phy-reset @link using DET bits of SControl register.
4048  *	After hardreset, link readiness is waited upon using
4049  *	ata_wait_ready() if @check_ready is specified.  LLDs are
4050  *	ata_wait_ready() if @check_ready is specified.  LLDs may also
4051  *	omit @check_ready and perform the wait themselves after this
4052  *	function returns.  Device classification is the LLD's
4053  *	responsibility.
4054  *	*@online is set to one iff reset succeeded and @link is online
4055  *	*@online is set to %true iff the reset succeeded and @link is
4056  *	online after reset.
4057  *	LOCKING:
4058  *	Kernel thread context (may sleep)
4059  *
4060  *	RETURNS:
4061  *	0 on success, -errno otherwise.
4062  */
4063 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
4064 			unsigned long deadline,
4065 			bool *online, int (*check_ready)(struct ata_link *))
4066 {
4067 	u32 scontrol;
4068 	int rc;
4069 
4070 	DPRINTK("ENTER\n");
4071 
4072 	if (online)
4073 		*online = false;
4074 
4075 	if (sata_set_spd_needed(link)) {
4076 		/* SATA spec says nothing about how to reconfigure
4077 		 * spd.  To be on the safe side, turn off phy during
4078 		 * reconfiguration.  This works for at least ICH7 AHCI
4079 		 * and Sil3124.
4080 		 */
4081 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4082 			goto out;
4083 
4084 		scontrol = (scontrol & 0x0f0) | 0x304;
4085 
4086 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4087 			goto out;
4088 
4089 		sata_set_spd(link);
4090 	}
4091 
4092 	/* issue phy wake/reset */
4093 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4094 		goto out;
4095 
4096 	scontrol = (scontrol & 0x0f0) | 0x301;
4097 
4098 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4099 		goto out;
4100 
4101 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4102 	 * 10.4.2 says at least 1 ms.
4103 	 */
4104 	ata_msleep(link->ap, 1);
4105 
4106 	/* bring link back */
4107 	rc = sata_link_resume(link, timing, deadline);
4108 	if (rc)
4109 		goto out;
4110 	/* if link is offline nothing more to do */
4111 	if (ata_phys_link_offline(link))
4112 		goto out;
4113 
4114 	/* Link is online.  From this point, -ENODEV too is an error. */
4115 	if (online)
4116 		*online = true;
4117 
4118 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
4119 		/* If PMP is supported, we have to do follow-up SRST.
4120 		 * Some PMPs don't send D2H Reg FIS after hardreset if
4121 		 * the first port is empty.  Wait only for
4122 		 * ATA_TMOUT_PMP_SRST_WAIT.
4123 		 */
4124 		if (check_ready) {
4125 			unsigned long pmp_deadline;
4126 
4127 			pmp_deadline = ata_deadline(jiffies,
4128 						    ATA_TMOUT_PMP_SRST_WAIT);
4129 			if (time_after(pmp_deadline, deadline))
4130 				pmp_deadline = deadline;
4131 			ata_wait_ready(link, pmp_deadline, check_ready);
4132 		}
4133 		rc = -EAGAIN;
4134 		goto out;
4135 	}
4136 
4137 	rc = 0;
4138 	if (check_ready)
4139 		rc = ata_wait_ready(link, deadline, check_ready);
4140  out:
4141 	if (rc && rc != -EAGAIN) {
4142 		/* online is set iff link is online && reset succeeded */
4143 		if (online)
4144 			*online = false;
4145 		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
4146 	}
4147 	DPRINTK("EXIT, rc=%d\n", rc);
4148 	return rc;
4149 }
4150 
4151 /**
4152  *	sata_std_hardreset - COMRESET w/o waiting or classification
4153  *	@link: link to reset
4154  *	@class: resulting class of attached device
4155  *	@deadline: deadline jiffies for the operation
4156  *
4157  *	Standard SATA COMRESET w/o waiting or classification.
4158  *
4159  *	LOCKING:
4160  *	Kernel thread context (may sleep)
4161  *
4162  *	RETURNS:
4163  *	0 if link offline, -EAGAIN if link online, -errno on errors.
4164  */
4165 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4166 		       unsigned long deadline)
4167 {
4168 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4169 	bool online;
4170 	int rc;
4171 
4172 	/* do hardreset */
4173 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4174 	return online ? -EAGAIN : rc;
4175 }
4176 
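/*
 * Illustrative sketch (not part of this file): drivers that don't need a
 * custom COMRESET usually just point their port operations at the helper
 * above, either directly or by inheriting sata_port_ops which already
 * does so.  my_port_ops is a hypothetical LLD structure.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= sata_std_hardreset,
 *	};
 */
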
4177 /**
4178  *	ata_std_postreset - standard postreset callback
4179  *	@link: the target ata_link
4180  *	@classes: classes of attached devices
4181  *
4182  *	This function is invoked after a successful reset.  Note that
4183  *	the device might have been reset more than once using
4184  *	different reset methods before postreset is invoked.
4185  *
4186  *	LOCKING:
4187  *	Kernel thread context (may sleep)
4188  */
4189 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4190 {
4191 	u32 serror;
4192 
4193 	DPRINTK("ENTER\n");
4194 
4195 	/* reset complete, clear SError */
4196 	if (!sata_scr_read(link, SCR_ERROR, &serror))
4197 		sata_scr_write(link, SCR_ERROR, serror);
4198 
4199 	/* print link status */
4200 	sata_print_link_status(link);
4201 
4202 	DPRINTK("EXIT\n");
4203 }
4204 
4205 /**
4206  *	ata_dev_same_device - Determine whether new ID matches configured device
4207  *	@dev: device to compare against
4208  *	@new_class: class of the new device
4209  *	@new_id: IDENTIFY page of the new device
4210  *
4211  *	Compare @new_class and @new_id against @dev and determine
4212  *	whether @dev is the device indicated by @new_class and
4213  *	@new_id.
4214  *
4215  *	LOCKING:
4216  *	None.
4217  *
4218  *	RETURNS:
4219  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
4220  */
4221 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4222 			       const u16 *new_id)
4223 {
4224 	const u16 *old_id = dev->id;
4225 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
4226 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4227 
4228 	if (dev->class != new_class) {
4229 		ata_dev_info(dev, "class mismatch %d != %d\n",
4230 			     dev->class, new_class);
4231 		return 0;
4232 	}
4233 
4234 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4235 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4236 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4237 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4238 
4239 	if (strcmp(model[0], model[1])) {
4240 		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4241 			     model[0], model[1]);
4242 		return 0;
4243 	}
4244 
4245 	if (strcmp(serial[0], serial[1])) {
4246 		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4247 			     serial[0], serial[1]);
4248 		return 0;
4249 	}
4250 
4251 	return 1;
4252 }
4253 
4254 /**
4255  *	ata_dev_reread_id - Re-read IDENTIFY data
4256  *	@dev: target ATA device
4257  *	@readid_flags: read ID flags
4258  *
4259  *	Re-read IDENTIFY page and make sure @dev is still attached to
4260  *	the port.
4261  *
4262  *	LOCKING:
4263  *	Kernel thread context (may sleep)
4264  *
4265  *	RETURNS:
4266  *	0 on success, negative errno otherwise
4267  */
4268 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4269 {
4270 	unsigned int class = dev->class;
4271 	u16 *id = (void *)dev->link->ap->sector_buf;
4272 	int rc;
4273 
4274 	/* read ID data */
4275 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4276 	if (rc)
4277 		return rc;
4278 
4279 	/* is the device still there? */
4280 	if (!ata_dev_same_device(dev, class, id))
4281 		return -ENODEV;
4282 
4283 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4284 	return 0;
4285 }
4286 
4287 /**
4288  *	ata_dev_revalidate - Revalidate ATA device
4289  *	@dev: device to revalidate
4290  *	@new_class: new class code
4291  *	@readid_flags: read ID flags
4292  *
4293  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4294  *	port and reconfigure it according to the new IDENTIFY page.
4295  *
4296  *	LOCKING:
4297  *	Kernel thread context (may sleep)
4298  *
4299  *	RETURNS:
4300  *	0 on success, negative errno otherwise
4301  */
4302 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4303 		       unsigned int readid_flags)
4304 {
4305 	u64 n_sectors = dev->n_sectors;
4306 	u64 n_native_sectors = dev->n_native_sectors;
4307 	int rc;
4308 
4309 	if (!ata_dev_enabled(dev))
4310 		return -ENODEV;
4311 
4312 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4313 	if (ata_class_enabled(new_class) &&
4314 	    new_class != ATA_DEV_ATA &&
4315 	    new_class != ATA_DEV_ATAPI &&
4316 	    new_class != ATA_DEV_ZAC &&
4317 	    new_class != ATA_DEV_SEMB) {
4318 		ata_dev_info(dev, "class mismatch %u != %u\n",
4319 			     dev->class, new_class);
4320 		rc = -ENODEV;
4321 		goto fail;
4322 	}
4323 
4324 	/* re-read ID */
4325 	rc = ata_dev_reread_id(dev, readid_flags);
4326 	if (rc)
4327 		goto fail;
4328 
4329 	/* configure device according to the new ID */
4330 	rc = ata_dev_configure(dev);
4331 	if (rc)
4332 		goto fail;
4333 
4334 	/* verify n_sectors hasn't changed */
4335 	if (dev->class != ATA_DEV_ATA || !n_sectors ||
4336 	    dev->n_sectors == n_sectors)
4337 		return 0;
4338 
4339 	/* n_sectors has changed */
4340 	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4341 		     (unsigned long long)n_sectors,
4342 		     (unsigned long long)dev->n_sectors);
4343 
4344 	/*
4345 	 * Something could have caused HPA to be unlocked
4346 	 * involuntarily.  If n_native_sectors hasn't changed and the
4347 	 * new size matches it, keep the device.
4348 	 */
4349 	if (dev->n_native_sectors == n_native_sectors &&
4350 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4351 		ata_dev_warn(dev,
4352 			     "new n_sectors matches native, probably "
4353 			     "late HPA unlock, n_sectors updated\n");
4354 		/* use the larger n_sectors */
4355 		return 0;
4356 	}
4357 
4358 	/*
4359 	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4360 	 * unlocking HPA in those cases.
4361 	 *
4362 	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4363 	 */
4364 	if (dev->n_native_sectors == n_native_sectors &&
4365 	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4366 	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4367 		ata_dev_warn(dev,
4368 			     "old n_sectors matches native, probably "
4369 			     "late HPA lock, will try to unlock HPA\n");
4370 		/* try unlocking HPA */
4371 		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4372 		rc = -EIO;
4373 	} else
4374 		rc = -ENODEV;
4375 
4376 	/* restore original n_[native_]sectors and fail */
4377 	dev->n_native_sectors = n_native_sectors;
4378 	dev->n_sectors = n_sectors;
4379  fail:
4380 	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4381 	return rc;
4382 }
4383 
4384 struct ata_blacklist_entry {
4385 	const char *model_num;
4386 	const char *model_rev;
4387 	unsigned long horkage;
4388 };
4389 
4390 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4391 	/* Devices with DMA related problems under Linux */
4392 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4393 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4394 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4395 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4396 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4397 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4398 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4399 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4400 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4401 	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4402 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4403 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4404 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4405 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4406 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4407 	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4408 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4409 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4410 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4411 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4412 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4413 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4414 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4415 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4416 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4417 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4418 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4419 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4420 	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4421 	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
4422 	/* Odd clown on sil3726/4726 PMPs */
4423 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4424 
4425 	/* Weird ATAPI devices */
4426 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4427 	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4428 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4429 	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4430 
4431 	/*
4432 	 * Causes silent data corruption with higher max sects.
4433 	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4434 	 */
4435 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
4436 
4437 	/*
4438 	 * These devices time out with higher max sects.
4439 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4440 	 */
4441 	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4442 
4443 	/* Devices we expect to fail diagnostics */
4444 
4445 	/* Devices where NCQ should be avoided */
4446 	/* NCQ is slow */
4447 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4448 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
4449 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4450 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4451 	/* NCQ is broken */
4452 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4453 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4454 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4455 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4456 	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4457 
4458 	/* Seagate NCQ + FLUSH CACHE firmware bug */
4459 	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4460 						ATA_HORKAGE_FIRMWARE_WARN },
4461 
4462 	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4463 						ATA_HORKAGE_FIRMWARE_WARN },
4464 
4465 	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4466 						ATA_HORKAGE_FIRMWARE_WARN },
4467 
4468 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4469 						ATA_HORKAGE_FIRMWARE_WARN },
4470 
4471 	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4472 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4473 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
4474 	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4475 
4476 	/* Blacklist entries taken from Silicon Image 3124/3132
4477 	   Windows driver .inf file - also several Linux problem reports */
4478 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
4479 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
4480 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4481 
4482 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4483 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
4484 
4485 	/* devices which puke on READ_NATIVE_MAX */
4486 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
4487 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4488 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4489 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4490 
4491 	/* this one allows HPA unlocking but fails IOs on the area */
4492 	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4493 
4494 	/* Devices which report 1 sector over size HPA */
4495 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4496 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4497 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4498 
4499 	/* Devices which get the IVB wrong */
4500 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4501 	/* Maybe we should just blacklist TSSTcorp... */
4502 	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },
4503 
4504 	/* Devices that do not need bridging limits applied */
4505 	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4506 	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
4507 
4508 	/* Devices which aren't very happy with higher link speeds */
4509 	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
4510 	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },
4511 
4512 	/*
4513 	 * Devices which choke on SETXFER.  Applies only if both the
4514 	 * device and controller are SATA.
4515 	 */
4516 	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4517 	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4518 	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4519 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4520 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4521 
4522 	/* devices that don't properly handle queued TRIM commands */
4523 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4524 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4525 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4526 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4527 	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4528 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4529 	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4530 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4531 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4532 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4533 	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4534 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4535 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4536 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4537 
4538 	/* devices that don't properly handle TRIM commands */
4539 	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
4540 
4541 	/*
4542 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4543 	 * (Return Zero After Trim) flags in the ATA Command Set are
4544 	 * unreliable in the sense that they only define what happens if
4545 	 * the device successfully executed the DSM TRIM command. TRIM
4546 	 * is only advisory, however, and the device is free to silently
4547 	 * ignore all or parts of the request.
4548 	 *
4549 	 * Whitelist drives that are known to reliably return zeroes
4550 	 * after TRIM.
4551 	 */
4552 
4553 	/*
4554 	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4555 	 * that model before whitelisting all other intel SSDs.
4556 	 */
4557 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
4558 
4559 	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4560 	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4561 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4562 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4563 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4564 	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4565 	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
4566 
4567 	/*
4568 	 * Some WD SATA-I drives spin up and down erratically when the link
4569 	 * is put into the slumber mode.  We don't have full list of the
4570 	 * affected devices.  Disable LPM if the device matches one of the
4571 	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4572 	 * lost too.
4573 	 *
4574 	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4575 	 */
4576 	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4577 	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4578 	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4579 	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4580 	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4581 	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4582 	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4583 
4584 	/* End Marker */
4585 	{ }
4586 };
4587 
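/*
 * Illustrative note (not a real quirk entry): model_num and model_rev are
 * glob patterns matched by glob_match() below, so a new entry covering a
 * whole hypothetical "FOO SSD 1*" family on firmware revisions starting
 * with "FW1" would look like:
 *
 *	{ "FOO SSD 1*",		"FW1*",		ATA_HORKAGE_NONCQ },
 *
 * A NULL model_rev matches any firmware revision.
 */
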
4588 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4589 {
4590 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4591 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4592 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4593 
4594 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4595 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4596 
4597 	while (ad->model_num) {
4598 		if (glob_match(ad->model_num, model_num)) {
4599 			if (ad->model_rev == NULL)
4600 				return ad->horkage;
4601 			if (glob_match(ad->model_rev, model_rev))
4602 				return ad->horkage;
4603 		}
4604 		ad++;
4605 	}
4606 	return 0;
4607 }
4608 
4609 static int ata_dma_blacklisted(const struct ata_device *dev)
4610 {
4611 	/* We don't support polling DMA.
4612 	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
4613 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4614 	 */
4615 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4616 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4617 		return 1;
4618 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4619 }
4620 
4621 /**
4622  *	ata_is_40wire		-	check drive side detection
4623  *	@dev: device
4624  *
4625  *	Perform drive side detection decoding, allowing for device vendors
4626  *	who can't follow the documentation.
4627  */
4628 
4629 static int ata_is_40wire(struct ata_device *dev)
4630 {
4631 	if (dev->horkage & ATA_HORKAGE_IVB)
4632 		return ata_drive_40wire_relaxed(dev->id);
4633 	return ata_drive_40wire(dev->id);
4634 }
4635 
4636 /**
4637  *	cable_is_40wire		-	40/80/SATA decider
4638  *	@ap: port to consider
4639  *
4640  *	This function encapsulates the policy for speed management
4641  *	in one place. At the moment we don't cache the result but
4642  *	there is a good case for setting ap->cbl to the result when
4643  *	we are called with unknown cables (and figuring out if it
4644  *	impacts hotplug at all).
4645  *
4646  *	Return 1 if the cable appears to be 40 wire.
4647  */
4648 
4649 static int cable_is_40wire(struct ata_port *ap)
4650 {
4651 	struct ata_link *link;
4652 	struct ata_device *dev;
4653 
4654 	/* If the controller thinks we are 40 wire, we are. */
4655 	if (ap->cbl == ATA_CBL_PATA40)
4656 		return 1;
4657 
4658 	/* If the controller thinks we are 80 wire, we are. */
4659 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4660 		return 0;
4661 
4662 	/* If the system is known to be 40 wire short cable (eg
4663 	 * laptop), then we allow 80 wire modes even if the drive
4664 	 * isn't sure.
4665 	 */
4666 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4667 		return 0;
4668 
4669 	/* If the controller doesn't know, we scan.
4670 	 *
4671 	 * Note: We look for all 40 wire detects at this point.  Any
4672 	 *       80 wire detect is taken to be 80 wire cable because
4673 	 * - in many setups only the one drive (slave if present) will
4674 	 *   give a valid detect
4675 	 * - if you have a non detect capable drive you don't want it
4676 	 *   to colour the choice
4677 	 */
4678 	ata_for_each_link(link, ap, EDGE) {
4679 		ata_for_each_dev(dev, link, ENABLED) {
4680 			if (!ata_is_40wire(dev))
4681 				return 0;
4682 		}
4683 	}
4684 	return 1;
4685 }
4686 
4687 /**
4688  *	ata_dev_xfermask - Compute supported xfermask of the given device
4689  *	@dev: Device to compute xfermask for
4690  *
4691  *	Compute supported xfermask of @dev and store it in
4692  *	dev->*_mask.  This function is responsible for applying all
4693  *	known limits including host controller limits, device
4694  *	blacklist, etc...
4695  *
4696  *	LOCKING:
4697  *	None.
4698  */
4699 static void ata_dev_xfermask(struct ata_device *dev)
4700 {
4701 	struct ata_link *link = dev->link;
4702 	struct ata_port *ap = link->ap;
4703 	struct ata_host *host = ap->host;
4704 	unsigned long xfer_mask;
4705 
4706 	/* controller modes available */
4707 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4708 				      ap->mwdma_mask, ap->udma_mask);
4709 
4710 	/* drive modes available */
4711 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4712 				       dev->mwdma_mask, dev->udma_mask);
4713 	xfer_mask &= ata_id_xfermask(dev->id);
4714 
4715 	/*
4716 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4717 	 *	cable
4718 	 */
4719 	if (ata_dev_pair(dev)) {
4720 		/* No PIO5 or PIO6 */
4721 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4722 		/* No MWDMA3 or MWDMA 4 */
4723 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4724 	}
4725 
4726 	if (ata_dma_blacklisted(dev)) {
4727 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4728 		ata_dev_warn(dev,
4729 			     "device is on DMA blacklist, disabling DMA\n");
4730 	}
4731 
4732 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4733 	    host->simplex_claimed && host->simplex_claimed != ap) {
4734 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4735 		ata_dev_warn(dev,
4736 			     "simplex DMA is claimed by other device, disabling DMA\n");
4737 	}
4738 
4739 	if (ap->flags & ATA_FLAG_NO_IORDY)
4740 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4741 
4742 	if (ap->ops->mode_filter)
4743 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4744 
4745 	/* Apply cable rule here.  Don't apply it early because when
4746 	 * we handle hot plug the cable type can itself change.
4747 	 * Check this last so that we know if the transfer rate was
4748 	 * solely limited by the cable.
4749 	 * Unknown or 80 wire cables reported host side are checked
4750 	 * drive side as well. Cases where we know a 40wire cable
4751 	 * is used safely for 80 are not checked here.
4752 	 */
4753 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4754 		/* UDMA/44 or higher would be available */
4755 		if (cable_is_40wire(ap)) {
4756 			ata_dev_warn(dev,
4757 				     "limited to UDMA/33 due to 40-wire cable\n");
4758 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4759 		}
4760 
4761 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4762 			    &dev->mwdma_mask, &dev->udma_mask);
4763 }
4764 
4765 /**
4766  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4767  *	@dev: Device to which command will be sent
4768  *
4769  *	Issue SET FEATURES - XFER MODE command to device @dev
4770  *	on port @ap.
4771  *
4772  *	LOCKING:
4773  *	PCI/etc. bus probe sem.
4774  *
4775  *	RETURNS:
4776  *	0 on success, AC_ERR_* mask otherwise.
4777  */
4778 
4779 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4780 {
4781 	struct ata_taskfile tf;
4782 	unsigned int err_mask;
4783 
4784 	/* set up set-features taskfile */
4785 	DPRINTK("set features - xfer mode\n");
4786 
4787 	/* Some controllers and ATAPI devices show flaky interrupt
4788 	 * behavior after setting xfer mode.  Use polling instead.
4789 	 */
4790 	ata_tf_init(dev, &tf);
4791 	tf.command = ATA_CMD_SET_FEATURES;
4792 	tf.feature = SETFEATURES_XFER;
4793 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4794 	tf.protocol = ATA_PROT_NODATA;
4795 	/* If we are using IORDY we must send the mode setting command */
4796 	if (ata_pio_need_iordy(dev))
4797 		tf.nsect = dev->xfer_mode;
4798 	/* If the device has IORDY and the controller does not - turn it off */
4799  	else if (ata_id_has_iordy(dev->id))
4800 		tf.nsect = 0x01;
4801 	else /* In the ancient relic department - skip all of this */
4802 		return 0;
4803 
4804 	/* On some disks, this command causes spin-up, so we need longer timeout */
4805 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4806 
4807 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4808 	return err_mask;
4809 }
4810 
4811 /**
4812  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4813  *	@dev: Device to which command will be sent
4814  *	@enable: Whether to enable or disable the feature
4815  *	@feature: The feature to set (written to the sector count field)
4816  *
4817  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4818  *	on port @ap with the sector count set to @feature.
4819  *
4820  *	LOCKING:
4821  *	PCI/etc. bus probe sem.
4822  *
4823  *	RETURNS:
4824  *	0 on success, AC_ERR_* mask otherwise.
4825  */
4826 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4827 {
4828 	struct ata_taskfile tf;
4829 	unsigned int err_mask;
4830 	unsigned long timeout = 0;
4831 
4832 	/* set up set-features taskfile */
4833 	DPRINTK("set features - SATA features\n");
4834 
4835 	ata_tf_init(dev, &tf);
4836 	tf.command = ATA_CMD_SET_FEATURES;
4837 	tf.feature = enable;
4838 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4839 	tf.protocol = ATA_PROT_NODATA;
4840 	tf.nsect = feature;
4841 
4842 	if (enable == SETFEATURES_SPINUP)
4843 		timeout = ata_probe_timeout ?
4844 			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4845 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4846 
4847 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4848 	return err_mask;
4849 }
4850 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4851 
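/*
 * Illustrative sketch (not part of this file): enabling a SATA feature,
 * here device-initiated power management, using the usual constants from
 * <linux/ata.h>.
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 */
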
4852 /**
4853  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4854  *	@dev: Device to which command will be sent
4855  *	@heads: Number of heads (taskfile parameter)
4856  *	@sectors: Number of sectors (taskfile parameter)
4857  *
4858  *	LOCKING:
4859  *	Kernel thread context (may sleep)
4860  *
4861  *	RETURNS:
4862  *	0 on success, AC_ERR_* mask otherwise.
4863  */
4864 static unsigned int ata_dev_init_params(struct ata_device *dev,
4865 					u16 heads, u16 sectors)
4866 {
4867 	struct ata_taskfile tf;
4868 	unsigned int err_mask;
4869 
4870 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4871 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4872 		return AC_ERR_INVALID;
4873 
4874 	/* set up init dev params taskfile */
4875 	DPRINTK("init dev params \n");
4876 
4877 	ata_tf_init(dev, &tf);
4878 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4879 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4880 	tf.protocol = ATA_PROT_NODATA;
4881 	tf.nsect = sectors;
4882 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4883 
4884 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4885 	/* A clean abort indicates an original or just out-of-spec drive,
4886 	   and we should continue as we issue the setup based on the
4887 	   drive-reported working geometry */
4888 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4889 		err_mask = 0;
4890 
4891 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4892 	return err_mask;
4893 }
4894 
4895 /**
4896  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4897  *	@qc: Metadata associated with taskfile to check
4898  *
4899  *	Allow low-level driver to filter ATA PACKET commands, returning
4900  *	a status indicating whether or not it is OK to use DMA for the
4901  *	supplied PACKET command.
4902  *
4903  *	LOCKING:
4904  *	spin_lock_irqsave(host lock)
4905  *
4906  *	RETURNS: 0 when ATAPI DMA can be used
4907  *               nonzero otherwise
4908  */
4909 int atapi_check_dma(struct ata_queued_cmd *qc)
4910 {
4911 	struct ata_port *ap = qc->ap;
4912 
4913 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4914 	 * few ATAPI devices choke on such DMA requests.
4915 	 */
4916 	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4917 	    unlikely(qc->nbytes & 15))
4918 		return 1;
4919 
4920 	if (ap->ops->check_atapi_dma)
4921 		return ap->ops->check_atapi_dma(qc);
4922 
4923 	return 0;
4924 }
4925 
4926 /**
4927  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4928  *	@qc: ATA command in question
4929  *
4930  *	Non-NCQ commands cannot run with any other command, NCQ or
4931  *	not.  As the upper layer only knows the queue depth, we are
4932  *	responsible for maintaining exclusion.  This function checks
4933  *	whether a new command @qc can be issued.
4934  *
4935  *	LOCKING:
4936  *	spin_lock_irqsave(host lock)
4937  *
4938  *	RETURNS:
4939  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4940  */
4941 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4942 {
4943 	struct ata_link *link = qc->dev->link;
4944 
4945 	if (ata_is_ncq(qc->tf.protocol)) {
4946 		if (!ata_tag_valid(link->active_tag))
4947 			return 0;
4948 	} else {
4949 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4950 			return 0;
4951 	}
4952 
4953 	return ATA_DEFER_LINK;
4954 }
4955 
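/*
 * Illustrative sketch (not part of this file): an LLD with an extra
 * controller-specific restriction would apply its own check first and
 * then defer to the standard policy above.  my_controller_saturated() is
 * a hypothetical helper.
 *
 *	static int my_qc_defer(struct ata_queued_cmd *qc)
 *	{
 *		if (my_controller_saturated(qc->ap))
 *			return ATA_DEFER_PORT;
 *		return ata_std_qc_defer(qc);
 *	}
 */
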
4956 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4957 
4958 /**
4959  *	ata_sg_init - Associate command with scatter-gather table.
4960  *	@qc: Command to be associated
4961  *	@sg: Scatter-gather table.
4962  *	@n_elem: Number of elements in s/g table.
4963  *
4964  *	Initialize the data-related elements of queued_cmd @qc
4965  *	to point to a scatter-gather table @sg, containing @n_elem
4966  *	elements.
4967  *
4968  *	LOCKING:
4969  *	spin_lock_irqsave(host lock)
4970  */
4971 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4972 		 unsigned int n_elem)
4973 {
4974 	qc->sg = sg;
4975 	qc->n_elem = n_elem;
4976 	qc->cursg = qc->sg;
4977 }
4978 
4979 #ifdef CONFIG_HAS_DMA
4980 
4981 /**
4982  *	ata_sg_clean - Unmap DMA memory associated with command
4983  *	@qc: Command containing DMA memory to be released
4984  *
4985  *	Unmap all mapped DMA memory associated with this command.
4986  *
4987  *	LOCKING:
4988  *	spin_lock_irqsave(host lock)
4989  */
4990 static void ata_sg_clean(struct ata_queued_cmd *qc)
4991 {
4992 	struct ata_port *ap = qc->ap;
4993 	struct scatterlist *sg = qc->sg;
4994 	int dir = qc->dma_dir;
4995 
4996 	WARN_ON_ONCE(sg == NULL);
4997 
4998 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4999 
5000 	if (qc->n_elem)
5001 		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
5002 
5003 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5004 	qc->sg = NULL;
5005 }
5006 
5007 /**
5008  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
5009  *	@qc: Command with scatter-gather table to be mapped.
5010  *
5011  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
5012  *
5013  *	LOCKING:
5014  *	spin_lock_irqsave(host lock)
5015  *
5016  *	RETURNS:
5017  *	Zero on success, negative on error.
5018  *
5019  */
5020 static int ata_sg_setup(struct ata_queued_cmd *qc)
5021 {
5022 	struct ata_port *ap = qc->ap;
5023 	unsigned int n_elem;
5024 
5025 	VPRINTK("ENTER, ata%u\n", ap->print_id);
5026 
5027 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5028 	if (n_elem < 1)
5029 		return -1;
5030 
5031 	DPRINTK("%d sg elements mapped\n", n_elem);
5032 	qc->orig_n_elem = qc->n_elem;
5033 	qc->n_elem = n_elem;
5034 	qc->flags |= ATA_QCFLAG_DMAMAP;
5035 
5036 	return 0;
5037 }
5038 
5039 #else /* !CONFIG_HAS_DMA */
5040 
5041 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
5042 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
5043 
5044 #endif /* !CONFIG_HAS_DMA */
5045 
5046 /**
5047  *	swap_buf_le16 - swap halves of 16-bit words in place
5048  *	@buf:  Buffer to swap
5049  *	@buf_words:  Number of 16-bit words in buffer.
5050  *
5051  *	Swap halves of 16-bit words if needed to convert from
5052  *	little-endian byte order to native cpu byte order, or
5053  *	vice-versa.
5054  *
5055  *	LOCKING:
5056  *	Inherited from caller.
5057  */
5058 void swap_buf_le16(u16 *buf, unsigned int buf_words)
5059 {
5060 #ifdef __BIG_ENDIAN
5061 	unsigned int i;
5062 
5063 	for (i = 0; i < buf_words; i++)
5064 		buf[i] = le16_to_cpu(buf[i]);
5065 #endif /* __BIG_ENDIAN */
5066 }
5067 
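/*
 * Illustrative sketch (not part of this file): IDENTIFY data arrives as
 * little-endian 16-bit words, so PIO data-in paths convert it to CPU
 * byte order before parsing.  On little-endian machines this is a no-op.
 *
 *	swap_buf_le16(dev->id, ATA_ID_WORDS);
 */
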
5068 /**
5069  *	ata_qc_new_init - Request an available ATA command, and initialize it
5070  *	@dev: Device from whom we request an available command structure
5071  *	@tag: tag
5072  *
5073  *	LOCKING:
5074  *	None.
5075  */
5076 
5077 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
5078 {
5079 	struct ata_port *ap = dev->link->ap;
5080 	struct ata_queued_cmd *qc;
5081 
5082 	/* no command while frozen */
5083 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5084 		return NULL;
5085 
5086 	/* libsas case */
5087 	if (ap->flags & ATA_FLAG_SAS_HOST) {
5088 		tag = ata_sas_allocate_tag(ap);
5089 		if (tag < 0)
5090 			return NULL;
5091 	}
5092 
5093 	qc = __ata_qc_from_tag(ap, tag);
5094 	qc->tag = tag;
5095 	qc->scsicmd = NULL;
5096 	qc->ap = ap;
5097 	qc->dev = dev;
5098 
5099 	ata_qc_reinit(qc);
5100 
5101 	return qc;
5102 }
5103 
5104 /**
5105  *	ata_qc_free - free unused ata_queued_cmd
5106  *	@qc: Command to complete
5107  *
5108  *	Designed to free an unused ata_queued_cmd object
5109  *	in case something prevents using it.
5110  *
5111  *	LOCKING:
5112  *	spin_lock_irqsave(host lock)
5113  */
5114 void ata_qc_free(struct ata_queued_cmd *qc)
5115 {
5116 	struct ata_port *ap;
5117 	unsigned int tag;
5118 
5119 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5120 	ap = qc->ap;
5121 
5122 	qc->flags = 0;
5123 	tag = qc->tag;
5124 	if (likely(ata_tag_valid(tag))) {
5125 		qc->tag = ATA_TAG_POISON;
5126 		if (ap->flags & ATA_FLAG_SAS_HOST)
5127 			ata_sas_free_tag(tag, ap);
5128 	}
5129 }
5130 
5131 void __ata_qc_complete(struct ata_queued_cmd *qc)
5132 {
5133 	struct ata_port *ap;
5134 	struct ata_link *link;
5135 
5136 	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5137 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
5138 	ap = qc->ap;
5139 	link = qc->dev->link;
5140 
5141 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5142 		ata_sg_clean(qc);
5143 
5144 	/* command should be marked inactive atomically with qc completion */
5145 	if (ata_is_ncq(qc->tf.protocol)) {
5146 		link->sactive &= ~(1 << qc->tag);
5147 		if (!link->sactive)
5148 			ap->nr_active_links--;
5149 	} else {
5150 		link->active_tag = ATA_TAG_POISON;
5151 		ap->nr_active_links--;
5152 	}
5153 
5154 	/* clear exclusive status */
5155 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5156 		     ap->excl_link == link))
5157 		ap->excl_link = NULL;
5158 
5159 	/* atapi: mark qc as inactive to prevent the interrupt handler
5160 	 * from completing the command twice later, before the error handler
5161 	 * is called. (when rc != 0 and atapi request sense is needed)
5162 	 */
5163 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5164 	ap->qc_active &= ~(1 << qc->tag);
5165 
5166 	/* call completion callback */
5167 	qc->complete_fn(qc);
5168 }
5169 
5170 static void fill_result_tf(struct ata_queued_cmd *qc)
5171 {
5172 	struct ata_port *ap = qc->ap;
5173 
5174 	qc->result_tf.flags = qc->tf.flags;
5175 	ap->ops->qc_fill_rtf(qc);
5176 }
5177 
5178 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5179 {
5180 	struct ata_device *dev = qc->dev;
5181 
5182 	if (!ata_is_data(qc->tf.protocol))
5183 		return;
5184 
5185 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5186 		return;
5187 
5188 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5189 }
5190 
5191 /**
5192  *	ata_qc_complete - Complete an active ATA command
5193  *	@qc: Command to complete
5194  *
5195  *	Indicate to the mid and upper layers that an ATA command has
5196  *	completed, with either an ok or not-ok status.
5197  *
5198  *	Refrain from calling this function multiple times when
5199  *	successfully completing multiple NCQ commands.
5200  *	ata_qc_complete_multiple() should be used instead, which will
5201  *	properly update IRQ expect state.
5202  *
5203  *	LOCKING:
5204  *	spin_lock_irqsave(host lock)
5205  */
5206 void ata_qc_complete(struct ata_queued_cmd *qc)
5207 {
5208 	struct ata_port *ap = qc->ap;
5209 
5210 	/* Trigger the LED (if available) */
5211 	ledtrig_disk_activity();
5212 
5213 	/* XXX: New EH and old EH use different mechanisms to
5214 	 * synchronize EH with regular execution path.
5215 	 *
5216 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5217 	 * Normal execution path is responsible for not accessing a
5218 	 * failed qc.  libata core enforces the rule by returning NULL
5219 	 * from ata_qc_from_tag() for failed qcs.
5220 	 *
5221 	 * Old EH depends on ata_qc_complete() nullifying completion
5222 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5223 	 * not synchronize with interrupt handler.  Only PIO task is
5224 	 * taken care of.
5225 	 */
5226 	if (ap->ops->error_handler) {
5227 		struct ata_device *dev = qc->dev;
5228 		struct ata_eh_info *ehi = &dev->link->eh_info;
5229 
5230 		if (unlikely(qc->err_mask))
5231 			qc->flags |= ATA_QCFLAG_FAILED;
5232 
5233 		/*
5234 		 * Finish internal commands without any further processing
5235 		 * and always with the result TF filled.
5236 		 */
5237 		if (unlikely(ata_tag_internal(qc->tag))) {
5238 			fill_result_tf(qc);
5239 			trace_ata_qc_complete_internal(qc);
5240 			__ata_qc_complete(qc);
5241 			return;
5242 		}
5243 
5244 		/*
5245 		 * Non-internal qc has failed.  Fill the result TF and
5246 		 * summon EH.
5247 		 */
5248 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5249 			fill_result_tf(qc);
5250 			trace_ata_qc_complete_failed(qc);
5251 			ata_qc_schedule_eh(qc);
5252 			return;
5253 		}
5254 
5255 		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5256 
5257 		/* read result TF if requested */
5258 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5259 			fill_result_tf(qc);
5260 
5261 		trace_ata_qc_complete_done(qc);
5262 		/* Some commands need post-processing after successful
5263 		 * completion.
5264 		 */
5265 		switch (qc->tf.command) {
5266 		case ATA_CMD_SET_FEATURES:
5267 			if (qc->tf.feature != SETFEATURES_WC_ON &&
5268 			    qc->tf.feature != SETFEATURES_WC_OFF &&
5269 			    qc->tf.feature != SETFEATURES_RA_ON &&
5270 			    qc->tf.feature != SETFEATURES_RA_OFF)
5271 				break;
5272 			/* fall through */
5273 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5274 		case ATA_CMD_SET_MULTI: /* multi_count changed */
5275 			/* revalidate device */
5276 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5277 			ata_port_schedule_eh(ap);
5278 			break;
5279 
5280 		case ATA_CMD_SLEEP:
5281 			dev->flags |= ATA_DFLAG_SLEEPING;
5282 			break;
5283 		}
5284 
5285 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5286 			ata_verify_xfer(qc);
5287 
5288 		__ata_qc_complete(qc);
5289 	} else {
5290 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5291 			return;
5292 
5293 		/* read result TF if failed or requested */
5294 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5295 			fill_result_tf(qc);
5296 
5297 		__ata_qc_complete(qc);
5298 	}
5299 }
5300 
5301 /**
5302  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5303  *	@ap: port in question
5304  *	@qc_active: new qc_active mask
5305  *
5306  *	Complete in-flight commands.  This function is meant to be
5307  *	called from the low-level driver's interrupt routine to complete
5308  *	requests normally.  ap->qc_active and @qc_active are compared
5309  *	and commands are completed accordingly.
5310  *
5311  *	Always use this function when completing multiple NCQ commands
5312  *	from IRQ handlers instead of calling ata_qc_complete()
5313  *	multiple times to keep IRQ expect status properly in sync.
5314  *
5315  *	LOCKING:
5316  *	spin_lock_irqsave(host lock)
5317  *
5318  *	RETURNS:
5319  *	Number of completed commands on success, -errno otherwise.
5320  */
5321 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5322 {
5323 	int nr_done = 0;
5324 	u32 done_mask;
5325 
5326 	done_mask = ap->qc_active ^ qc_active;
5327 
5328 	if (unlikely(done_mask & qc_active)) {
5329 		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5330 			     ap->qc_active, qc_active);
5331 		return -EINVAL;
5332 	}
5333 
5334 	while (done_mask) {
5335 		struct ata_queued_cmd *qc;
5336 		unsigned int tag = __ffs(done_mask);
5337 
5338 		qc = ata_qc_from_tag(ap, tag);
5339 		if (qc) {
5340 			ata_qc_complete(qc);
5341 			nr_done++;
5342 		}
5343 		done_mask &= ~(1 << tag);
5344 	}
5345 
5346 	return nr_done;
5347 }
5348 
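/*
 * Illustrative sketch (not part of this file): an NCQ-capable LLD
 * interrupt handler reads the hardware's "still active" tag bitmap and
 * hands it to ata_qc_complete_multiple(), which completes every tag that
 * has dropped out of ap->qc_active.  my_read_active_tags() is a
 * hypothetical register read.
 *
 *	static void my_host_intr(struct ata_port *ap)
 *	{
 *		u32 qc_active = my_read_active_tags(ap);
 *
 *		ata_qc_complete_multiple(ap, qc_active);
 *	}
 */
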
5349 /**
5350  *	ata_qc_issue - issue taskfile to device
5351  *	@qc: command to issue to device
5352  *
5353  *	Prepare an ATA command for submission to the device.
5354  *	This includes mapping the data into a DMA-able
5355  *	area, filling in the S/G table, and finally
5356  *	writing the taskfile to hardware, starting the command.
5357  *
5358  *	LOCKING:
5359  *	spin_lock_irqsave(host lock)
5360  */
5361 void ata_qc_issue(struct ata_queued_cmd *qc)
5362 {
5363 	struct ata_port *ap = qc->ap;
5364 	struct ata_link *link = qc->dev->link;
5365 	u8 prot = qc->tf.protocol;
5366 
5367 	/* Make sure only one non-NCQ command is outstanding.  The
5368 	 * check is skipped for old EH because it reuses active qc to
5369 	 * request ATAPI sense.
5370 	 */
5371 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5372 
5373 	if (ata_is_ncq(prot)) {
5374 		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5375 
5376 		if (!link->sactive)
5377 			ap->nr_active_links++;
5378 		link->sactive |= 1 << qc->tag;
5379 	} else {
5380 		WARN_ON_ONCE(link->sactive);
5381 
5382 		ap->nr_active_links++;
5383 		link->active_tag = qc->tag;
5384 	}
5385 
5386 	qc->flags |= ATA_QCFLAG_ACTIVE;
5387 	ap->qc_active |= 1 << qc->tag;
5388 
5389 	/*
5390 	 * We guarantee to LLDs that they will have at least one
5391 	 * non-zero sg if the command is a data command.
5392 	 */
5393 	if (WARN_ON_ONCE(ata_is_data(prot) &&
5394 			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5395 		goto sys_err;
5396 
5397 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5398 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5399 		if (ata_sg_setup(qc))
5400 			goto sys_err;
5401 
5402 	/* if device is sleeping, schedule reset and abort the link */
5403 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5404 		link->eh_info.action |= ATA_EH_RESET;
5405 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5406 		ata_link_abort(link);
5407 		return;
5408 	}
5409 
5410 	ap->ops->qc_prep(qc);
5411 	trace_ata_qc_issue(qc);
5412 	qc->err_mask |= ap->ops->qc_issue(qc);
5413 	if (unlikely(qc->err_mask))
5414 		goto err;
5415 	return;
5416 
5417 sys_err:
5418 	qc->err_mask |= AC_ERR_SYSTEM;
5419 err:
5420 	ata_qc_complete(qc);
5421 }
5422 
5423 /**
5424  *	sata_scr_valid - test whether SCRs are accessible
5425  *	@link: ATA link to test SCR accessibility for
5426  *
5427  *	Test whether SCRs are accessible for @link.
5428  *
5429  *	LOCKING:
5430  *	None.
5431  *
5432  *	RETURNS:
5433  *	1 if SCRs are accessible, 0 otherwise.
5434  */
5435 int sata_scr_valid(struct ata_link *link)
5436 {
5437 	struct ata_port *ap = link->ap;
5438 
5439 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5440 }
5441 
5442 /**
5443  *	sata_scr_read - read SCR register of the specified port
5444  *	@link: ATA link to read SCR for
5445  *	@reg: SCR to read
5446  *	@val: Place to store read value
5447  *
5448  *	Read SCR register @reg of @link into *@val.  This function is
5449  *	guaranteed to succeed if @link is ap->link, the cable type of
5450  *	the port is SATA and the port implements ->scr_read.
5451  *
5452  *	LOCKING:
5453  *	None if @link is ap->link.  Kernel thread context otherwise.
5454  *
5455  *	RETURNS:
5456  *	0 on success, negative errno on failure.
5457  */
5458 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5459 {
5460 	if (ata_is_host_link(link)) {
5461 		if (sata_scr_valid(link))
5462 			return link->ap->ops->scr_read(link, reg, val);
5463 		return -EOPNOTSUPP;
5464 	}
5465 
5466 	return sata_pmp_scr_read(link, reg, val);
5467 }
5468 
5469 /**
5470  *	sata_scr_write - write SCR register of the specified port
5471  *	@link: ATA link to write SCR for
5472  *	@reg: SCR to write
5473  *	@val: value to write
5474  *
5475  *	Write @val to SCR register @reg of @link.  This function is
5476  *	guaranteed to succeed if @link is ap->link, the cable type of
5477  *	the port is SATA and the port implements ->scr_write.
5478  *
5479  *	LOCKING:
5480  *	None if @link is ap->link.  Kernel thread context otherwise.
5481  *
5482  *	RETURNS:
5483  *	0 on success, negative errno on failure.
5484  */
5485 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5486 {
5487 	if (ata_is_host_link(link)) {
5488 		if (sata_scr_valid(link))
5489 			return link->ap->ops->scr_write(link, reg, val);
5490 		return -EOPNOTSUPP;
5491 	}
5492 
5493 	return sata_pmp_scr_write(link, reg, val);
5494 }
5495 
5496 /**
5497  *	sata_scr_write_flush - write SCR register of the specified port and flush
5498  *	@link: ATA link to write SCR for
5499  *	@reg: SCR to write
5500  *	@val: value to write
5501  *
5502  *	This function is identical to sata_scr_write() except that this
5503  *	function performs flush after writing to the register.
5504  *
5505  *	LOCKING:
5506  *	None if @link is ap->link.  Kernel thread context otherwise.
5507  *
5508  *	RETURNS:
5509  *	0 on success, negative errno on failure.
5510  */
5511 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5512 {
5513 	if (ata_is_host_link(link)) {
5514 		int rc;
5515 
5516 		if (sata_scr_valid(link)) {
5517 			rc = link->ap->ops->scr_write(link, reg, val);
5518 			if (rc == 0)
5519 				rc = link->ap->ops->scr_read(link, reg, &val);
5520 			return rc;
5521 		}
5522 		return -EOPNOTSUPP;
5523 	}
5524 
5525 	return sata_pmp_scr_write(link, reg, val);
5526 }
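
/*
 * Usage sketch: the SCR accessors above are typically used from LLD or
 * EH context to inspect link state.  A minimal check of the negotiated
 * interface speed might look like this (my_report_spd() is illustrative):
 *
 *	static int my_report_spd(struct ata_link *link)
 *	{
 *		u32 sstatus;
 *		int rc;
 *
 *		rc = sata_scr_read(link, SCR_STATUS, &sstatus);
 *		if (rc)
 *			return rc;
 *
 *		ata_link_info(link, "SPD field: %u\n", (sstatus >> 4) & 0xf);
 *		return 0;
 *	}
 */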
5527 
5528 /**
5529  *	ata_phys_link_online - test whether the given link is online
5530  *	@link: ATA link to test
5531  *
5532  *	Test whether @link is online.  Note that this function returns
5533  *	0 if online status of @link cannot be obtained, so
5534  *	ata_link_online(link) != !ata_link_offline(link).
5535  *
5536  *	LOCKING:
5537  *	None.
5538  *
5539  *	RETURNS:
5540  *	True if the port online status is available and online.
5541  */
5542 bool ata_phys_link_online(struct ata_link *link)
5543 {
5544 	u32 sstatus;
5545 
5546 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5547 	    ata_sstatus_online(sstatus))
5548 		return true;
5549 	return false;
5550 }
5551 
5552 /**
5553  *	ata_phys_link_offline - test whether the given link is offline
5554  *	@link: ATA link to test
5555  *
5556  *	Test whether @link is offline.  Note that this function
5557  *	returns 0 if offline status of @link cannot be obtained, so
5558  *	ata_link_online(link) != !ata_link_offline(link).
5559  *
5560  *	LOCKING:
5561  *	None.
5562  *
5563  *	RETURNS:
5564  *	True if the port offline status is available and offline.
5565  */
5566 bool ata_phys_link_offline(struct ata_link *link)
5567 {
5568 	u32 sstatus;
5569 
5570 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5571 	    !ata_sstatus_online(sstatus))
5572 		return true;
5573 	return false;
5574 }
5575 
5576 /**
5577  *	ata_link_online - test whether the given link is online
5578  *	@link: ATA link to test
5579  *
5580  *	Test whether @link is online.  This is identical to
5581  *	ata_phys_link_online() when there's no slave link.  When
5582  *	there's a slave link, this function should only be called on
5583  *	the master link and will return true if any of M/S links is
5584  *	online.
5585  *
5586  *	LOCKING:
5587  *	None.
5588  *
5589  *	RETURNS:
5590  *	True if the port online status is available and online.
5591  */
5592 bool ata_link_online(struct ata_link *link)
5593 {
5594 	struct ata_link *slave = link->ap->slave_link;
5595 
5596 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5597 
5598 	return ata_phys_link_online(link) ||
5599 		(slave && ata_phys_link_online(slave));
5600 }
5601 
5602 /**
5603  *	ata_link_offline - test whether the given link is offline
5604  *	@link: ATA link to test
5605  *
5606  *	Test whether @link is offline.  This is identical to
5607  *	ata_phys_link_offline() when there's no slave link.  When
5608  *	there's a slave link, this function should only be called on
5609  *	the master link and will return true if both M/S links are
5610  *	offline.
5611  *
5612  *	LOCKING:
5613  *	None.
5614  *
5615  *	RETURNS:
5616  *	True if the port offline status is available and offline.
5617  */
5618 bool ata_link_offline(struct ata_link *link)
5619 {
5620 	struct ata_link *slave = link->ap->slave_link;
5621 
5622 	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5623 
5624 	return ata_phys_link_offline(link) &&
5625 		(!slave || ata_phys_link_offline(slave));
5626 }
5627 
5628 #ifdef CONFIG_PM
5629 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5630 				unsigned int action, unsigned int ehi_flags,
5631 				bool async)
5632 {
5633 	struct ata_link *link;
5634 	unsigned long flags;
5635 
5636 	/* Previous resume operation might still be in
5637 	 * progress.  Wait for PM_PENDING to clear.
5638 	 */
5639 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5640 		ata_port_wait_eh(ap);
5641 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5642 	}
5643 
5644 	/* request PM ops to EH */
5645 	spin_lock_irqsave(ap->lock, flags);
5646 
5647 	ap->pm_mesg = mesg;
5648 	ap->pflags |= ATA_PFLAG_PM_PENDING;
5649 	ata_for_each_link(link, ap, HOST_FIRST) {
5650 		link->eh_info.action |= action;
5651 		link->eh_info.flags |= ehi_flags;
5652 	}
5653 
5654 	ata_port_schedule_eh(ap);
5655 
5656 	spin_unlock_irqrestore(ap->lock, flags);
5657 
5658 	if (!async) {
5659 		ata_port_wait_eh(ap);
5660 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5661 	}
5662 }
5663 
5664 /*
5665  * On some hardware, a device fails to respond after being spun down for suspend.  As
5666  * the device won't be used before being resumed, we don't need to touch the
5667  * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
5668  *
5669  * http://thread.gmane.org/gmane.linux.ide/46764
5670  */
5671 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5672 						 | ATA_EHI_NO_AUTOPSY
5673 						 | ATA_EHI_NO_RECOVERY;
5674 
5675 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5676 {
5677 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5678 }
5679 
5680 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5681 {
5682 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5683 }
5684 
5685 static int ata_port_pm_suspend(struct device *dev)
5686 {
5687 	struct ata_port *ap = to_ata_port(dev);
5688 
5689 	if (pm_runtime_suspended(dev))
5690 		return 0;
5691 
5692 	ata_port_suspend(ap, PMSG_SUSPEND);
5693 	return 0;
5694 }
5695 
5696 static int ata_port_pm_freeze(struct device *dev)
5697 {
5698 	struct ata_port *ap = to_ata_port(dev);
5699 
5700 	if (pm_runtime_suspended(dev))
5701 		return 0;
5702 
5703 	ata_port_suspend(ap, PMSG_FREEZE);
5704 	return 0;
5705 }
5706 
5707 static int ata_port_pm_poweroff(struct device *dev)
5708 {
5709 	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5710 	return 0;
5711 }
5712 
5713 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5714 						| ATA_EHI_QUIET;
5715 
5716 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5717 {
5718 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5719 }
5720 
5721 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5722 {
5723 	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5724 }
5725 
5726 static int ata_port_pm_resume(struct device *dev)
5727 {
5728 	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5729 	pm_runtime_disable(dev);
5730 	pm_runtime_set_active(dev);
5731 	pm_runtime_enable(dev);
5732 	return 0;
5733 }
5734 
5735 /*
5736  * For ODDs, the upper layer will poll for media change every few seconds,
5737  * which makes it enter and leave the suspend state every few seconds.  As
5738  * each suspend triggers a hard/soft reset, runtime suspend gains very little
5739  * and the ODD may malfunction after being reset repeatedly.
5740  * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5741  * ODD is attached to the port.
5742  */
5743 static int ata_port_runtime_idle(struct device *dev)
5744 {
5745 	struct ata_port *ap = to_ata_port(dev);
5746 	struct ata_link *link;
5747 	struct ata_device *adev;
5748 
5749 	ata_for_each_link(link, ap, HOST_FIRST) {
5750 		ata_for_each_dev(adev, link, ENABLED)
5751 			if (adev->class == ATA_DEV_ATAPI &&
5752 			    !zpodd_dev_enabled(adev))
5753 				return -EBUSY;
5754 	}
5755 
5756 	return 0;
5757 }
5758 
5759 static int ata_port_runtime_suspend(struct device *dev)
5760 {
5761 	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5762 	return 0;
5763 }
5764 
5765 static int ata_port_runtime_resume(struct device *dev)
5766 {
5767 	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5768 	return 0;
5769 }
5770 
5771 static const struct dev_pm_ops ata_port_pm_ops = {
5772 	.suspend = ata_port_pm_suspend,
5773 	.resume = ata_port_pm_resume,
5774 	.freeze = ata_port_pm_freeze,
5775 	.thaw = ata_port_pm_resume,
5776 	.poweroff = ata_port_pm_poweroff,
5777 	.restore = ata_port_pm_resume,
5778 
5779 	.runtime_suspend = ata_port_runtime_suspend,
5780 	.runtime_resume = ata_port_runtime_resume,
5781 	.runtime_idle = ata_port_runtime_idle,
5782 };
5783 
5784 /* sas ports don't participate in pm runtime management of ata_ports,
5785  * and need to resume ata devices at the domain level, not the per-port
5786  * level. sas suspend/resume is async to allow parallel port recovery
5787  * since sas has multiple ata_port instances per Scsi_Host.
5788  */
5789 void ata_sas_port_suspend(struct ata_port *ap)
5790 {
5791 	ata_port_suspend_async(ap, PMSG_SUSPEND);
5792 }
5793 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5794 
5795 void ata_sas_port_resume(struct ata_port *ap)
5796 {
5797 	ata_port_resume_async(ap, PMSG_RESUME);
5798 }
5799 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5800 
5801 /**
5802  *	ata_host_suspend - suspend host
5803  *	@host: host to suspend
5804  *	@mesg: PM message
5805  *
5806  *	Suspend @host.  Actual operation is performed by port suspend.
5807  */
5808 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5809 {
5810 	host->dev->power.power_state = mesg;
5811 	return 0;
5812 }
5813 
5814 /**
5815  *	ata_host_resume - resume host
5816  *	@host: host to resume
5817  *
5818  *	Resume @host.  Actual operation is performed by port resume.
5819  */
5820 void ata_host_resume(struct ata_host *host)
5821 {
5822 	host->dev->power.power_state = PMSG_ON;
5823 }
5824 #endif
5825 
5826 struct device_type ata_port_type = {
5827 	.name = "ata_port",
5828 #ifdef CONFIG_PM
5829 	.pm = &ata_port_pm_ops,
5830 #endif
5831 };
5832 
5833 /**
5834  *	ata_dev_init - Initialize an ata_device structure
5835  *	@dev: Device structure to initialize
5836  *
5837  *	Initialize @dev in preparation for probing.
5838  *
5839  *	LOCKING:
5840  *	Inherited from caller.
5841  */
5842 void ata_dev_init(struct ata_device *dev)
5843 {
5844 	struct ata_link *link = ata_dev_phys_link(dev);
5845 	struct ata_port *ap = link->ap;
5846 	unsigned long flags;
5847 
5848 	/* SATA spd limit is bound to the attached device, reset together */
5849 	link->sata_spd_limit = link->hw_sata_spd_limit;
5850 	link->sata_spd = 0;
5851 
5852 	/* High bits of dev->flags are used to record warm plug
5853 	 * requests which occur asynchronously.  Synchronize using
5854 	 * host lock.
5855 	 */
5856 	spin_lock_irqsave(ap->lock, flags);
5857 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5858 	dev->horkage = 0;
5859 	spin_unlock_irqrestore(ap->lock, flags);
5860 
5861 	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5862 	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5863 	dev->pio_mask = UINT_MAX;
5864 	dev->mwdma_mask = UINT_MAX;
5865 	dev->udma_mask = UINT_MAX;
5866 }
5867 
5868 /**
5869  *	ata_link_init - Initialize an ata_link structure
5870  *	@ap: ATA port link is attached to
5871  *	@link: Link structure to initialize
5872  *	@pmp: Port multiplier port number
5873  *
5874  *	Initialize @link.
5875  *
5876  *	LOCKING:
5877  *	Kernel thread context (may sleep)
5878  */
5879 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5880 {
5881 	int i;
5882 
5883 	/* clear everything except for devices */
5884 	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5885 	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5886 
5887 	link->ap = ap;
5888 	link->pmp = pmp;
5889 	link->active_tag = ATA_TAG_POISON;
5890 	link->hw_sata_spd_limit = UINT_MAX;
5891 
5892 	/* can't use iterator, ap isn't initialized yet */
5893 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5894 		struct ata_device *dev = &link->device[i];
5895 
5896 		dev->link = link;
5897 		dev->devno = dev - link->device;
5898 #ifdef CONFIG_ATA_ACPI
5899 		dev->gtf_filter = ata_acpi_gtf_filter;
5900 #endif
5901 		ata_dev_init(dev);
5902 	}
5903 }
5904 
5905 /**
5906  *	sata_link_init_spd - Initialize link->sata_spd_limit
5907  *	@link: Link to configure sata_spd_limit for
5908  *
5909  *	Initialize @link->[hw_]sata_spd_limit to the currently
5910  *	configured value.
5911  *
5912  *	LOCKING:
5913  *	Kernel thread context (may sleep).
5914  *
5915  *	RETURNS:
5916  *	0 on success, -errno on failure.
5917  */
5918 int sata_link_init_spd(struct ata_link *link)
5919 {
5920 	u8 spd;
5921 	int rc;
5922 
5923 	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5924 	if (rc)
5925 		return rc;
5926 
5927 	spd = (link->saved_scontrol >> 4) & 0xf;
5928 	if (spd)
5929 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5930 
5931 	ata_force_link_limits(link);
5932 
5933 	link->sata_spd_limit = link->hw_sata_spd_limit;
5934 
5935 	return 0;
5936 }
5937 
5938 /**
5939  *	ata_port_alloc - allocate and initialize basic ATA port resources
5940  *	@host: ATA host this allocated port belongs to
5941  *
5942  *	Allocate and initialize basic ATA port resources.
5943  *
5944  *	RETURNS:
5945  *	Allocated ATA port on success, NULL on failure.
5946  *
5947  *	LOCKING:
5948  *	Inherited from calling layer (may sleep).
5949  */
5950 struct ata_port *ata_port_alloc(struct ata_host *host)
5951 {
5952 	struct ata_port *ap;
5953 
5954 	DPRINTK("ENTER\n");
5955 
5956 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5957 	if (!ap)
5958 		return NULL;
5959 
5960 	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5961 	ap->lock = &host->lock;
5962 	ap->print_id = -1;
5963 	ap->local_port_no = -1;
5964 	ap->host = host;
5965 	ap->dev = host->dev;
5966 
5967 #if defined(ATA_VERBOSE_DEBUG)
5968 	/* turn on all debugging levels */
5969 	ap->msg_enable = 0x00FF;
5970 #elif defined(ATA_DEBUG)
5971 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5972 #else
5973 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5974 #endif
5975 
5976 	mutex_init(&ap->scsi_scan_mutex);
5977 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5978 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5979 	INIT_LIST_HEAD(&ap->eh_done_q);
5980 	init_waitqueue_head(&ap->eh_wait_q);
5981 	init_completion(&ap->park_req_pending);
5982 	setup_deferrable_timer(&ap->fastdrain_timer,
5983 			       ata_eh_fastdrain_timerfn,
5984 			       (unsigned long)ap);
5985 
5986 	ap->cbl = ATA_CBL_NONE;
5987 
5988 	ata_link_init(ap, &ap->link, 0);
5989 
5990 #ifdef ATA_IRQ_TRAP
5991 	ap->stats.unhandled_irq = 1;
5992 	ap->stats.idle_irq = 1;
5993 #endif
5994 	ata_sff_port_init(ap);
5995 
5996 	return ap;
5997 }
5998 
5999 static void ata_host_release(struct device *gendev, void *res)
6000 {
6001 	struct ata_host *host = dev_get_drvdata(gendev);
6002 	int i;
6003 
6004 	for (i = 0; i < host->n_ports; i++) {
6005 		struct ata_port *ap = host->ports[i];
6006 
6007 		if (!ap)
6008 			continue;
6009 
6010 		if (ap->scsi_host)
6011 			scsi_host_put(ap->scsi_host);
6012 
6013 		kfree(ap->pmp_link);
6014 		kfree(ap->slave_link);
6015 		kfree(ap);
6016 		host->ports[i] = NULL;
6017 	}
6018 
6019 	dev_set_drvdata(gendev, NULL);
6020 }
6021 
6022 /**
6023  *	ata_host_alloc - allocate and init basic ATA host resources
6024  *	@dev: generic device this host is associated with
6025  *	@max_ports: maximum number of ATA ports associated with this host
6026  *
6027  *	Allocate and initialize basic ATA host resources.  LLD calls
6028  *	this function to allocate a host, initializes it fully and
6029  *	attaches it using ata_host_register().
6030  *
6031  *	@max_ports ports are allocated and host->n_ports is
6032  *	initialized to @max_ports.  The caller is allowed to decrease
6033  *	host->n_ports before calling ata_host_register().  The unused
6034  *	ports will be automatically freed on registration.
6035  *
6036  *	RETURNS:
6037  *	Allocated ATA host on success, NULL on failure.
6038  *
6039  *	LOCKING:
6040  *	Inherited from calling layer (may sleep).
6041  */
6042 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6043 {
6044 	struct ata_host *host;
6045 	size_t sz;
6046 	int i;
6047 
6048 	DPRINTK("ENTER\n");
6049 
6050 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6051 		return NULL;
6052 
6053 	/* alloc a container for our list of ATA ports (buses) */
6054 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6056 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6057 	if (!host)
6058 		goto err_out;
6059 
6060 	devres_add(dev, host);
6061 	dev_set_drvdata(dev, host);
6062 
6063 	spin_lock_init(&host->lock);
6064 	mutex_init(&host->eh_mutex);
6065 	host->dev = dev;
6066 	host->n_ports = max_ports;
6067 
6068 	/* allocate ports bound to this host */
6069 	for (i = 0; i < max_ports; i++) {
6070 		struct ata_port *ap;
6071 
6072 		ap = ata_port_alloc(host);
6073 		if (!ap)
6074 			goto err_out;
6075 
6076 		ap->port_no = i;
6077 		host->ports[i] = ap;
6078 	}
6079 
6080 	devres_remove_group(dev, NULL);
6081 	return host;
6082 
6083  err_out:
6084 	devres_release_group(dev, NULL);
6085 	return NULL;
6086 }
6087 
6088 /**
6089  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6090  *	@dev: generic device this host is associated with
6091  *	@ppi: array of ATA port_info to initialize host with
6092  *	@n_ports: number of ATA ports attached to this host
6093  *
6094  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6095  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6096  *	last entry will be used for the remaining ports.
6097  *
6098  *	RETURNS:
6099  *	Allocated ATA host on success, NULL on failure.
6100  *
6101  *	LOCKING:
6102  *	Inherited from calling layer (may sleep).
6103  */
6104 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6105 				      const struct ata_port_info * const * ppi,
6106 				      int n_ports)
6107 {
6108 	const struct ata_port_info *pi;
6109 	struct ata_host *host;
6110 	int i, j;
6111 
6112 	host = ata_host_alloc(dev, n_ports);
6113 	if (!host)
6114 		return NULL;
6115 
6116 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6117 		struct ata_port *ap = host->ports[i];
6118 
6119 		if (ppi[j])
6120 			pi = ppi[j++];
6121 
6122 		ap->pio_mask = pi->pio_mask;
6123 		ap->mwdma_mask = pi->mwdma_mask;
6124 		ap->udma_mask = pi->udma_mask;
6125 		ap->flags |= pi->flags;
6126 		ap->link.flags |= pi->link_flags;
6127 		ap->ops = pi->port_ops;
6128 
6129 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6130 			host->ops = pi->port_ops;
6131 	}
6132 
6133 	return host;
6134 }
6135 
6136 /**
6137  *	ata_slave_link_init - initialize slave link
6138  *	@ap: port to initialize slave link for
6139  *
6140  *	Create and initialize slave link for @ap.  This enables slave
6141  *	link handling on the port.
6142  *
6143  *	In libata, a port contains links and a link contains devices.
6144  *	There is single host link but if a PMP is attached to it,
6145  *	There is a single host link but if a PMP is attached to it,
6146  *	a single device connected to a link but PATA and SATA
6147  *	controllers emulating TF based interface can have two - master
6148  *	and slave.
6149  *
6150  *	However, there are a few controllers which don't fit into this
6151  *	abstraction too well - SATA controllers which emulate TF
6152  *	interface with both master and slave devices but also have
6153  *	separate SCR register sets for each device.  These controllers
6154  *	need separate links for physical link handling
6155  *	(e.g. onlineness, link speed) but should be treated like a
6156  *	traditional M/S controller for everything else (e.g. command
6157  *	issue, softreset).
6158  *
6159  *	slave_link is libata's way of handling this class of
6160  *	controllers without impacting core layer too much.  For
6161  *	anything other than physical link handling, the default host
6162  *	link is used for both master and slave.  For physical link
6163  *	handling, separate @ap->slave_link is used.  All dirty details
6164  *	are implemented inside libata core layer.  From LLD's POV, the
6165  *	only difference is that prereset, hardreset and postreset are
6166  *	called once more for the slave link, so the reset sequence
6167  *	looks like the following.
6168  *
6169  *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6170  *	softreset(M) -> postreset(M) -> postreset(S)
6171  *
6172  *	Note that softreset is called only for the master.  Softreset
6173  *	resets both M/S by definition, so SRST on master should handle
6174  *	both (the standard method will work just fine).
6175  *
6176  *	LOCKING:
6177  *	Should be called before host is registered.
6178  *
6179  *	RETURNS:
6180  *	0 on success, -errno on failure.
6181  */
6182 int ata_slave_link_init(struct ata_port *ap)
6183 {
6184 	struct ata_link *link;
6185 
6186 	WARN_ON(ap->slave_link);
6187 	WARN_ON(ap->flags & ATA_FLAG_PMP);
6188 
6189 	link = kzalloc(sizeof(*link), GFP_KERNEL);
6190 	if (!link)
6191 		return -ENOMEM;
6192 
6193 	ata_link_init(ap, link, 1);
6194 	ap->slave_link = link;
6195 	return 0;
6196 }
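
/*
 * Usage sketch (hypothetical): a driver for a TF-emulating SATA
 * controller with per-device SCR sets would enable the slave link right
 * after allocating the host and before registering it, e.g.:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *	if (!host)
 *		return -ENOMEM;
 *	rc = ata_slave_link_init(host->ports[0]);
 *	if (rc)
 *		return rc;
 */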
6197 
6198 static void ata_host_stop(struct device *gendev, void *res)
6199 {
6200 	struct ata_host *host = dev_get_drvdata(gendev);
6201 	int i;
6202 
6203 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
6204 
6205 	for (i = 0; i < host->n_ports; i++) {
6206 		struct ata_port *ap = host->ports[i];
6207 
6208 		if (ap->ops->port_stop)
6209 			ap->ops->port_stop(ap);
6210 	}
6211 
6212 	if (host->ops->host_stop)
6213 		host->ops->host_stop(host);
6214 }
6215 
6216 /**
6217  *	ata_finalize_port_ops - finalize ata_port_operations
6218  *	@ops: ata_port_operations to finalize
6219  *
6220  *	An ata_port_operations can inherit from another ops and that
6221  *	ops can again inherit from another.  This can go on as many
6222  *	times as necessary as long as there is no loop in the
6223  *	inheritance chain.
6224  *
6225  *	Ops tables are finalized when the host is started.  NULL or
6226  *	unspecified entries are inherited from the closest ancestor
6227  *	which has the method and the entry is populated with it.
6228  *	After finalization, the ops table directly points to all the
6229  *	methods and ->inherits is no longer necessary and cleared.
6230  *
6231  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6232  *
6233  *	LOCKING:
6234  *	None.
6235  */
6236 static void ata_finalize_port_ops(struct ata_port_operations *ops)
6237 {
6238 	static DEFINE_SPINLOCK(lock);
6239 	const struct ata_port_operations *cur;
6240 	void **begin = (void **)ops;
6241 	void **end = (void **)&ops->inherits;
6242 	void **pp;
6243 
6244 	if (!ops || !ops->inherits)
6245 		return;
6246 
6247 	spin_lock(&lock);
6248 
6249 	for (cur = ops->inherits; cur; cur = cur->inherits) {
6250 		void **inherit = (void **)cur;
6251 
6252 		for (pp = begin; pp < end; pp++, inherit++)
6253 			if (!*pp)
6254 				*pp = *inherit;
6255 	}
6256 
6257 	for (pp = begin; pp < end; pp++)
6258 		if (IS_ERR(*pp))
6259 			*pp = NULL;
6260 
6261 	ops->inherits = NULL;
6262 
6263 	spin_unlock(&lock);
6264 }
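
/*
 * Sketch: an LLD usually declares its port ops by inheriting from one of
 * the generic tables and overriding only what differs; ATA_OP_NULL forces
 * an inherited method back to NULL.  ata_finalize_port_ops() above then
 * flattens the chain at host start time.  The my_* callbacks are
 * illustrative names, not real functions.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.scr_read	= my_scr_read,
 *		.scr_write	= my_scr_write,
 *		.softreset	= ATA_OP_NULL,
 *	};
 */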
6265 
6266 /**
6267  *	ata_host_start - start and freeze ports of an ATA host
6268  *	@host: ATA host to start ports for
6269  *
6270  *	Start and then freeze ports of @host.  Started status is
6271  *	recorded in host->flags, so this function can be called
6272  *	multiple times.  Ports are guaranteed to get started only
6273  *	once.  If host->ops isn't initialized yet, it's set to the
6274  *	first non-dummy port ops.
6275  *
6276  *	LOCKING:
6277  *	Inherited from calling layer (may sleep).
6278  *
6279  *	RETURNS:
6280  *	0 if all ports are started successfully, -errno otherwise.
6281  */
6282 int ata_host_start(struct ata_host *host)
6283 {
6284 	int have_stop = 0;
6285 	void *start_dr = NULL;
6286 	int i, rc;
6287 
6288 	if (host->flags & ATA_HOST_STARTED)
6289 		return 0;
6290 
6291 	ata_finalize_port_ops(host->ops);
6292 
6293 	for (i = 0; i < host->n_ports; i++) {
6294 		struct ata_port *ap = host->ports[i];
6295 
6296 		ata_finalize_port_ops(ap->ops);
6297 
6298 		if (!host->ops && !ata_port_is_dummy(ap))
6299 			host->ops = ap->ops;
6300 
6301 		if (ap->ops->port_stop)
6302 			have_stop = 1;
6303 	}
6304 
6305 	if (host->ops->host_stop)
6306 		have_stop = 1;
6307 
6308 	if (have_stop) {
6309 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6310 		if (!start_dr)
6311 			return -ENOMEM;
6312 	}
6313 
6314 	for (i = 0; i < host->n_ports; i++) {
6315 		struct ata_port *ap = host->ports[i];
6316 
6317 		if (ap->ops->port_start) {
6318 			rc = ap->ops->port_start(ap);
6319 			if (rc) {
6320 				if (rc != -ENODEV)
6321 					dev_err(host->dev,
6322 						"failed to start port %d (errno=%d)\n",
6323 						i, rc);
6324 				goto err_out;
6325 			}
6326 		}
6327 		ata_eh_freeze_port(ap);
6328 	}
6329 
6330 	if (start_dr)
6331 		devres_add(host->dev, start_dr);
6332 	host->flags |= ATA_HOST_STARTED;
6333 	return 0;
6334 
6335  err_out:
6336 	while (--i >= 0) {
6337 		struct ata_port *ap = host->ports[i];
6338 
6339 		if (ap->ops->port_stop)
6340 			ap->ops->port_stop(ap);
6341 	}
6342 	devres_free(start_dr);
6343 	return rc;
6344 }
6345 
6346 /**
6347  *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
6348  *	@host:	host to initialize
6349  *	@dev:	device host is attached to
6350  *	@ops:	port_ops
6351  *
6352  */
6353 void ata_host_init(struct ata_host *host, struct device *dev,
6354 		   struct ata_port_operations *ops)
6355 {
6356 	spin_lock_init(&host->lock);
6357 	mutex_init(&host->eh_mutex);
6358 	host->n_tags = ATA_MAX_QUEUE - 1;
6359 	host->dev = dev;
6360 	host->ops = ops;
6361 }
6362 
6363 void __ata_port_probe(struct ata_port *ap)
6364 {
6365 	struct ata_eh_info *ehi = &ap->link.eh_info;
6366 	unsigned long flags;
6367 
6368 	/* kick EH for boot probing */
6369 	spin_lock_irqsave(ap->lock, flags);
6370 
6371 	ehi->probe_mask |= ATA_ALL_DEVICES;
6372 	ehi->action |= ATA_EH_RESET;
6373 	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6374 
6375 	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6376 	ap->pflags |= ATA_PFLAG_LOADING;
6377 	ata_port_schedule_eh(ap);
6378 
6379 	spin_unlock_irqrestore(ap->lock, flags);
6380 }
6381 
6382 int ata_port_probe(struct ata_port *ap)
6383 {
6384 	int rc = 0;
6385 
6386 	if (ap->ops->error_handler) {
6387 		__ata_port_probe(ap);
6388 		ata_port_wait_eh(ap);
6389 	} else {
6390 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6391 		rc = ata_bus_probe(ap);
6392 		DPRINTK("ata%u: bus probe end\n", ap->print_id);
6393 	}
6394 	return rc;
6395 }
6396 
6397 
6398 static void async_port_probe(void *data, async_cookie_t cookie)
6399 {
6400 	struct ata_port *ap = data;
6401 
6402 	/*
6403 	 * If we're not allowed to scan this host in parallel,
6404 	 * we need to wait until all previous scans have completed
6405 	 * before going further.
6406 	 * Jeff Garzik says this is only within a controller, so we
6407 	 * don't need to wait for port 0, only for later ports.
6408 	 */
6409 	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6410 		async_synchronize_cookie(cookie);
6411 
6412 	(void)ata_port_probe(ap);
6413 
6414 	/* in order to keep device order, we need to synchronize at this point */
6415 	async_synchronize_cookie(cookie);
6416 
6417 	ata_scsi_scan_host(ap, 1);
6418 }
6419 
6420 /**
6421  *	ata_host_register - register initialized ATA host
6422  *	@host: ATA host to register
6423  *	@sht: template for SCSI host
6424  *
6425  *	Register initialized ATA host.  @host is allocated using
6426  *	ata_host_alloc() and fully initialized by LLD.  This function
6427  *	starts ports, registers @host with ATA and SCSI layers and
6428  *	probes attached devices.
6429  *
6430  *	LOCKING:
6431  *	Inherited from calling layer (may sleep).
6432  *
6433  *	RETURNS:
6434  *	0 on success, -errno otherwise.
6435  */
6436 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6437 {
6438 	int i, rc;
6439 
6440 	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6441 
6442 	/* host must have been started */
6443 	if (!(host->flags & ATA_HOST_STARTED)) {
6444 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
6445 		WARN_ON(1);
6446 		return -EINVAL;
6447 	}
6448 
6449 	/* Blow away unused ports.  This happens when LLD can't
6450 	 * determine the exact number of ports to allocate at
6451 	 * allocation time.
6452 	 */
6453 	for (i = host->n_ports; host->ports[i]; i++)
6454 		kfree(host->ports[i]);
6455 
6456 	/* give ports names and add SCSI hosts */
6457 	for (i = 0; i < host->n_ports; i++) {
6458 		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6459 		host->ports[i]->local_port_no = i + 1;
6460 	}
6461 
6462 	/* Create associated sysfs transport objects  */
6463 	for (i = 0; i < host->n_ports; i++) {
6464 		rc = ata_tport_add(host->dev, host->ports[i]);
6465 		if (rc) {
6466 			goto err_tadd;
6467 		}
6468 	}
6469 
6470 	rc = ata_scsi_add_hosts(host, sht);
6471 	if (rc)
6472 		goto err_tadd;
6473 
6474 	/* set cable, sata_spd_limit and report */
6475 	for (i = 0; i < host->n_ports; i++) {
6476 		struct ata_port *ap = host->ports[i];
6477 		unsigned long xfer_mask;
6478 
6479 		/* set SATA cable type if still unset */
6480 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6481 			ap->cbl = ATA_CBL_SATA;
6482 
6483 		/* init sata_spd_limit to the current value */
6484 		sata_link_init_spd(&ap->link);
6485 		if (ap->slave_link)
6486 			sata_link_init_spd(ap->slave_link);
6487 
6488 		/* print per-port info to dmesg */
6489 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6490 					      ap->udma_mask);
6491 
6492 		if (!ata_port_is_dummy(ap)) {
6493 			ata_port_info(ap, "%cATA max %s %s\n",
6494 				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6495 				      ata_mode_string(xfer_mask),
6496 				      ap->link.eh_info.desc);
6497 			ata_ehi_clear_desc(&ap->link.eh_info);
6498 		} else
6499 			ata_port_info(ap, "DUMMY\n");
6500 	}
6501 
6502 	/* perform each probe asynchronously */
6503 	for (i = 0; i < host->n_ports; i++) {
6504 		struct ata_port *ap = host->ports[i];
6505 		async_schedule(async_port_probe, ap);
6506 	}
6507 
6508 	return 0;
6509 
6510  err_tadd:
6511 	while (--i >= 0) {
6512 		ata_tport_delete(host->ports[i]);
6513 	}
6514 	return rc;
6516 }
6517 
6518 /**
6519  *	ata_host_activate - start host, request IRQ and register it
6520  *	@host: target ATA host
6521  *	@irq: IRQ to request
6522  *	@irq_handler: irq_handler used when requesting IRQ
6523  *	@irq_flags: irq_flags used when requesting IRQ
6524  *	@sht: scsi_host_template to use when registering the host
6525  *
6526  *	After allocating an ATA host and initializing it, most libata
6527  *	LLDs perform three steps to activate the host - start host,
6528  *	request IRQ and register it.  This helper takes necessary
6529  *	arguments and performs the three steps in one go.
6530  *
6531  *	An invalid IRQ skips the IRQ registration and expects the host to
6532  *	have set polling mode on the port. In this case, @irq_handler
6533  *	should be NULL.
6534  *
6535  *	LOCKING:
6536  *	Inherited from calling layer (may sleep).
6537  *
6538  *	RETURNS:
6539  *	0 on success, -errno otherwise.
6540  */
6541 int ata_host_activate(struct ata_host *host, int irq,
6542 		      irq_handler_t irq_handler, unsigned long irq_flags,
6543 		      struct scsi_host_template *sht)
6544 {
6545 	int i, rc;
6546 	char *irq_desc;
6547 
6548 	rc = ata_host_start(host);
6549 	if (rc)
6550 		return rc;
6551 
6552 	/* Special case for polling mode */
6553 	if (!irq) {
6554 		WARN_ON(irq_handler);
6555 		return ata_host_register(host, sht);
6556 	}
6557 
6558 	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6559 				  dev_driver_string(host->dev),
6560 				  dev_name(host->dev));
6561 	if (!irq_desc)
6562 		return -ENOMEM;
6563 
6564 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6565 			      irq_desc, host);
6566 	if (rc)
6567 		return rc;
6568 
6569 	for (i = 0; i < host->n_ports; i++)
6570 		ata_port_desc(host->ports[i], "irq %d", irq);
6571 
6572 	rc = ata_host_register(host, sht);
6573 	/* if failed, just free the IRQ and leave ports alone */
6574 	if (rc)
6575 		devm_free_irq(host->dev, irq, host);
6576 
6577 	return rc;
6578 }
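
/*
 * Usage sketch (hypothetical platform driver): the typical LLD probe
 * sequence built on the helpers above - allocate a host from port_info,
 * set up port I/O addresses, then start, wire the IRQ and register in
 * one call.  my_port_info, my_sht and my_interrupt are illustrative.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		(map controller registers into host->ports[0]->ioaddr here)
 *
 *		return ata_host_activate(host, irq, my_interrupt, 0, &my_sht);
 *	}
 */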
6579 
6580 /**
6581  *	ata_port_detach - Detach ATA port in preparation of device removal
6582  *	@ap: ATA port to be detached
6583  *
6584  *	Detach all ATA devices and the associated SCSI devices of @ap;
6585  *	then, remove the associated SCSI host.  @ap is guaranteed to
6586  *	be quiescent on return from this function.
6587  *
6588  *	LOCKING:
6589  *	Kernel thread context (may sleep).
6590  */
6591 static void ata_port_detach(struct ata_port *ap)
6592 {
6593 	unsigned long flags;
6594 	struct ata_link *link;
6595 	struct ata_device *dev;
6596 
6597 	if (!ap->ops->error_handler)
6598 		goto skip_eh;
6599 
6600 	/* tell EH we're leaving & flush EH */
6601 	spin_lock_irqsave(ap->lock, flags);
6602 	ap->pflags |= ATA_PFLAG_UNLOADING;
6603 	ata_port_schedule_eh(ap);
6604 	spin_unlock_irqrestore(ap->lock, flags);
6605 
6606 	/* wait till EH commits suicide */
6607 	ata_port_wait_eh(ap);
6608 
6609 	/* it better be dead now */
6610 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6611 
6612 	cancel_delayed_work_sync(&ap->hotplug_task);
6613 
6614  skip_eh:
6615 	/* clean up zpodd on port removal */
6616 	ata_for_each_link(link, ap, HOST_FIRST) {
6617 		ata_for_each_dev(dev, link, ALL) {
6618 			if (zpodd_dev_enabled(dev))
6619 				zpodd_exit(dev);
6620 		}
6621 	}
6622 	if (ap->pmp_link) {
6623 		int i;
6624 		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6625 			ata_tlink_delete(&ap->pmp_link[i]);
6626 	}
6627 	/* remove the associated SCSI host */
6628 	scsi_remove_host(ap->scsi_host);
6629 	ata_tport_delete(ap);
6630 }
6631 
6632 /**
6633  *	ata_host_detach - Detach all ports of an ATA host
6634  *	@host: Host to detach
6635  *
6636  *	Detach all ports of @host.
6637  *
6638  *	LOCKING:
6639  *	Kernel thread context (may sleep).
6640  */
6641 void ata_host_detach(struct ata_host *host)
6642 {
6643 	int i;
6644 
6645 	for (i = 0; i < host->n_ports; i++)
6646 		ata_port_detach(host->ports[i]);
6647 
6648 	/* the host is dead now, dissociate ACPI */
6649 	ata_acpi_dissociate(host);
6650 }
6651 
6652 #ifdef CONFIG_PCI
6653 
6654 /**
6655  *	ata_pci_remove_one - PCI layer callback for device removal
6656  *	@pdev: PCI device that was removed
6657  *
6658  *	PCI layer indicates to libata via this hook that a hot-unplug or
6659  *	module unload event has occurred.  Detach all ports.  Resource
6660  *	release is handled via devres.
6661  *
6662  *	LOCKING:
6663  *	Inherited from PCI layer (may sleep).
6664  */
6665 void ata_pci_remove_one(struct pci_dev *pdev)
6666 {
6667 	struct ata_host *host = pci_get_drvdata(pdev);
6668 
6669 	ata_host_detach(host);
6670 }
6671 
6672 /* move to PCI subsystem */
6673 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6674 {
6675 	unsigned long tmp = 0;
6676 
6677 	switch (bits->width) {
6678 	case 1: {
6679 		u8 tmp8 = 0;
6680 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6681 		tmp = tmp8;
6682 		break;
6683 	}
6684 	case 2: {
6685 		u16 tmp16 = 0;
6686 		pci_read_config_word(pdev, bits->reg, &tmp16);
6687 		tmp = tmp16;
6688 		break;
6689 	}
6690 	case 4: {
6691 		u32 tmp32 = 0;
6692 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6693 		tmp = tmp32;
6694 		break;
6695 	}
6696 
6697 	default:
6698 		return -EINVAL;
6699 	}
6700 
6701 	tmp &= bits->mask;
6702 
6703 	return (tmp == bits->val) ? 1 : 0;
6704 }
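
/*
 * Usage sketch: callers describe the config space field to test with a
 * struct pci_bits - register offset, width in bytes, mask and expected
 * value.  Testing whether a channel of a hypothetical controller is
 * enabled (register 0x41, one byte wide, bit 7 must be set) could look
 * like this:
 *
 *	static const struct pci_bits my_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENODEV;
 */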
6705 
6706 #ifdef CONFIG_PM
6707 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6708 {
6709 	pci_save_state(pdev);
6710 	pci_disable_device(pdev);
6711 
6712 	if (mesg.event & PM_EVENT_SLEEP)
6713 		pci_set_power_state(pdev, PCI_D3hot);
6714 }
6715 
6716 int ata_pci_device_do_resume(struct pci_dev *pdev)
6717 {
6718 	int rc;
6719 
6720 	pci_set_power_state(pdev, PCI_D0);
6721 	pci_restore_state(pdev);
6722 
6723 	rc = pcim_enable_device(pdev);
6724 	if (rc) {
6725 		dev_err(&pdev->dev,
6726 			"failed to enable device after resume (%d)\n", rc);
6727 		return rc;
6728 	}
6729 
6730 	pci_set_master(pdev);
6731 	return 0;
6732 }
6733 
6734 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6735 {
6736 	struct ata_host *host = pci_get_drvdata(pdev);
6737 	int rc = 0;
6738 
6739 	rc = ata_host_suspend(host, mesg);
6740 	if (rc)
6741 		return rc;
6742 
6743 	ata_pci_device_do_suspend(pdev, mesg);
6744 
6745 	return 0;
6746 }
6747 
6748 int ata_pci_device_resume(struct pci_dev *pdev)
6749 {
6750 	struct ata_host *host = pci_get_drvdata(pdev);
6751 	int rc;
6752 
6753 	rc = ata_pci_device_do_resume(pdev);
6754 	if (rc == 0)
6755 		ata_host_resume(host);
6756 	return rc;
6757 }
6758 #endif /* CONFIG_PM */
6759 
6760 #endif /* CONFIG_PCI */
6761 
6762 /**
6763  *	ata_platform_remove_one - Platform layer callback for device removal
6764  *	@pdev: Platform device that was removed
6765  *
6766  *	Platform layer indicates to libata via this hook that a hot-unplug or
6767  *	module unload event has occurred.  Detach all ports.  Resource
6768  *	release is handled via devres.
6769  *
6770  *	LOCKING:
6771  *	Inherited from platform layer (may sleep).
6772  */
6773 int ata_platform_remove_one(struct platform_device *pdev)
6774 {
6775 	struct ata_host *host = platform_get_drvdata(pdev);
6776 
6777 	ata_host_detach(host);
6778 
6779 	return 0;
6780 }
6781 
6782 static int __init ata_parse_force_one(char **cur,
6783 				      struct ata_force_ent *force_ent,
6784 				      const char **reason)
6785 {
6786 	static const struct ata_force_param force_tbl[] __initconst = {
6787 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
6788 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
6789 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
6790 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
6791 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
6792 		{ "sata",	.cbl		= ATA_CBL_SATA },
6793 		{ "1.5Gbps",	.spd_limit	= 1 },
6794 		{ "3.0Gbps",	.spd_limit	= 2 },
6795 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
6796 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
6797 		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
6798 		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
6799 		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
6800 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
6801 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
6802 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
6803 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
6804 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
6805 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
6806 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
6807 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
6808 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
6809 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
6810 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
6811 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
6812 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6813 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6814 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
6815 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6816 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6817 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
6818 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6819 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6820 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
6821 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6822 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6823 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
6824 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6825 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6826 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
6827 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6828 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6829 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
6830 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6831 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6832 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
6833 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
6834 		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
6835 		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
6836 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6837 		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
6838 		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
6839 		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
6840 	};
6841 	char *start = *cur, *p = *cur;
6842 	char *id, *val, *endp;
6843 	const struct ata_force_param *match_fp = NULL;
6844 	int nr_matches = 0, i;
6845 
6846 	/* find where this param ends and update *cur */
6847 	while (*p != '\0' && *p != ',')
6848 		p++;
6849 
6850 	if (*p == '\0')
6851 		*cur = p;
6852 	else
6853 		*cur = p + 1;
6854 
6855 	*p = '\0';
6856 
6857 	/* parse */
6858 	p = strchr(start, ':');
6859 	if (!p) {
6860 		val = strstrip(start);
6861 		goto parse_val;
6862 	}
6863 	*p = '\0';
6864 
6865 	id = strstrip(start);
6866 	val = strstrip(p + 1);
6867 
6868 	/* parse id */
6869 	p = strchr(id, '.');
6870 	if (p) {
6871 		*p++ = '\0';
6872 		force_ent->device = simple_strtoul(p, &endp, 10);
6873 		if (p == endp || *endp != '\0') {
6874 			*reason = "invalid device";
6875 			return -EINVAL;
6876 		}
6877 	}
6878 
6879 	force_ent->port = simple_strtoul(id, &endp, 10);
6880 	if (id == endp || *endp != '\0') {
6881 		*reason = "invalid port/link";
6882 		return -EINVAL;
6883 	}
6884 
6885  parse_val:
6886 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6887 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6888 		const struct ata_force_param *fp = &force_tbl[i];
6889 
6890 		if (strncasecmp(val, fp->name, strlen(val)))
6891 			continue;
6892 
6893 		nr_matches++;
6894 		match_fp = fp;
6895 
6896 		if (strcasecmp(val, fp->name) == 0) {
6897 			nr_matches = 1;
6898 			break;
6899 		}
6900 	}
6901 
6902 	if (!nr_matches) {
6903 		*reason = "unknown value";
6904 		return -EINVAL;
6905 	}
6906 	if (nr_matches > 1) {
6907 		*reason = "ambiguous value";
6908 		return -EINVAL;
6909 	}
6910 
6911 	force_ent->param = *match_fp;
6912 
6913 	return 0;
6914 }
6915 
6916 static void __init ata_parse_force_param(void)
6917 {
6918 	int idx = 0, size = 1;
6919 	int last_port = -1, last_device = -1;
6920 	char *p, *cur, *next;
6921 
6922 	/* calculate maximum number of params and allocate force_tbl */
6923 	for (p = ata_force_param_buf; *p; p++)
6924 		if (*p == ',')
6925 			size++;
6926 
6927 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6928 	if (!ata_force_tbl) {
6929 		printk(KERN_WARNING "ata: failed to extend force table, "
6930 		       "libata.force ignored\n");
6931 		return;
6932 	}
6933 
6934 	/* parse and populate the table */
6935 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6936 		const char *reason = "";
6937 		struct ata_force_ent te = { .port = -1, .device = -1 };
6938 
6939 		next = cur;
6940 		if (ata_parse_force_one(&next, &te, &reason)) {
6941 			printk(KERN_WARNING "ata: failed to parse force "
6942 			       "parameter \"%s\" (%s)\n",
6943 			       cur, reason);
6944 			continue;
6945 		}
6946 
6947 		if (te.port == -1) {
6948 			te.port = last_port;
6949 			te.device = last_device;
6950 		}
6951 
6952 		ata_force_tbl[idx++] = te;
6953 
6954 		last_port = te.port;
6955 		last_device = te.device;
6956 	}
6957 
6958 	ata_force_tbl_size = idx;
6959 }
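
/*
 * Example of the accepted libata.force syntax parsed above: a
 * comma-separated list of "[ID:]VAL" entries where ID is PORT or
 * PORT.DEVICE.  If ID is omitted, the last specified PORT/DEVICE is
 * reused; if none has been specified yet, the value applies to all
 * ports and devices.  For instance:
 *
 *	libata.force=1.5Gbps			limit all links to 1.5Gbps
 *	libata.force=2:noncq,2.00:disable	turn off NCQ on port 2 and
 *						disable device 0 on port 2
 */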
6960 
6961 static int __init ata_init(void)
6962 {
6963 	int rc;
6964 
6965 	ata_parse_force_param();
6966 
6967 	rc = ata_sff_init();
6968 	if (rc) {
6969 		kfree(ata_force_tbl);
6970 		return rc;
6971 	}
6972 
6973 	libata_transport_init();
6974 	ata_scsi_transport_template = ata_attach_transport();
6975 	if (!ata_scsi_transport_template) {
6976 		ata_sff_exit();
6977 		rc = -ENOMEM;
6978 		goto err_out;
6979 	}
6980 
6981 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6982 	return 0;
6983 
6984 err_out:
6985 	return rc;
6986 }
6987 
6988 static void __exit ata_exit(void)
6989 {
6990 	ata_release_transport(ata_scsi_transport_template);
6991 	libata_transport_exit();
6992 	ata_sff_exit();
6993 	kfree(ata_force_tbl);
6994 }
6995 
6996 subsys_initcall(ata_init);
6997 module_exit(ata_exit);
6998 
6999 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
7000 
7001 int ata_ratelimit(void)
7002 {
7003 	return __ratelimit(&ratelimit);
7004 }
7005 
7006 /**
7007  *	ata_msleep - ATA EH owner aware msleep
7008  *	@ap: ATA port to attribute the sleep to
7009  *	@msecs: duration to sleep in milliseconds
7010  *
7011  *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
7012  *	ownership is released before going to sleep and reacquired
7013  *	after the sleep is complete.  IOW, other ports sharing the
7014  *	@ap->host will be allowed to own the EH while this task is
7015  *	sleeping.
7016  *
7017  *	LOCKING:
7018  *	Might sleep.
7019  */
7020 void ata_msleep(struct ata_port *ap, unsigned int msecs)
7021 {
7022 	bool owns_eh = ap && ap->host->eh_owner == current;
7023 
7024 	if (owns_eh)
7025 		ata_eh_release(ap);
7026 
7027 	if (msecs < 20) {
7028 		unsigned long usecs = msecs * USEC_PER_MSEC;
7029 		usleep_range(usecs, usecs + 50);
7030 	} else {
7031 		msleep(msecs);
7032 	}
7033 
7034 	if (owns_eh)
7035 		ata_eh_acquire(ap);
7036 }
7037 
7038 /**
7039  *	ata_wait_register - wait until register value changes
7040  *	@ap: ATA port to wait register for, can be NULL
7041  *	@reg: IO-mapped register
7042  *	@mask: Mask to apply to read register value
7043  *	@val: Wait condition
7044  *	@interval: polling interval in milliseconds
7045  *	@timeout: timeout in milliseconds
7046  *
7047  *	Waiting for some bits of register to change is a common
7048  *	operation for ATA controllers.  This function reads 32bit LE
7049  *	IO-mapped register @reg and tests for the following condition.
7050  *
7051  *	(*@reg & mask) != val
7052  *
7053  *	If the condition is met, it returns; otherwise, the process is
7054  *	repeated after @interval until timeout.
7055  *
7056  *	LOCKING:
7057  *	Kernel thread context (may sleep)
7058  *
7059  *	RETURNS:
7060  *	The final register value.
7061  */
7062 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
7063 		      unsigned long interval, unsigned long timeout)
7064 {
7065 	unsigned long deadline;
7066 	u32 tmp;
7067 
7068 	tmp = ioread32(reg);
7069 
7070 	/* Calculate timeout _after_ the first read to make sure
7071 	 * preceding writes reach the controller before starting to
7072 	 * eat away the timeout.
7073 	 */
7074 	deadline = ata_deadline(jiffies, timeout);
7075 
7076 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
7077 		ata_msleep(ap, interval);
7078 		tmp = ioread32(reg);
7079 	}
7080 
7081 	return tmp;
7082 }
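
/*
 * Usage sketch: polling a controller status register until a busy bit
 * clears, with a 10ms poll interval and a 1s timeout.  MY_STATUS and
 * MY_BUSY are illustrative names; mmio is assumed to be the port's
 * ioremapped register base.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(ap, mmio + MY_STATUS, MY_BUSY, MY_BUSY,
 *				   10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;
 */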
7083 
7084 /**
7085  *	sata_lpm_ignore_phy_events - test if PHY event should be ignored
7086  *	@link: Link receiving the event
7087  *
7088  *	Test whether the received PHY event has to be ignored or not.
7089  *
7090  *	LOCKING:
7091  *	None.
7092  *
7093  *	RETURNS:
7094  *	True if the event has to be ignored.
7095  */
7096 bool sata_lpm_ignore_phy_events(struct ata_link *link)
7097 {
7098 	unsigned long lpm_timeout = link->last_lpm_change +
7099 				    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
7100 
7101 	/* if LPM is enabled, PHYRDY doesn't mean anything */
7102 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
7103 		return true;
7104 
7105 	/* ignore the first PHY event after the LPM policy changed
7106 	 * as it might be spurious
7107 	 */
7108 	if ((link->flags & ATA_LFLAG_CHANGED) &&
7109 	    time_before(jiffies, lpm_timeout))
7110 		return true;
7111 
7112 	return false;
7113 }
7114 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7115 
7116 /*
7117  * Dummy port_ops
7118  */
7119 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7120 {
7121 	return AC_ERR_SYSTEM;
7122 }
7123 
7124 static void ata_dummy_error_handler(struct ata_port *ap)
7125 {
7126 	/* truly dummy */
7127 }
7128 
7129 struct ata_port_operations ata_dummy_port_ops = {
7130 	.qc_prep		= ata_noop_qc_prep,
7131 	.qc_issue		= ata_dummy_qc_issue,
7132 	.error_handler		= ata_dummy_error_handler,
7133 	.sched_eh		= ata_std_sched_eh,
7134 	.end_eh			= ata_std_end_eh,
7135 };
7136 
7137 const struct ata_port_info ata_dummy_port_info = {
7138 	.port_ops		= &ata_dummy_port_ops,
7139 };
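
/*
 * Usage sketch: ata_dummy_port_info stands in for a real port_info when
 * a controller always exposes a fixed number of ports but one of them is
 * known to be unusable, e.g.:
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */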
7140 
7141 /*
7142  * Utility print functions
7143  */
7144 void ata_port_printk(const struct ata_port *ap, const char *level,
7145 		     const char *fmt, ...)
7146 {
7147 	struct va_format vaf;
7148 	va_list args;
7149 
7150 	va_start(args, fmt);
7151 
7152 	vaf.fmt = fmt;
7153 	vaf.va = &args;
7154 
7155 	printk("%sata%u: %pV", level, ap->print_id, &vaf);
7156 
7157 	va_end(args);
7158 }
7159 EXPORT_SYMBOL(ata_port_printk);
7160 
7161 void ata_link_printk(const struct ata_link *link, const char *level,
7162 		     const char *fmt, ...)
7163 {
7164 	struct va_format vaf;
7165 	va_list args;
7166 
7167 	va_start(args, fmt);
7168 
7169 	vaf.fmt = fmt;
7170 	vaf.va = &args;
7171 
7172 	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7173 		printk("%sata%u.%02u: %pV",
7174 		       level, link->ap->print_id, link->pmp, &vaf);
7175 	else
7176 		printk("%sata%u: %pV",
7177 		       level, link->ap->print_id, &vaf);
7178 
7179 	va_end(args);
7180 }
7181 EXPORT_SYMBOL(ata_link_printk);
7182 
7183 void ata_dev_printk(const struct ata_device *dev, const char *level,
7184 		    const char *fmt, ...)
7185 {
7186 	struct va_format vaf;
7187 	va_list args;
7188 
7189 	va_start(args, fmt);
7190 
7191 	vaf.fmt = fmt;
7192 	vaf.va = &args;
7193 
7194 	printk("%sata%u.%02u: %pV",
7195 	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7196 	       &vaf);
7197 
7198 	va_end(args);
7199 }
7200 EXPORT_SYMBOL(ata_dev_printk);
7201 
7202 void ata_print_version(const struct device *dev, const char *version)
7203 {
7204 	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7205 }
7206 EXPORT_SYMBOL(ata_print_version);
7207 
7208 /*
7209  * libata is essentially a library of internal helper functions for
7210  * low-level ATA host controller drivers.  As such, the API/ABI is
7211  * likely to change as new drivers are added and updated.
7212  * Do not depend on ABI/API stability.
7213  */
7214 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7215 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7216 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7217 EXPORT_SYMBOL_GPL(ata_base_port_ops);
7218 EXPORT_SYMBOL_GPL(sata_port_ops);
7219 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7220 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7221 EXPORT_SYMBOL_GPL(ata_link_next);
7222 EXPORT_SYMBOL_GPL(ata_dev_next);
7223 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7224 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
7225 EXPORT_SYMBOL_GPL(ata_host_init);
7226 EXPORT_SYMBOL_GPL(ata_host_alloc);
7227 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7228 EXPORT_SYMBOL_GPL(ata_slave_link_init);
7229 EXPORT_SYMBOL_GPL(ata_host_start);
7230 EXPORT_SYMBOL_GPL(ata_host_register);
7231 EXPORT_SYMBOL_GPL(ata_host_activate);
7232 EXPORT_SYMBOL_GPL(ata_host_detach);
7233 EXPORT_SYMBOL_GPL(ata_sg_init);
7234 EXPORT_SYMBOL_GPL(ata_qc_complete);
7235 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7236 EXPORT_SYMBOL_GPL(atapi_cmd_type);
7237 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7238 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7239 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7240 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7241 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7242 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7243 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7244 EXPORT_SYMBOL_GPL(ata_mode_string);
7245 EXPORT_SYMBOL_GPL(ata_id_xfermask);
7246 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7247 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7248 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7249 EXPORT_SYMBOL_GPL(ata_dev_disable);
7250 EXPORT_SYMBOL_GPL(sata_set_spd);
7251 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7252 EXPORT_SYMBOL_GPL(sata_link_debounce);
7253 EXPORT_SYMBOL_GPL(sata_link_resume);
7254 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7255 EXPORT_SYMBOL_GPL(ata_std_prereset);
7256 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7257 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7258 EXPORT_SYMBOL_GPL(ata_std_postreset);
7259 EXPORT_SYMBOL_GPL(ata_dev_classify);
7260 EXPORT_SYMBOL_GPL(ata_dev_pair);
7261 EXPORT_SYMBOL_GPL(ata_ratelimit);
7262 EXPORT_SYMBOL_GPL(ata_msleep);
7263 EXPORT_SYMBOL_GPL(ata_wait_register);
7264 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7265 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7266 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7267 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7268 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7269 EXPORT_SYMBOL_GPL(sata_scr_valid);
7270 EXPORT_SYMBOL_GPL(sata_scr_read);
7271 EXPORT_SYMBOL_GPL(sata_scr_write);
7272 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7273 EXPORT_SYMBOL_GPL(ata_link_online);
7274 EXPORT_SYMBOL_GPL(ata_link_offline);
7275 #ifdef CONFIG_PM
7276 EXPORT_SYMBOL_GPL(ata_host_suspend);
7277 EXPORT_SYMBOL_GPL(ata_host_resume);
7278 #endif /* CONFIG_PM */
7279 EXPORT_SYMBOL_GPL(ata_id_string);
7280 EXPORT_SYMBOL_GPL(ata_id_c_string);
7281 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7282 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7283 
7284 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7285 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7286 EXPORT_SYMBOL_GPL(ata_timing_compute);
7287 EXPORT_SYMBOL_GPL(ata_timing_merge);
7288 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7289 
7290 #ifdef CONFIG_PCI
7291 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7292 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7293 #ifdef CONFIG_PM
7294 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7295 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7296 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7297 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7298 #endif /* CONFIG_PM */
7299 #endif /* CONFIG_PCI */
7300 
7301 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7302 
7303 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7304 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7305 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7306 EXPORT_SYMBOL_GPL(ata_port_desc);
7307 #ifdef CONFIG_PCI
7308 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7309 #endif /* CONFIG_PCI */
7310 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7311 EXPORT_SYMBOL_GPL(ata_link_abort);
7312 EXPORT_SYMBOL_GPL(ata_port_abort);
7313 EXPORT_SYMBOL_GPL(ata_port_freeze);
7314 EXPORT_SYMBOL_GPL(sata_async_notification);
7315 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7316 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7317 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7318 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7319 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7320 EXPORT_SYMBOL_GPL(ata_do_eh);
7321 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7322 
7323 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7324 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7325 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7326 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7327 EXPORT_SYMBOL_GPL(ata_cable_sata);
7328