/*
 * pata_amd.c	- AMD PATA for new ATA layer
 *			  (C) 2005-2006 Red Hat Inc
 *			  Alan Cox <alan@redhat.com>
 *
 *  Based on pata-sil680. Errata information is taken from data sheets
 *  and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 *  claimed by sata-nv.c.
 *
 *  TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 *  Documentation publicly available.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.8"

/**
 *	timing_setup		-	shared timing computation and load
 *	@ap: ATA port being set up
 *	@adev: drive being configured
 *	@offset: port offset
 *	@speed: target speed
 *	@clock: clock multiplier (number of times 33 MHz for this part)
 *
 *	Perform the actual timing set up for Nvidia or AMD PATA devices.
 *	The devices vary, so they all call into this helper function,
 *	providing the clock multiplier and offset (because AMD and Nvidia
 *	put the ports at different locations).
 */

static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
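	/*
	 * Map a UDMA cycle count (the array index, clamped by FIT() in the
	 * switch below) to the chip's UDMA timing field; the 0xc0 ORed in
	 * when the register is written appears to be the UDMA enable bits.
	 */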
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* KHz. */
	u8 t;

	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be over-conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

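	/*
	 * For UDMA100/133 targets on the 33 MHz base clock, bypass the
	 * computed cycle time and use the cycle values that map straight
	 * to the mode 5 and mode 6 encodings in amd_cyc2udma[] (1 -> 6,
	 * 15 -> 7).
	 */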
	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;

	/*
	 *	Now do the setup work
	 */
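	/*
	 * Per-drive register layout, relative to 'offset' (0x40 on AMD,
	 * 0x50 on Nvidia): 0x08-0x0B drive timing (one byte per drive,
	 * port 0 drive 0 at the highest address, hence 3 - dn), 0x0C
	 * address setup (two bits per drive), 0x0E-0x0F 8-bit command
	 * timing (one byte per channel), 0x10-0x13 UDMA timing (one byte
	 * per drive).
	 */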

	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;

	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;

	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;

	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;

	default:
		return;
	}

	/* UDMA timing */
	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}

/**
 *	amd_pre_reset		-	perform reset handling
 *	@ap: ATA port
 *	@deadline: deadline jiffies for the operation
 *
 *	Check the enable bits to see whether the port is active before
 *	handing off to the standard prereset handling.
 */

static int amd_pre_reset(struct ata_port *ap, unsigned long deadline)
{
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(ap, deadline);
}

static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int amd_cable_detect(struct ata_port *ap)
{
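	/* Two cable detect bits per channel: bits 0-1 primary, 2-3 secondary */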
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 *	amd33_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the AMD registers for PIO mode.
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 *	amd33_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}

/**
 *	nv_pre_reset		-	perform reset handling
 *	@ap: ATA port
 *	@deadline: deadline jiffies for the operation
 *
 *	Check the enable bits to see whether the port is active before
 *	handing off to the standard prereset handling.
 */

static int nv_pre_reset(struct ata_port *ap, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(ap, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int nv_cable_detect(struct ata_port *ap)
{
	static const u8 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;
	u16 udma;
	int cbl;

	pci_read_config_byte(pdev, 0x52, &ata66);
	if (ata66 & bitmask[ap->port_no])
		cbl = ATA_CBL_PATA80;
	else
		cbl = ATA_CBL_PATA40;

	/* We now have to double check because the BIOS on Nvidia boards
	   doesn't always set the cable bits but does set the mode bits */
	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
		cbl = ATA_CBL_PATA80;
	return cbl;
}

/**
 *	nv100_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the Nvidia registers (AMD-compatible layout) for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 *	nv100_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static struct scsi_host_template amd_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct ata_port_operations amd33_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= amd33_set_piomode,
	.set_dmamode	= amd33_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_40wire,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};

static struct ata_port_operations amd66_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= amd66_set_piomode,
	.set_dmamode	= amd66_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_unknown,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};

static struct ata_port_operations amd100_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= amd100_set_piomode,
	.set_dmamode	= amd100_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_unknown,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};

static struct ata_port_operations amd133_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= amd133_set_piomode,
	.set_dmamode	= amd133_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= amd_cable_detect,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};

static struct ata_port_operations nv100_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= nv100_set_piomode,
	.set_dmamode	= nv100_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= nv_cable_detect,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};

static struct ata_port_operations nv133_port_ops = {
	.port_disable	= ata_port_disable,
	.set_piomode	= nv133_set_piomode,
	.set_dmamode	= nv133_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= nv_cable_detect,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,
	.irq_ack	= ata_irq_ack,

	.port_start	= ata_port_start,
};

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info[10] = {
		{	/* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{	/* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{	/* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{	/* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{	/* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{	/* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{	/* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{	/* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{	/* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{	/* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	static int printed_version;
	int type = id->driver_data;
	u8 rev;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && rev > 0x7)
		type = 2;

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
			 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

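	/* 7401 and 7409 variants: clear the simplex flag so both channels can DMA */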
	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* And fire it up */
	ppi[0] = &info[type];
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
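	/* Redo the one-time FIFO and simplex fixups from amd_init_one() on resume */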
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
#endif

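/* driver_data is an index into the info[] table in amd_init_one() */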
static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_COBRA_7401),		0 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7409),		1 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7411),		3 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_OPUS_7441),		4 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_8111_IDE),		5 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_IDE),	7 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= amd,
	.probe		= amd_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= amd_reinit_one,
#endif
};

static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);