/*
 * pata_amd.c 	- AMD PATA for new ATA layer
 *			  (C) 2005-2006 Red Hat Inc
 *			  Alan Cox <alan@redhat.com>
 *
 *  Based on pata-sil680. Errata information is taken from data sheets
 *  and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 *  claimed by sata-nv.c.
 *
 *  TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 *
 *  Documentation publicly available.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.9"

/**
 *	timing_setup		-	shared timing computation and load
 *	@ap: ATA port being set up
 *	@adev: drive being configured
 *	@offset: port offset
 *	@speed: target speed
 *	@clock: clock multiplier (number of times 33MHz for this part)
 *
 *	Perform the actual timing set up for Nvidia or AMD PATA devices.
 *	The actual devices vary so they all call into this helper function
 *	providing the clock multiplier and offset (because AMD and Nvidia put
 *	the ports at different locations).
 */

static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};
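	/*
	 * amd_cyc2udma[] maps a UDMA cycle count (in UDMA clock periods, as
	 * computed by ata_timing_compute below) to the chip's UDMA timing
	 * register code; the table is inherited from the amd74xx.c driver.
	 */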

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* kHz */
	u8 t;

	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);
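	/*
	 * With amd_clock = 33333 kHz this gives T = 1000000000 / 33333 =
	 * 30000 ps (one 33 MHz cycle); UT divides T by clock clamped to
	 * [1, 2], so multipliers of 2 and above compute the UDMA timings
	 * against a 15000 ps period.
	 */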

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be over conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}
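	/*
	 * The 8bit command timing register written below is shared per
	 * channel (it is indexed by dn >> 1, not per drive), so the 8bit
	 * timings are merged with the peer to satisfy the slower device.
	 */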

	if (speed == XFER_UDMA_5 && amd_clock <= 33333)
		at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333)
		at.udma = 15;
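	/*
	 * Forcing at.udma to 1 or 15 here steers the clock-specific switch
	 * below to the dedicated UDMA100/UDMA133 codes in amd_cyc2udma
	 * rather than a purely cycle-time derived value.
	 */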

	/*
	 *	Now do the setup work
	 */

	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;

	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;

	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;

	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;

	default:
		return;
	}
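	/*
	 * In the value computed above, 0xc0 carries the UDMA enable bits
	 * for the drive and the low bits the cycle timing code; 0x03 is the
	 * non-UDMA default, a convention carried over from amd74xx.c.
	 */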

	/* UDMA timing */
	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}

/**
 *	amd_pre_reset		-	perform reset handling
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset sequence checking enable bits to see which ports are
 *	active.
 */

static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};
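	/*
	 * The per-channel enable bits live in config register 0x40: bit 1
	 * covers the primary channel (port 0), bit 0 the secondary.
	 */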

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			       ata_std_softreset, NULL,
			       ata_std_postreset);
}

static int amd_cable_detect(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

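	/*
	 * Config register 0x42 carries two cable detect bits per channel
	 * (one per drive position); treat the channel as 80 wire if either
	 * is set.
	 */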
	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 *	amd33_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the AMD registers for PIO mode.
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 *	amd33_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}

/**
 *	nv_pre_reset	-	perform reset handling
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset sequence checking enable bits to see which ports are
 *	active.
 */

static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			       ata_std_softreset, NULL,
			       ata_std_postreset);
}

static int nv_cable_detect(struct ata_port *ap)
{
	static const u8 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;
	u16 udma;
	int cbl;

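	/*
	 * The cable type is decided from three sources: the cable detect
	 * bits in config register 0x52, the UDMA mode bits the BIOS left
	 * programmed for the channel, and ACPI data across suspend/resume.
	 */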
	pci_read_config_byte(pdev, 0x52, &ata66);
	if (ata66 & bitmask[ap->port_no])
		cbl = ATA_CBL_PATA80;
	else
		cbl = ATA_CBL_PATA40;

	/* We now have to double check because the BIOS on Nvidia boxes
	   doesn't always set the cable bits but does set the mode bits */
	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
		cbl = ATA_CBL_PATA80;
	/* And a triple check across suspend/resume with ACPI around */
	if (ata_acpi_cbl_80wire(ap))
		cbl = ATA_CBL_PATA80;
	return cbl;
}

/**
 *	nv100_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the Nvidia registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 *	nv100_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the Nvidia chipset.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static struct scsi_host_template amd_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

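/*
 * The six operations tables below differ only in the timing hooks, the
 * cable detection method and the error handler used; the remaining
 * entries are the standard SFF/BMDMA helpers.
 */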
static struct ata_port_operations amd33_port_ops = {
	.set_piomode	= amd33_set_piomode,
	.set_dmamode	= amd33_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_40wire,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations amd66_port_ops = {
	.set_piomode	= amd66_set_piomode,
	.set_dmamode	= amd66_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_unknown,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations amd100_port_ops = {
	.set_piomode	= amd100_set_piomode,
	.set_dmamode	= amd100_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_unknown,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations amd133_port_ops = {
	.set_piomode	= amd133_set_piomode,
	.set_dmamode	= amd133_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= amd_cable_detect,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations nv100_port_ops = {
	.set_piomode	= nv100_set_piomode,
	.set_dmamode	= nv100_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= nv_cable_detect,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static struct ata_port_operations nv133_port_ops = {
	.set_piomode	= nv133_set_piomode,
	.set_dmamode	= nv133_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= nv_cable_detect,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info[10] = {
		{	/* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{	/* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{	/* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{	/* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{	/* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{	/* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{	/* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{	/* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{	/* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{	/* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	static int printed_version;
	int type = id->driver_data;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
			 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

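	/*
	 * The early parts (types 0-2: 7401/7409) claim a simplex DMA
	 * engine; clear the simplex flag so libata will use DMA on both
	 * channels.
	 */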
	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* And fire it up */
	ppi[0] = &info[type];
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
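	/*
	 * Reapply the FIFO and simplex configuration done at probe time;
	 * it is not guaranteed to survive a suspend/resume cycle.
	 */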
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
#endif

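/*
 * The driver_data value attached to each ID below indexes the info[]
 * table in amd_init_one() (where it may then be adjusted for errata and
 * the Serenade subsystem).
 */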
static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_COBRA_7401),		0 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7409),		1 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7411),		3 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_OPUS_7441),		4 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_8111_IDE),		5 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_IDE),	7 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= amd,
	.probe		= amd_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= amd_reinit_one,
#endif
};

static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);