xref: /openbmc/linux/drivers/ata/pata_pdc202xx_old.c (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
1 /*
2  * pata_pdc202xx_old.c 	- Promise PDC202xx PATA for new ATA layer
3  *			  (C) 2005 Red Hat Inc
4  *			  Alan Cox <alan@redhat.com>
5  *			  (C) 2007 Bartlomiej Zolnierkiewicz
6  *
7  * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
8  *
9  * First cut with LBA48/ATAPI
10  *
11  * TODO:
12  *	Channel interlock/reset on both required ?
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/init.h>
19 #include <linux/blkdev.h>
20 #include <linux/delay.h>
21 #include <scsi/scsi_host.h>
22 #include <linux/libata.h>
23 
#define DRV_NAME "pata_pdc202xx_old"	/* name used for the SHT, procfs and PCI driver */
#define DRV_VERSION "0.4.3"		/* reported via MODULE_VERSION below */
26 
27 static int pdc2026x_cable_detect(struct ata_port *ap)
28 {
29 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
30 	u16 cis;
31 
32 	pci_read_config_word(pdev, 0x50, &cis);
33 	if (cis & (1 << (10 + ap->port_no)))
34 		return ATA_CBL_PATA40;
35 	return ATA_CBL_PATA80;
36 }
37 
38 /**
39  *	pdc202xx_configure_piomode	-	set chip PIO timing
40  *	@ap: ATA interface
41  *	@adev: ATA device
42  *	@pio: PIO mode
43  *
44  *	Called to do the PIO mode setup. Our timing registers are shared
45  *	so a configure_dmamode call will undo any work we do here and vice
46  *	versa
47  */
48 
49 static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
50 {
51 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
52 	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
53 	static u16 pio_timing[5] = {
54 		0x0913, 0x050C , 0x0308, 0x0206, 0x0104
55 	};
56 	u8 r_ap, r_bp;
57 
58 	pci_read_config_byte(pdev, port, &r_ap);
59 	pci_read_config_byte(pdev, port + 1, &r_bp);
60 	r_ap &= ~0x3F;	/* Preserve ERRDY_EN, SYNC_IN */
61 	r_bp &= ~0x1F;
62 	r_ap |= (pio_timing[pio] >> 8);
63 	r_bp |= (pio_timing[pio] & 0xFF);
64 
65 	if (ata_pio_need_iordy(adev))
66 		r_ap |= 0x20;	/* IORDY enable */
67 	if (adev->class == ATA_DEV_ATA)
68 		r_ap |= 0x10;	/* FIFO enable */
69 	pci_write_config_byte(pdev, port, r_ap);
70 	pci_write_config_byte(pdev, port + 1, r_bp);
71 }
72 
73 /**
74  *	pdc202xx_set_piomode	-	set initial PIO mode data
75  *	@ap: ATA interface
76  *	@adev: ATA device
77  *
78  *	Called to do the PIO mode setup. Our timing registers are shared
79  *	but we want to set the PIO timing by default.
80  */
81 
82 static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
83 {
84 	pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
85 }
86 
87 /**
88  *	pdc202xx_configure_dmamode	-	set DMA mode in chip
89  *	@ap: ATA interface
90  *	@adev: ATA device
91  *
92  *	Load DMA cycle times into the chip ready for a DMA transfer
93  *	to occur.
94  */
95 
96 static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
97 {
98 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
99 	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
100 	static u8 udma_timing[6][2] = {
101 		{ 0x60, 0x03 },	/* 33 Mhz Clock */
102 		{ 0x40, 0x02 },
103 		{ 0x20, 0x01 },
104 		{ 0x40, 0x02 },	/* 66 Mhz Clock */
105 		{ 0x20, 0x01 },
106 		{ 0x20, 0x01 }
107 	};
108 	static u8 mdma_timing[3][2] = {
109 		{ 0xe0, 0x0f },
110 		{ 0x60, 0x04 },
111 		{ 0x60, 0x03 },
112 	};
113 	u8 r_bp, r_cp;
114 
115 	pci_read_config_byte(pdev, port + 1, &r_bp);
116 	pci_read_config_byte(pdev, port + 2, &r_cp);
117 
118 	r_bp &= ~0xE0;
119 	r_cp &= ~0x0F;
120 
121 	if (adev->dma_mode >= XFER_UDMA_0) {
122 		int speed = adev->dma_mode - XFER_UDMA_0;
123 		r_bp |= udma_timing[speed][0];
124 		r_cp |= udma_timing[speed][1];
125 
126 	} else {
127 		int speed = adev->dma_mode - XFER_MW_DMA_0;
128 		r_bp |= mdma_timing[speed][0];
129 		r_cp |= mdma_timing[speed][1];
130 	}
131 	pci_write_config_byte(pdev, port + 1, r_bp);
132 	pci_write_config_byte(pdev, port + 2, r_cp);
133 
134 }
135 
136 /**
137  *	pdc2026x_bmdma_start		-	DMA engine begin
138  *	@qc: ATA command
139  *
140  *	In UDMA3 or higher we have to clock switch for the duration of the
141  *	DMA transfer sequence.
142  *
143  *	Note: The host lock held by the libata layer protects
144  *	us from two channels both trying to set DMA bits at once
145  */
146 
147 static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
148 {
149 	struct ata_port *ap = qc->ap;
150 	struct ata_device *adev = qc->dev;
151 	struct ata_taskfile *tf = &qc->tf;
152 	int sel66 = ap->port_no ? 0x08: 0x02;
153 
154 	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
155 	void __iomem *clock = master + 0x11;
156 	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
157 
158 	u32 len;
159 
160 	/* Check we keep host level locking here */
161 	if (adev->dma_mode >= XFER_UDMA_2)
162 		iowrite8(ioread8(clock) | sel66, clock);
163 	else
164 		iowrite8(ioread8(clock) & ~sel66, clock);
165 
166 	/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
167 	   and move to qc_issue ? */
168 	pdc202xx_set_dmamode(ap, qc->dev);
169 
170 	/* Cases the state machine will not complete correctly without help */
171 	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATA_PROT_ATAPI_DMA)
172 	{
173 		len = qc->nbytes / 2;
174 
175 		if (tf->flags & ATA_TFLAG_WRITE)
176 			len |= 0x06000000;
177 		else
178 			len |= 0x05000000;
179 
180 		iowrite32(len, atapi_reg);
181 	}
182 
183 	/* Activate DMA */
184 	ata_bmdma_start(qc);
185 }
186 
187 /**
188  *	pdc2026x_bmdma_end		-	DMA engine stop
189  *	@qc: ATA command
190  *
191  *	After a DMA completes we need to put the clock back to 33MHz for
192  *	PIO timings.
193  *
194  *	Note: The host lock held by the libata layer protects
195  *	us from two channels both trying to set DMA bits at once
196  */
197 
198 static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
199 {
200 	struct ata_port *ap = qc->ap;
201 	struct ata_device *adev = qc->dev;
202 	struct ata_taskfile *tf = &qc->tf;
203 
204 	int sel66 = ap->port_no ? 0x08: 0x02;
205 	/* The clock bits are in the same register for both channels */
206 	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
207 	void __iomem *clock = master + 0x11;
208 	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
209 
210 	/* Cases the state machine will not complete correctly */
211 	if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
212 		iowrite32(0, atapi_reg);
213 		iowrite8(ioread8(clock) & ~sel66, clock);
214 	}
215 	/* Flip back to 33Mhz for PIO */
216 	if (adev->dma_mode >= XFER_UDMA_2)
217 		iowrite8(ioread8(clock) & ~sel66, clock);
218 
219 	ata_bmdma_stop(qc);
220 }
221 
/**
 *	pdc2026x_dev_config	-	device setup hook
 *	@adev: newly found device
 *
 *	Perform chip specific early setup. We need to lock the transfer
 *	sizes to 8bit to avoid making the state engine on the 2026x cards
 *	barf.
 */

static void pdc2026x_dev_config(struct ata_device *adev)
{
	/* Cap transfers at 256 sectors (fits an 8-bit sector count) */
	adev->max_sectors = 256;
}
235 
/* SCSI host template: standard libata BMDMA defaults, shared by all chips */
static struct scsi_host_template pdc202xx_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
253 
/*
 * PDC2024x (info[0], UDMA33): generic libata BMDMA helpers throughout.
 * No cable sense hardware, so the cable is always reported as 40-wire.
 */
static struct ata_port_operations pdc2024x_port_ops = {
	.set_piomode	= pdc202xx_set_piomode,
	.set_dmamode	= pdc202xx_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status 	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select 	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= ata_bmdma_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= ata_cable_40wire,

	.bmdma_setup 	= ata_bmdma_setup,
	.bmdma_start 	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status 	= ata_bmdma_status,

	.qc_prep 	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};
285 
/*
 * PDC2026x (info[1]/info[2], UDMA66/UDMA100): adds config-space cable
 * detection, clock-switching BMDMA start/stop and the 256-sector cap
 * from pdc2026x_dev_config.
 */
static struct ata_port_operations pdc2026x_port_ops = {
	.set_piomode	= pdc202xx_set_piomode,
	.set_dmamode	= pdc202xx_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status 	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select 	= ata_std_dev_select,
	.dev_config	= pdc2026x_dev_config,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= ata_bmdma_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= pdc2026x_cable_detect,

	.bmdma_setup 	= ata_bmdma_setup,
	.bmdma_start 	= pdc2026x_bmdma_start,
	.bmdma_stop	= pdc2026x_bmdma_stop,
	.bmdma_status 	= ata_bmdma_status,

	.qc_prep 	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};
318 
319 static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
320 {
321 	static const struct ata_port_info info[3] = {
322 		{
323 			.sht = &pdc202xx_sht,
324 			.flags = ATA_FLAG_SLAVE_POSS,
325 			.pio_mask = 0x1f,
326 			.mwdma_mask = 0x07,
327 			.udma_mask = ATA_UDMA2,
328 			.port_ops = &pdc2024x_port_ops
329 		},
330 		{
331 			.sht = &pdc202xx_sht,
332 			.flags = ATA_FLAG_SLAVE_POSS,
333 			.pio_mask = 0x1f,
334 			.mwdma_mask = 0x07,
335 			.udma_mask = ATA_UDMA4,
336 			.port_ops = &pdc2026x_port_ops
337 		},
338 		{
339 			.sht = &pdc202xx_sht,
340 			.flags = ATA_FLAG_SLAVE_POSS,
341 			.pio_mask = 0x1f,
342 			.mwdma_mask = 0x07,
343 			.udma_mask = ATA_UDMA5,
344 			.port_ops = &pdc2026x_port_ops
345 		}
346 
347 	};
348 	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
349 
350 	if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
351 		struct pci_dev *bridge = dev->bus->self;
352 		/* Don't grab anything behind a Promise I2O RAID */
353 		if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
354 			if( bridge->device == PCI_DEVICE_ID_INTEL_I960)
355 				return -ENODEV;
356 			if( bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
357 				return -ENODEV;
358 		}
359 	}
360 	return ata_pci_init_one(dev, ppi);
361 }
362 
/* driver_data is the index into info[] in pdc202xx_init_one() */
static const struct pci_device_id pdc202xx[] = {
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },	/* UDMA33 config */
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },	/* UDMA66 config */
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },	/* UDMA100 config */
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },

	{ },
};
372 
/* PCI driver glue; suspend/resume only when power management is built in */
static struct pci_driver pdc202xx_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= pdc202xx,
	.probe 		= pdc202xx_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};
383 
/* Module entry point: register the PCI driver */
static int __init pdc202xx_init(void)
{
	return pci_register_driver(&pdc202xx_pci_driver);
}
388 
/* Module exit point: unregister the PCI driver */
static void __exit pdc202xx_exit(void)
{
	pci_unregister_driver(&pdc202xx_pci_driver);
}
393 
/* Module metadata and entry points */
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc202xx);
MODULE_VERSION(DRV_VERSION);

module_init(pdc202xx_init);
module_exit(pdc202xx_exit);
402