// SPDX-License-Identifier: GPL-2.0-only
/*
 * pata_atiixp.c - ATI PATA for new ATA layer
 *		   (C) 2005 Red Hat Inc
 *		   (C) 2009-2010 Bartlomiej Zolnierkiewicz
 *
 * Based on
 *
 *  linux/drivers/ide/pci/atiixp.c	Version 0.01-bart2	Feb. 26, 2004
 *
 *  Copyright (C) 2003 ATI Inc. <hyu@ati.com>
 *  Copyright (C) 2004 Bartlomiej Zolnierkiewicz
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"

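/*
 * PCI configuration space registers of the IXP IDE function, as programmed
 * below: a timing byte per drive in the PIO and MWDMA timing registers, a
 * mode nibble per drive in the PIO and UDMA mode registers, and a UDMA
 * enable bit per drive in the UDMA control register.
 */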
enum {
	ATIIXP_IDE_PIO_TIMING = 0x40,
	ATIIXP_IDE_MWDMA_TIMING = 0x44,
	ATIIXP_IDE_PIO_CONTROL = 0x48,
	ATIIXP_IDE_PIO_MODE = 0x4a,
	ATIIXP_IDE_UDMA_CONTROL = 0x54,
	ATIIXP_IDE_UDMA_MODE = 0x56
};

static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
	{
		/* Board has onboard PATA<->SATA converters */
		.ident = "MSI E350DM-E33",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
			DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
		},
	},
	{ }
};

static int atiixp_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 udma;

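	/*
	 * Boards in the DMI table route the channel through an on-board
	 * PATA<->SATA bridge over a short cable, so report
	 * ATA_CBL_PATA40_SHORT and let libata allow the higher UDMA modes.
	 */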
	if (dmi_check_system(attixp_cable_override_dmi_table))
		return ATA_CBL_PATA40_SHORT;

	/*
	 * Hack from drivers/ide/pci: ideally we would do raw cable
	 * detection here rather than follow the BIOS mode guess.
	 */
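	/*
	 * Each channel has a UDMA mode byte: the low nibble holds the
	 * master's programmed mode, the high nibble the slave's. If the
	 * BIOS set either drive to UDMA4 or higher, assume an 80-wire
	 * cable is present.
	 */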
	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
	if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

static DEFINE_SPINLOCK(atiixp_lock);

/**
 * atiixp_prereset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */

static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits atiixp_enable_bits[] = {
		{ 0x48, 1, 0x01, 0x00 },
		{ 0x48, 1, 0x08, 0x00 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

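	/*
	 * The enable test passes when the channel's bit in config
	 * register 0x48 (bit 0 for the primary port, bit 3 for the
	 * secondary) reads back as zero, i.e. the channel has not been
	 * disabled.
	 */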
	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/**
 * atiixp_set_pio_timing - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 * @pio: Requested PIO
 *
 * Called by both the PIO and DMA setup functions to set the controller
 * timings for PIO transfers. We must load both the mode number and
 * the timing values into the controller.
 */

static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
	static const u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = 2 * ap->port_no + adev->devno;
	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
	u32 pio_timing_data;
	u16 pio_mode_data;

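	/*
	 * The mode register packs one 4-bit field per drive (dn counts
	 * drives 0-3 across both channels); the timing dword holds one
	 * byte per drive, with the master's byte above the slave's in
	 * each channel's 16-bit half (hence the devno ^ 1 in the shift).
	 */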
	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
	pio_mode_data &= ~(0x7 << (4 * dn));
	pio_mode_data |= pio << (4 * dn);
	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);

	pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
	pio_timing_data &= ~(0xFF << timing_shift);
	pio_timing_data |= (pio_timings[pio] << timing_shift);
	pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}

/**
 * atiixp_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Called to do the PIO mode setup. We use a shared helper for this
 * as the DMA setup must also adjust the PIO timing information.
 */

static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned long flags;

	spin_lock_irqsave(&atiixp_lock, flags);
	atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
	spin_unlock_irqrestore(&atiixp_lock, flags);
}

/**
 * atiixp_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Called to do the DMA mode setup. We use timing tables for most
 * modes but must tune an appropriate PIO mode to match.
 */

static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	static const u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dma = adev->dma_mode;
	int dn = 2 * ap->port_no + adev->devno;
	int wanted_pio;
	unsigned long flags;

	spin_lock_irqsave(&atiixp_lock, flags);

	if (adev->dma_mode >= XFER_UDMA_0) {
		u16 udma_mode_data;

		dma -= XFER_UDMA_0;

		pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
		udma_mode_data &= ~(0x7 << (4 * dn));
		udma_mode_data |= dma << (4 * dn);
		pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
	} else {
		int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
		u32 mwdma_timing_data;

		dma -= XFER_MW_DMA_0;

		pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
				      &mwdma_timing_data);
		mwdma_timing_data &= ~(0xFF << timing_shift);
		mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
		pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
				       mwdma_timing_data);
	}
	/*
	 * We must now look at the PIO mode situation. We may need to
	 * adjust the PIO mode to keep the timings acceptable
	 */
	if (adev->dma_mode >= XFER_MW_DMA_2)
		wanted_pio = 4;
	else if (adev->dma_mode == XFER_MW_DMA_1)
		wanted_pio = 3;
	else if (adev->dma_mode == XFER_MW_DMA_0)
		wanted_pio = 0;
	else BUG();

	if (adev->pio_mode != wanted_pio)
		atiixp_set_pio_timing(ap, adev, wanted_pio);
	spin_unlock_irqrestore(&atiixp_lock, flags);
}

/**
 * atiixp_bmdma_start - DMA start callback
 * @qc: Command in progress
 *
 * When DMA begins we need to ensure that the UDMA control
 * register for the channel is correctly set.
 *
 * Note: the host lock held by the libata layer protects us from
 * two channels both trying to set the DMA bits at once.
 */

static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = (2 * ap->port_no) + adev->devno;
	u16 tmp16;

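	/*
	 * ATIIXP_IDE_UDMA_CONTROL carries one enable bit per drive (dn):
	 * set it when this command runs in UDMA, clear it otherwise so
	 * the drive falls back to its multiword DMA timings.
	 */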
	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
	if (ata_using_udma(adev))
		tmp16 |= (1 << dn);
	else
		tmp16 &= ~(1 << dn);
	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
	ata_bmdma_start(qc);
}

/**
 * atiixp_bmdma_stop - DMA stop callback
 * @qc: Command in progress
 *
 * DMA has completed. Clear the UDMA flag as the next operations will
 * be PIO ones, not UDMA data transfers.
 *
 * Note: the host lock held by the libata layer protects us from
 * two channels both trying to set the DMA bits at once.
 */

static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = (2 * ap->port_no) + qc->dev->devno;
	u16 tmp16;

	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
	tmp16 &= ~(1 << dn);
	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
	ata_bmdma_stop(qc);
}

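/*
 * The host template pairs the reduced LIBATA_DUMB_MAX_PRD scatter/gather
 * limit with ata_bmdma_dumb_qc_prep() in the port operations below; both
 * are the conservative options libata offers for simpler BMDMA engines.
 */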
static const struct scsi_host_template atiixp_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = LIBATA_DUMB_MAX_PRD,
	.dma_boundary = ATA_DMA_BOUNDARY,
};

static struct ata_port_operations atiixp_port_ops = {
	.inherits = &ata_bmdma_port_ops,

	.qc_prep = ata_bmdma_dumb_qc_prep,
	.bmdma_start = atiixp_bmdma_start,
	.bmdma_stop = atiixp_bmdma_stop,

	.prereset = atiixp_prereset,
	.cable_detect = atiixp_cable_detect,
	.set_piomode = atiixp_set_piomode,
	.set_dmamode = atiixp_set_dmamode,
};

static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA12_ONLY,
		.udma_mask = ATA_UDMA5,
		.port_ops = &atiixp_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, &info };

	/* SB600 doesn't have secondary port wired */
	if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
		ppi[1] = &ata_dummy_port_info;

	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
				      ATA_HOST_PARALLEL_SCAN);
}

static const struct pci_device_id atiixp[] = {
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },

	{ },
};

static struct pci_driver atiixp_pci_driver = {
	.name = DRV_NAME,
	.id_table = atiixp,
	.probe = atiixp_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.resume = ata_pci_device_resume,
	.suspend = ata_pci_device_suspend,
#endif
};

module_pci_driver(atiixp_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400/600/700 and AMD Hudson-2 IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);