/*
 * pata_atiixp.c	- ATI PATA for new ATA layer
 *			  (C) 2005 Red Hat Inc
 *			  Alan Cox <alan@redhat.com>
 *
 * Based on
 *
 *  linux/drivers/ide/pci/atiixp.c	Version 0.01-bart2	Feb. 26, 2004
 *
 *  Copyright (C) 2003 ATI Inc. <hyu@ati.com>
 *  Copyright (C) 2004 Bartlomiej Zolnierkiewicz
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"

enum {
	ATIIXP_IDE_PIO_TIMING	= 0x40,
	ATIIXP_IDE_MWDMA_TIMING	= 0x44,
	ATIIXP_IDE_PIO_CONTROL	= 0x48,
	ATIIXP_IDE_PIO_MODE	= 0x4a,
	ATIIXP_IDE_UDMA_CONTROL	= 0x54,
	ATIIXP_IDE_UDMA_MODE	= 0x56
};

static int atiixp_pre_reset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	static const struct pci_bits atiixp_enable_bits[] = {
		{ 0x48, 1, 0x01, 0x00 },
		{ 0x48, 1, 0x08, 0x00 }
	};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void atiixp_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, atiixp_pre_reset, ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int atiixp_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 udma;

	/* Hack from drivers/ide/pci. Really we want to know how to do the
	   raw detection not play follow the bios mode guess */
	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
	if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 * atiixp_set_pio_timing - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 * @pio: Requested PIO mode
 *
 * Called by both the pio and dma setup functions to set the controller
 * timings for PIO transfers. We must load both the mode number and
 * timing values into the controller.
 */

static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
	static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = 2 * ap->port_no + adev->devno;

	/* Check this is correct - the order is odd in both drivers */
	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
	u16 pio_mode_data;
	u32 pio_timing_data;

	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
	pio_mode_data &= ~(0x7 << (4 * dn));
	pio_mode_data |= pio << (4 * dn);
	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);

	/* The timing register is a 32 bit dword: the secondary channel
	   occupies the upper 16 bits, so a word access would miss it */
	pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
	pio_timing_data &= ~(0xFF << timing_shift);
	pio_timing_data |= (pio_timings[pio] << timing_shift);
	pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}
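/*
 * For reference, the layout implied by the shift arithmetic above
 * (the devno ^ 1 swap is what makes the ordering look odd):
 *
 *	drive			 PIO mode field   PIO timing byte
 *	primary master   (dn 0)  bits  2:0        bits 15:8
 *	primary slave    (dn 1)  bits  6:4        bits  7:0
 *	secondary master (dn 2)  bits 10:8        bits 31:24
 *	secondary slave  (dn 3)  bits 14:12       bits 23:16
 *
 * The MWDMA timing dword at 0x44 uses the same per-drive byte layout.
 */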
/**
 * atiixp_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Called to do the PIO mode setup. We use a shared helper for this
 * as the DMA setup must also adjust the PIO timing information.
 */

static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
}

/**
 * atiixp_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Called to do the DMA mode setup. We use timing tables for most
 * modes but must tune an appropriate PIO mode to match.
 */

static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dma = adev->dma_mode;
	int dn = 2 * ap->port_no + adev->devno;
	int wanted_pio;

	if (adev->dma_mode >= XFER_UDMA_0) {
		u16 udma_mode_data;

		dma -= XFER_UDMA_0;

		pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
		udma_mode_data &= ~(0x7 << (4 * dn));
		udma_mode_data |= dma << (4 * dn);
		pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
	} else {
		u32 mwdma_timing_data;
		/* Check this is correct - the order is odd in both drivers */
		int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);

		dma -= XFER_MW_DMA_0;

		/* As with the PIO timings, this is a 32 bit dword covering
		   both channels */
		pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data);
		mwdma_timing_data &= ~(0xFF << timing_shift);
		mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
		pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data);
	}
	/*
	 * We must now look at the PIO mode situation. We may need to
	 * adjust the PIO mode to keep the timings acceptable
	 */
	if (adev->dma_mode >= XFER_MW_DMA_2)
		wanted_pio = 4;
	else if (adev->dma_mode == XFER_MW_DMA_1)
		wanted_pio = 3;
	else if (adev->dma_mode == XFER_MW_DMA_0)
		wanted_pio = 0;
	else BUG();

	if (adev->pio_mode != wanted_pio)
		atiixp_set_pio_timing(ap, adev, wanted_pio);
}
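/*
 * Note: the per-drive UDMA mode nibbles written above are the same
 * BIOS-visible fields that atiixp_cable_detect() reads back one byte
 * at a time (0x56 for the primary channel, 0x57 for the secondary);
 * a programmed mode of UDMA4 or above is taken as evidence that an
 * 80-wire cable is fitted.
 */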
/**
 * atiixp_bmdma_start - DMA start callback
 * @qc: Command in progress
 *
 * When DMA begins we need to ensure that the UDMA control
 * register for the channel is correctly set.
 *
 * Note: The host lock held by the libata layer protects
 * us from two channels both trying to set DMA bits at once
 */

static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = (2 * ap->port_no) + adev->devno;
	u16 tmp16;

	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
	if (adev->dma_mode >= XFER_UDMA_0)
		tmp16 |= (1 << dn);
	else
		tmp16 &= ~(1 << dn);
	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
	ata_bmdma_start(qc);
}

/**
 * atiixp_bmdma_stop - DMA stop callback
 * @qc: Command in progress
 *
 * DMA has completed. Clear the UDMA flag as the next operations will
 * be PIO ones not UDMA data transfer.
 *
 * Note: The host lock held by the libata layer protects
 * us from two channels both trying to set DMA bits at once
 */

static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = (2 * ap->port_no) + qc->dev->devno;
	u16 tmp16;

	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
	tmp16 &= ~(1 << dn);
	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
	ata_bmdma_stop(qc);
}

static struct scsi_host_template atiixp_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct ata_port_operations atiixp_port_ops = {
	.set_piomode	= atiixp_set_piomode,
	.set_dmamode	= atiixp_set_dmamode,
	.mode_filter	= ata_pci_default_filter,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.freeze		= ata_bmdma_freeze,
	.thaw		= ata_bmdma_thaw,
	.error_handler	= atiixp_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect	= atiixp_cable_detect,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= atiixp_bmdma_start,
	.bmdma_stop	= atiixp_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,

	.data_xfer	= ata_data_xfer,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	.irq_on		= ata_irq_on,

	.port_start	= ata_sff_port_start,
};
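/*
 * Transfer mode masks advertised to libata below: pio_mask 0x1f is
 * PIO0-4, mwdma_mask 0x06 is MWDMA1-2 only (no MWDMA0) and udma_mask
 * 0x3F allows up to UDMA5. libata clips UDMA modes above UDMA2 when
 * atiixp_cable_detect() reports a 40-wire cable.
 */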
static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.sht = &atiixp_sht,
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = 0x1f,
		.mwdma_mask = 0x06,	/* No MWDMA0 support */
		.udma_mask = 0x3F,
		.port_ops = &atiixp_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	return ata_pci_init_one(dev, ppi);
}

static const struct pci_device_id atiixp[] = {
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },

	{ },
};

static struct pci_driver atiixp_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= atiixp,
	.probe		= atiixp_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.resume		= ata_pci_device_resume,
	.suspend	= ata_pci_device_suspend,
#endif
};

static int __init atiixp_init(void)
{
	return pci_register_driver(&atiixp_pci_driver);
}

static void __exit atiixp_exit(void)
{
	pci_unregister_driver(&atiixp_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400/600/700");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);

module_init(atiixp_init);
module_exit(atiixp_exit);