/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;
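
/*
 * PCI device ID classification helpers.  Each predicate below groups the
 * IDs belonging to one platform generation so that later code can apply
 * per-silicon policy (RAID capability masking, interrupt quirks) without
 * open-coding device ID lists at every call site.
 */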

static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

/*
 * Perform an I/OAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
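
/*
 * Interrupt setup tries MSI-X (one vector per channel) first, then falls
 * back to a single MSI, then to legacy INTx.  The ioat_interrupt_style
 * module parameter only selects where in that fallback chain we start.
 */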

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_out;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	dma_pool_destroy(ioat_dma->completion_pool);
err_out:
	return err;
}

static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		dma_pool_destroy(ioat_dma->completion_pool);
	}

	return err;
}
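
/*
 * Undo ioat_probe()/ioat_register(): quiesce interrupts, drop the sysfs
 * objects, unregister from the dmaengine core and release the completion
 * pool.
 */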
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	dma_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	for (i = 0; i < ioat_chan->desc_chunks; i++) {
		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
				  ioat_chan->descs[i].virt,
				  ioat_chan->descs[i].hw);
		ioat_chan->descs[i].virt = NULL;
		ioat_chan->descs[i].hw = 0;
	}
	ioat_chan->desc_chunks = 0;

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @c: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
			       GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = IOAT_MAX_ORDER;
	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	init_timer(&ioat_chan->timer);
	ioat_chan->timer.function = ioat_timer_event;
	ioat_chan->timer.data = data;
	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}
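
/*
 * XOR/XOR-validate self test: XOR IOAT_NUM_SRC_TEST source pages into a
 * destination page and check the result on the CPU, then (when DMA_XOR_VAL
 * is supported) run two validation operations over the sources plus the
 * destination, one that must report a zero parity sum and, after clearing
 * the destination, one that must report a non-zero sum.
 */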

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma))
		goto dma_unmap;

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		if (dest_dma != DMA_ERROR_CODE)
			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int rc;

	rc = ioat_dma_self_test(ioat_dma);
	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(ioat_dma);

	return rc;
}
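
/*
 * Interrupt quirk applied to BWD-class devices: when the hardware supports
 * descriptor write back error status (IOAT_CAP_DWBES), the XOR P/CRC and Q
 * error interrupts are masked on every channel so that validation mismatches
 * are reported through the write back status rather than as channel error
 * interrupts.
 */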

static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	u32 errmask;

	dma = &ioat_dma->dma_dev;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (ioat_dma->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			ioat_chan = to_ioat_chan(c);
			errmask = readl(ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}
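
/*
 * Probe an I/OAT v3.x engine: wire up the dmaengine callbacks, derive the
 * offload capabilities (XOR/PQ/RAID16SS) from the DMA_CAP register (masking
 * RAID support where the silicon or an enabled DCA conflicts with it), and
 * then register the channels with the dmaengine core.
 */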
static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	bool is_raid_device = false;
	int err;

	dma = &ioat_dma->dma_dev;
	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat_issue_pending;
	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		ioat_dma->cap &=
			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {
		is_raid_device = true;

		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
			dma_set_maxpq(dma, 16, 0);
		else
			dma_set_maxpq(dma, 8, 0);

		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
				dma->max_xor = 16;
			else
				dma->max_xor = 8;
		}
	}

	dma->device_tx_status = ioat_tx_status;

	/* starting with CB3.3 super extended descriptors are supported */
	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!ioat_dma->sed_hw_pool[i])
				return -ENOMEM;

		}
	}

	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
		dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	err = ioat_probe(ioat_dma);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(ioat_dma);
	if (err)
		return err;

	ioat_kobject_add(ioat_dma, &ioat_ktype);

	if (dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	return 0;
}

static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
	struct ioatdma_chan *ioat_chan;
	int i;

	if (!ioat_dma)
		return;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		del_timer_sync(&ioat_chan->timer);
		spin_unlock_bh(&ioat_chan->prep_lock);
		/* this should quiesce then reset */
		ioat_reset_hw(ioat_chan);
	}

	ioat_disable_interrupts(ioat_dma);
}

void ioat_resume(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	u32 chanerr;
	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		/* no need to reset as shutdown already did that */
	}
}

#define DRV_NAME "ioatdma"
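
/*
 * PCIe AER recovery: on an uncorrectable error the channels are quiesced
 * via ioat_shutdown(), the device is re-enabled and its config space
 * restored in slot_reset(), and the channels are brought back up again in
 * the resume callback.
 */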

static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
						 enum pci_channel_state error)
{
	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

	/* quiesce and block I/O */
	ioat_shutdown(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int err;

	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

	if (pci_enable_device_mem(pdev) < 0) {
		dev_err(&pdev->dev,
			"Failed to enable PCIe device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"AER uncorrect error status clear failed: %#x\n", err);
	}

	return result;
}

static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

	/* initialize and bring everything back */
	ioat_resume(ioat_dma);
}

static const struct pci_error_handlers ioat_err_handler = {
	.error_detected = ioat_pcie_error_detected,
	.slot_reset = ioat_pcie_error_slot_reset,
	.resume = ioat_pcie_error_resume,
};

static struct pci_driver ioat_pci_driver = {
	.name = DRV_NAME,
	.id_table = ioat_pci_tbl,
	.probe = ioat_pci_probe,
	.remove = ioat_remove,
	.shutdown = ioat_shutdown,
	.err_handler = &ioat_err_handler,
};

static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
	struct device *dev = &pdev->dev;
	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->pdev = pdev;
	d->reg_base = iobase;
	return d;
}

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_master(pdev);
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_0) {
		err = ioat3_dma_probe(device, ioat_dca_enabled);

		if (device->version >= IOAT_VER_3_3)
			pci_enable_pcie_error_reporting(pdev);
	} else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		pci_disable_pcie_error_reporting(pdev);
		return -ENODEV;
	}

	return 0;
}

static void ioat_remove(struct pci_dev *pdev)
{
	struct ioatdma_device *device = pci_get_drvdata(pdev);

	if (!device)
		return;

	dev_err(&pdev->dev, "Removing dma and dca services\n");
	if (device->dca) {
		unregister_dca_provider(device->dca, &pdev->dev);
		free_dca_provider(device->dca);
		device->dca = NULL;
	}

	pci_disable_pcie_error_reporting(pdev);
	ioat_dma_remove(device);
}

static int __init ioat_init_module(void)
{
	int err = -ENOMEM;

	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
		DRV_NAME, IOAT_DMA_VERSION);

	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ioat_cache)
		return -ENOMEM;

	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
	if (!ioat_sed_cache)
		goto err_ioat_cache;

	err = pci_register_driver(&ioat_pci_driver);
	if (err)
		goto err_ioat3_cache;

	return 0;

err_ioat3_cache:
	kmem_cache_destroy(ioat_sed_cache);

err_ioat_cache:
	kmem_cache_destroy(ioat_cache);

	return err;
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);