/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>

/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif

#include "dma.h"
#include "registers.h"
#include "dma_v2.h"

/*
 * Bit 7 of a tag map entry is the "valid" bit; if it is set, bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag. If the
 * valid bit is not set, the value must be 0 or 1 and defines the bit in
 * the tag directly.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF

/* expected tag map bytes for I/OAT ver.2 */
#define DCA2_TAG_MAP_BYTE0 0x80
#define DCA2_TAG_MAP_BYTE1 0x0
#define DCA2_TAG_MAP_BYTE2 0x81
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82

/* verify if tag map matches expected values */
static inline int dca2_tag_map_valid(u8 *tag_map)
{
	return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
		(tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
		(tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
		(tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
		(tag_map[4] == DCA2_TAG_MAP_BYTE4));
}

/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device. Software needs direct support for their tag mappings.
 */
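/*
 * Illustrative note (not from the datasheet): APICID_BIT(x) below builds
 * a "valid" map entry whose low bits select APIC ID bit x, so e.g.
 * APICID_BIT(2) encodes to 0x82, i.e. DCA_TAG_MAP_VALID (0x80) | 2.
 */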

#define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN	8

static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };

/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
	return (pci->bus->number << 8) | pci->devfn;
}

static int dca_enabled_in_bios(struct pci_dev *pdev)
{
	/* CPUID level 9 returns DCA configuration */
	/* Bit 0 indicates DCA enabled by the BIOS */
	unsigned long cpuid_level_9;
	int res;

	cpuid_level_9 = cpuid_eax(9);
	res = test_bit(0, &cpuid_level_9);
	if (!res)
		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");

	return res;
}

int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (boot_cpu_has(X86_FEATURE_DCA))
		return dca_enabled_in_bios(pdev);

	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
	return 0;
}

struct ioat_dca_slot {
	struct pci_dev *pdev;	/* requester device */
	u16 rid;		/* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

struct ioat_dca_priv {
	void __iomem		*iobase;
	void __iomem		*dca_base;
	int			 max_requesters;
	int			 requester_count;
	u8			 tag_map[IOAT_TAG_MAP_LEN];
	struct ioat_dca_slot	 req_slots[];
};

/* 5000 series chipset DCA Port Requester ID Table Entry Format
 * [15:8]	PCI-Express Bus Number
 * [7:3]	PCI-Express Device Number
 * [2:0]	PCI-Express Function Number
 *
 * 5000 series chipset DCA control register format
 * [7:1]	Reserved (0)
 * [0]		Ignore Function Number
 */

static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			writew(id, ioatdca->dca_base + (i * 4));
			/* make sure the ignore function bit is off */
			writeb(0, ioatdca->dca_base + (i * 4) + 2);
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			writew(0, ioatdca->dca_base + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

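/*
 * Worked example (illustrative): with ioat_tag_map_BNB and a CPU whose
 * APIC ID is 0x6 (binary 110), ioat_dca_get_tag() below produces:
 *   tag bit 0 = 1 (literal entry 1)
 *   tag bit 1 = APIC ID bit 1 = 1
 *   tag bit 2 = APIC ID bit 2 = 1
 *   tag bit 3 = APIC ID bit 2 = 1
 * The remaining entries are 0, so the resulting tag is 0x0F.
 */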
static u8 ioat_dca_get_tag(struct dca_provider *dca,
			   struct device *dev,
			   int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry, tag;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA_TAG_MAP_VALID) {
			bit = entry & ~DCA_TAG_MAP_VALID;
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else {
			value = entry ? 1 : 0;
		}
		tag |= (value << i);
	}
	return tag;
}

static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return 0;
	pdev = to_pci_dev(dev);
	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev)
			return 1;
	}
	return 0;
}

static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* I/OAT v1 systems must have a known tag_map to support DCA */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IOAT:
			tag_map = ioat_tag_map_BNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
			tag_map = ioat_tag_map_CNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
			tag_map = ioat_tag_map_SCNB;
			break;
		}
		break;
	case PCI_VENDOR_ID_UNISYS:
		switch (pdev->device) {
		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
			tag_map = ioat_tag_map_UNISYS;
			break;
		}
		break;
	}
	if (tag_map == NULL)
		return NULL;

	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	dca = alloc_dca_provider(&ioat_dca_ops,
				 sizeof(*ioatdca)
				 + (sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = max_requesters;
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
		ioatdca->tag_map[i] = tag_map[i];

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}

static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
				readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
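			/*
			 * Each global requester table entry is 32 bits:
			 * the packed PCI bus/dev/fn id returned by
			 * dcaid_from_pcidev() plus the "valid" flag
			 * (IOAT_DCA_GREQID_VALID), one entry per slot.
			 */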
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat2_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
				readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

static u8 ioat2_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	tag = ioat_dca_get_tag(dca, dev, cpu);
	tag = (~tag) & 0x1F;
	return tag;
}

static struct dca_ops ioat2_dca_ops = {
	.add_requester		= ioat2_dca_add_requester,
	.remove_requester	= ioat2_dca_remove_requester,
	.get_tag		= ioat2_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u32 tag_map;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat2_dca_ops,
				 sizeof(*ioatdca)
				 + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some BIOSes might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
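	/*
	 * Note (derived from the loop below): the v2 tag map register packs
	 * one 4-bit field per tag bit; each nibble that names an APIC ID
	 * bit below 8 becomes a "valid" map entry, anything else is
	 * treated as a literal 0.
	 */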
	for (i = 0; i < 5; i++) {
		bit = (tag_map >> (4 * i)) & 0x0f;
		if (bit < 8)
			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
		else
			ioatdca->tag_map[i] = 0;
	}

	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
				dev_driver_string(&pdev->dev),
				dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}

static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
				readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat3_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
				readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

static u8 ioat3_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry, tag;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}
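/*
 * Illustrative v3 entry encodings (derived from the flag bits above, not
 * from the datasheet): 0x45 = BIT_TO_SEL | 5 selects APIC ID bit 5
 * directly, 0x85 = BIT_TO_INV | 5 selects its complement, and 0x01
 * contributes a literal 1 at that tag bit position.
 */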

static struct dca_ops ioat3_dca_ops = {
	.add_requester		= ioat3_dca_add_requester,
	.remove_requester	= ioat3_dca_remove_requester,
	.get_tag		= ioat3_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

static inline int dca3_tag_map_invalid(u8 *tag_map)
{
	/*
	 * If the tag map is not programmed by the BIOS the default is:
	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
	 *
	 * This is an invalid map and will result in only 2 possible tags,
	 * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that
	 * this entire definition is invalid.
	 */
	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
		(tag_map[1] == DCA_TAG_MAP_VALID) &&
		(tag_map[2] == DCA_TAG_MAP_VALID) &&
		(tag_map[3] == DCA_TAG_MAP_VALID) &&
		(tag_map[4] == DCA_TAG_MAP_VALID));
}

struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat3_dca_ops,
				 sizeof(*ioatdca)
				 + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some BIOSes might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

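	/*
	 * Layout note (derived from the copy loop below): the v3 tag map is
	 * a 64-bit register holding one byte per tag bit; DCA_TAG_MAP_MASK
	 * (0xDF) drops bit 5 of each byte while keeping the BIT_TO_INV and
	 * BIT_TO_SEL flags and the bit-select field that
	 * ioat3_dca_get_tag() consumes.
	 */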
	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
				dev_driver_string(&pdev->dev),
				dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
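/*
 * Usage sketch (illustrative; the real call sites live in the ioat probe
 * code): the probe path reads the same version register used above and
 * picks the matching init routine, treating a NULL return as "no DCA"
 * rather than a fatal error:
 *
 *	switch (readb(iobase + IOAT_VER_OFFSET)) {
 *	case IOAT_VER_1_2:
 *		dca = ioat_dca_init(pdev, iobase);
 *		break;
 *	case IOAT_VER_2_0:
 *		dca = ioat2_dca_init(pdev, iobase);
 *		break;
 *	case IOAT_VER_3_0:
 *		dca = ioat3_dca_init(pdev, iobase);
 *		break;
 *	}
 */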