xref: /openbmc/linux/drivers/dma/ioat/dca.c (revision 6774def6)
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2007 - 2009 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  */
22 
23 #include <linux/kernel.h>
24 #include <linux/pci.h>
25 #include <linux/smp.h>
26 #include <linux/interrupt.h>
27 #include <linux/dca.h>
28 
29 /* either a kernel change is needed, or we need something like this in kernel */
30 #ifndef CONFIG_SMP
31 #include <asm/smp.h>
32 #undef cpu_physical_id
33 #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
34 #endif
35 
36 #include "dma.h"
37 #include "registers.h"
38 #include "dma_v2.h"
39 
40 /*
41  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
42  * contain the bit number of the APIC ID to map into the DCA tag.  If the valid
43  * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
44  */
45 #define DCA_TAG_MAP_VALID 0x80
46 
47 #define DCA3_TAG_MAP_BIT_TO_INV 0x80
48 #define DCA3_TAG_MAP_BIT_TO_SEL 0x40
49 #define DCA3_TAG_MAP_LITERAL_VAL 0x1
50 
51 #define DCA_TAG_MAP_MASK 0xDF
52 
53 /* expected tag map bytes for I/OAT ver.2 */
54 #define DCA2_TAG_MAP_BYTE0 0x80
55 #define DCA2_TAG_MAP_BYTE1 0x0
56 #define DCA2_TAG_MAP_BYTE2 0x81
57 #define DCA2_TAG_MAP_BYTE3 0x82
58 #define DCA2_TAG_MAP_BYTE4 0x82
59 
60 /* verify if tag map matches expected values */
61 static inline int dca2_tag_map_valid(u8 *tag_map)
62 {
63 	return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
64 		(tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
65 		(tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
66 		(tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
67 		(tag_map[4] == DCA2_TAG_MAP_BYTE4));
68 }
69 
70 /*
71  * "Legacy" DCA systems do not implement the DCA register set in the
72  * I/OAT device.  Software needs direct support for their tag mappings.
73  */
74 
/* encode an APIC ID bit index as a "valid" tag map entry */
#define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN	8

/*
 * Per-chipset APIC-ID-to-DCA-tag maps for legacy systems; entries not
 * listed default to 0 (a literal zero tag bit).
 */
static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
/* UNISYS parts use an all-zero map (every tag bit is a literal 0) */
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
85 
86 /* pack PCI B/D/F into a u16 */
87 static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
88 {
89 	return (pci->bus->number << 8) | pci->devfn;
90 }
91 
92 static int dca_enabled_in_bios(struct pci_dev *pdev)
93 {
94 	/* CPUID level 9 returns DCA configuration */
95 	/* Bit 0 indicates DCA enabled by the BIOS */
96 	unsigned long cpuid_level_9;
97 	int res;
98 
99 	cpuid_level_9 = cpuid_eax(9);
100 	res = test_bit(0, &cpuid_level_9);
101 	if (!res)
102 		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
103 
104 	return res;
105 }
106 
107 int system_has_dca_enabled(struct pci_dev *pdev)
108 {
109 	if (boot_cpu_has(X86_FEATURE_DCA))
110 		return dca_enabled_in_bios(pdev);
111 
112 	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
113 	return 0;
114 }
115 
116 struct ioat_dca_slot {
117 	struct pci_dev *pdev;	/* requester device */
118 	u16 rid;		/* requester id, as used by IOAT */
119 };
120 
121 #define IOAT_DCA_MAX_REQ 6
122 #define IOAT3_DCA_MAX_REQ 2
123 
124 struct ioat_dca_priv {
125 	void __iomem		*iobase;
126 	void __iomem		*dca_base;
127 	int			 max_requesters;
128 	int			 requester_count;
129 	u8			 tag_map[IOAT_TAG_MAP_LEN];
130 	struct ioat_dca_slot 	 req_slots[0];
131 };
132 
133 /* 5000 series chipset DCA Port Requester ID Table Entry Format
134  * [15:8]	PCI-Express Bus Number
135  * [7:3]	PCI-Express Device Number
136  * [2:0]	PCI-Express Function Number
137  *
138  * 5000 series chipset DCA control register format
139  * [7:1]	Reserved (0)
140  * [0]		Ignore Function Number
141  */
142 
/*
 * Register @dev as a DCA requester with a legacy (v1) I/OAT device:
 * claim a free slot, program its requester id into the per-slot
 * register, and return the slot index, or a negative errno on failure.
 */
static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	/* all requester slots already taken */
	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			/* each requester table entry is 4 bytes wide */
			writew(id, ioatdca->dca_base + (i * 4));
			/* make sure the ignore function bit is off */
			writeb(0, ioatdca->dca_base + (i * 4) + 2);
			return i;
		}
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}
174 
/*
 * Undo ioat_dca_add_requester(): clear the hardware slot register for
 * @dev and release its slot.  Returns the freed slot index, or -ENODEV
 * if @dev is not PCI or was never registered.
 */
static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			/* disable the slot in hardware before forgetting it */
			writew(0, ioatdca->dca_base + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
198 
199 static u8 ioat_dca_get_tag(struct dca_provider *dca,
200 			   struct device *dev,
201 			   int cpu)
202 {
203 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
204 	int i, apic_id, bit, value;
205 	u8 entry, tag;
206 
207 	tag = 0;
208 	apic_id = cpu_physical_id(cpu);
209 
210 	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
211 		entry = ioatdca->tag_map[i];
212 		if (entry & DCA_TAG_MAP_VALID) {
213 			bit = entry & ~DCA_TAG_MAP_VALID;
214 			value = (apic_id & (1 << bit)) ? 1 : 0;
215 		} else {
216 			value = entry ? 1 : 0;
217 		}
218 		tag |= (value << i);
219 	}
220 	return tag;
221 }
222 
223 static int ioat_dca_dev_managed(struct dca_provider *dca,
224 				struct device *dev)
225 {
226 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
227 	struct pci_dev *pdev;
228 	int i;
229 
230 	pdev = to_pci_dev(dev);
231 	for (i = 0; i < ioatdca->max_requesters; i++) {
232 		if (ioatdca->req_slots[i].pdev == pdev)
233 			return 1;
234 	}
235 	return 0;
236 }
237 
/* provider callbacks for "legacy" (v1) I/OAT devices */
static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
244 
245 
/*
 * Probe a legacy I/OAT device for DCA support and register a DCA
 * provider for it.  Only devices with a known software tag map are
 * supported.  Returns the new provider, or NULL if DCA cannot be used
 * or registration fails.
 */
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* I/OAT v1 systems must have a known tag_map to support DCA */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IOAT:
			tag_map = ioat_tag_map_BNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
			tag_map = ioat_tag_map_CNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
			tag_map = ioat_tag_map_SCNB;
			break;
		}
		break;
	case PCI_VENDOR_ID_UNISYS:
		switch (pdev->device) {
		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
			tag_map = ioat_tag_map_UNISYS;
			break;
		}
		break;
	}
	if (tag_map == NULL)
		return NULL;

	/* v3 parts expose fewer requester slots than earlier ones */
	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	/* private data carries a variable-length requester slot table */
	dca = alloc_dca_provider(&ioat_dca_ops,
			sizeof(*ioatdca) +
			(sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = max_requesters;
	/* legacy parts keep the DCA registers at a fixed offset */
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
		ioatdca->tag_map[i] = tag_map[i];

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
313 
314 
/*
 * Register @dev as a DCA requester with an I/OAT v2 device: claim a
 * free slot and write the requester id, flagged valid, into the global
 * requester table entry for that slot.  Returns the slot index or a
 * negative errno.
 */
static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	/* all requester slots already taken */
	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			/* the table's offset is read from the device */
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}
348 
/*
 * Undo ioat2_dca_add_requester(): zero @dev's global requester table
 * entry and release its slot.  Returns the freed slot index, or
 * -ENODEV if @dev is not PCI or was never registered.
 */
static int ioat2_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			/* clear the hardware entry before forgetting it */
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
375 
376 static u8 ioat2_dca_get_tag(struct dca_provider *dca,
377 			    struct device *dev,
378 			    int cpu)
379 {
380 	u8 tag;
381 
382 	tag = ioat_dca_get_tag(dca, dev, cpu);
383 	tag = (~tag) & 0x1F;
384 	return tag;
385 }
386 
/* provider callbacks for I/OAT v2 devices */
static struct dca_ops ioat2_dca_ops = {
	.add_requester		= ioat2_dca_add_requester,
	.remove_requester	= ioat2_dca_remove_requester,
	.get_tag		= ioat2_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
393 
/*
 * Count the requester slots the hardware implements by walking the
 * global requester id table until an entry with the LASTID flag is
 * seen.  Returns 0 when the device implements no table.
 */
static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;
	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}
410 
/*
 * Probe an I/OAT v2 device for DCA support via its on-device DCA
 * register block and register a DCA provider.  Returns the provider,
 * or NULL when DCA is unavailable or the BIOS-programmed tag map is
 * known to be bad.
 */
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u32 tag_map;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* a zero offset means the device has no DCA register block */
	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	/* private data carries a slot table sized to the hardware */
	dca = alloc_dca_provider(&ioat2_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	}


	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
	for (i = 0; i < 5; i++) {
		/* each nibble selects an APIC ID bit; >= 8 means unused */
		bit = (tag_map >> (4 * i)) & 0x0f;
		if (bit < 8)
			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
		else
			ioatdca->tag_map[i] = 0;
	}

	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
				dev_driver_string(&pdev->dev),
				dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
490 
/*
 * Register @dev as a DCA requester with an I/OAT v3 device: claim a
 * free slot and write the requester id, flagged valid, into the v3
 * global requester table entry for that slot.  Returns the slot index
 * or a negative errno.
 */
static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	/* all requester slots already taken */
	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			/* v3 keeps the table offset at a different register */
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}
524 
/*
 * Undo ioat3_dca_add_requester(): zero @dev's v3 global requester
 * table entry and release its slot.  Returns the freed slot index, or
 * -ENODEV if @dev is not PCI or was never registered.
 */
static int ioat3_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (!dev_is_pci(dev))
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			/* clear the hardware entry before forgetting it */
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
551 
552 static u8 ioat3_dca_get_tag(struct dca_provider *dca,
553 			    struct device *dev,
554 			    int cpu)
555 {
556 	u8 tag;
557 
558 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
559 	int i, apic_id, bit, value;
560 	u8 entry;
561 
562 	tag = 0;
563 	apic_id = cpu_physical_id(cpu);
564 
565 	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
566 		entry = ioatdca->tag_map[i];
567 		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
568 			bit = entry &
569 				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
570 			value = (apic_id & (1 << bit)) ? 1 : 0;
571 		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
572 			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
573 			value = (apic_id & (1 << bit)) ? 0 : 1;
574 		} else {
575 			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
576 		}
577 		tag |= (value << i);
578 	}
579 
580 	return tag;
581 }
582 
/* provider callbacks for I/OAT v3 devices */
static struct dca_ops ioat3_dca_ops = {
	.add_requester		= ioat3_dca_add_requester,
	.remove_requester	= ioat3_dca_remove_requester,
	.get_tag		= ioat3_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
589 
590 static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
591 {
592 	int slots = 0;
593 	u32 req;
594 	u16 global_req_table;
595 
596 	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
597 	if (global_req_table == 0)
598 		return 0;
599 
600 	do {
601 		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
602 		slots++;
603 	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);
604 
605 	return slots;
606 }
607 
608 static inline int dca3_tag_map_invalid(u8 *tag_map)
609 {
610 	/*
611 	 * If the tag map is not programmed by the BIOS the default is:
612 	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
613 	 *
614 	 * This an invalid map and will result in only 2 possible tags
615 	 * 0x1F and 0x00.  0x00 is an invalid DCA tag so we know that
616 	 * this entire definition is invalid.
617 	 */
618 	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
619 		(tag_map[1] == DCA_TAG_MAP_VALID) &&
620 		(tag_map[2] == DCA_TAG_MAP_VALID) &&
621 		(tag_map[3] == DCA_TAG_MAP_VALID) &&
622 		(tag_map[4] == DCA_TAG_MAP_VALID));
623 }
624 
/*
 * Probe an I/OAT v3 device for DCA support via its on-device DCA
 * register block and register a DCA provider.  Returns the provider,
 * or NULL when DCA is unavailable or the BIOS left the tag map
 * unprogrammed.
 */
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	/* the 64-bit tag map is read as two 32-bit registers */
	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* a zero offset means the device has no DCA register block */
	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	/* private data carries a slot table sized to the hardware */
	dca = alloc_dca_provider(&ioat3_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}


	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		/* one map byte per tag bit; mask off the reserved bit */
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
				dev_driver_string(&pdev->dev),
				dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
711