/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};

static struct _intel_private {
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_phys_addr;
	u32 PGETBL_save;
	u32 __iomem *gtt;		/* I915G */
	bool clear_fake_agp; /* on first access via agp, fill with scratch */
	int num_dcache_entries;
	void __iomem *i9xx_flush_page;
	char *i81x_gtt_table;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	phys_addr_t scratch_page_dma;
	int refcount;
	/* Whether i915 needs to use the dmar apis or not. */
	unsigned int needs_dmar : 1;
	phys_addr_t gma_bus_addr;
	/* Size of memory reserved for graphics by the BIOS */
	unsigned int stolen_size;
	/* Total number of gtt entries. */
	unsigned int gtt_total_entries;
	/* Part of the gtt that is mappable by the cpu, for those chips where
	 * this is not the full gtt. */
	unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
				unsigned int num_entries,
				struct sg_table *st)
{
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)num_entries);

	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
		goto err;

	for_each_sg(st->sgl, sg, num_entries, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	if (!pci_map_sg(intel_private.pcidev,
			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
		goto err;

	return 0;

err:
	sg_free_table(st);
	return -ENOMEM;
}

static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
	struct sg_table st;
	DBG("try unmapping %lu pages\n", (unsigned long)num_sg);

	pci_unmap_sg(intel_private.pcidev, sg_list,
		     num_sg, PCI_DMA_BIDIRECTIONAL);

	st.sgl = sg_list;
	st.orig_nents = st.nents = num_sg;

	sg_free_table(&st);
}

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
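/* An ARGB cursor needs physically contiguous backing: e.g. a 64x64
 * cursor at 4 bytes per pixel is 64 * 64 * 4 = 16KiB, i.e. four pages,
 * which is why the allocation below is order 2. */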
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
#endif

#define I810_GTT_ORDER 4
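/* Order 4 = 16 pages = 64KiB of PTEs; at 4 bytes per PTE that is 16384
 * entries, enough to map the full 64MB i81x aperture. */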
static int i810_setup(void)
{
	phys_addr_t reg_addr;
	char *gtt_table;

	/* i81x does not preallocate the gtt. It's always 64kb in size. */
	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
	if (gtt_table == NULL)
		return -ENOMEM;
	intel_private.i81x_gtt_table = gtt_table;

	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);

	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
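		/* 1024 dcache entries * 4KiB pages cover those 4MB. */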
		intel_private.num_dcache_entries = 1024;
	}

	return 0;
}

static void i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if ((pg_start + mem->page_count)
			> intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
		dma_addr_t addr = i << PAGE_SHIFT;
		intel_private.driver->write_entry(addr,
						  i, type);
	}
	readl(intel_private.gtt+i-1);

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However, the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
#endif

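/* Unbound GTT entries are pointed at a single uncached scratch page
 * rather than left dangling, so that stray accesses through the
 * aperture hit harmless memory instead of whatever happened to be
 * mapped there before. */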
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

	if (intel_private.needs_dmar) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
			return -EINVAL;

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}

static void i810_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel(addr | pte_flags, intel_private.gtt + entry);
}

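/* {size in MB, num_entries, page_order}: each GTT entry maps one 4KiB
 * page, so e.g. a 32MB aperture needs 32MB / 4KiB = 8192 entries. */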
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{32, 8192, 3},
	{64, 16384, 4},
	{128, 32768, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

static unsigned int intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0; /* no stolen mem on i81x */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
		       stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

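	/* Each PTE is 4 bytes, so the table size in bytes divided by 4
	 * gives the number of entries. */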
	return size/4;
}

static unsigned int intel_gtt_total_entries(void)
{
	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();
	else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.gtt_mappable_entries;
	}
}

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 1) {
		u32 smram_miscc;

		pci_read_config_dword(intel_private.bridge_dev,
				      I810_SMRAM_MISCC, &smram_miscc);

		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
				== I810_GFX_MEM_WIN_32M)
			aperture_size = MB(32);
		else
			aperture_size = MB(64);
	} else if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(intel_private.scratch_page);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
	     intel_iommu_gfx_mapped)
		return 1;
#endif
	return 0;
}

static bool intel_gtt_can_wc(void)
{
	if (INTEL_GTT_GEN <= 2)
		return false;

	if (INTEL_GTT_GEN >= 6)
		return false;

	/* Reports of major corruption with ILK vt'd enabled */
	if (needs_ilk_vtd_wa())
		return false;

	return true;
}

static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret, bar;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers+I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
			"detected gtt size: %dK total, %dK mappable\n",
			intel_private.gtt_total_entries * 4,
			intel_private.gtt_mappable_entries * 4);

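	/* Note the two different "* 4" factors: in the dev_info above each
	 * entry maps a 4KiB page, so entries * 4 is in KiB; here each PTE
	 * itself is 4 bytes, so entries * 4 is the table size in bytes. */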
	gtt_map_size = intel_private.gtt_total_entries * 4;

	intel_private.gtt = NULL;
	if (intel_gtt_can_wc())
		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
					       gtt_map_size);
	if (intel_private.gtt == NULL)
		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
					    gtt_map_size);
	if (intel_private.gtt == NULL) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_AGP_INTEL)
	global_cache_flush();   /* FIXME: ? */
#endif

	intel_private.stolen_size = intel_gtt_stolen_size();

	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	if (INTEL_GTT_GEN <= 2)
		bar = I810_GMADR_BAR;
	else
		bar = I915_GMADR_BAR;

	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}
#endif

static void i830_cleanup(void)
{
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB.  So, to flush out whatever is sitting
 * in that buffer, we just fill 1KB and clflush it out, on the assumption
 * that it'll push the previous contents out.  It appears to work.
 */
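/* A minimal sketch of that legacy fill-and-clflush scheme (not the path
 * taken below, which relies on wbinvd plus the I830_HIC bit; the 1KiB
 * flush_page buffer here is a hypothetical illustration):
 *
 *	memset(flush_page, 0, 1024);
 *	clflush_cache_range(flush_page, 1024);
 *	wmb();
 */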
static void i830_chipset_flush(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* Forcibly evict everything from the CPU write buffers.
	 * clflush appears to be insufficient.
	 */
	wbinvd_on_all_cpus();

	/* So far we've only seen documentation for this magic bit on the
	 * 855GM; we hope it exists on the other gen2 chipsets...
	 *
	 * Also works as advertised on my 845G.
	 */
	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
	       intel_private.registers+I830_HIC);

	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
		if (time_after(jiffies, timeout))
			break;

		udelay(50);
	}
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	writel(addr | pte_flags, intel_private.gtt + entry);
}

bool intel_enable_gtt(void)
{
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	return true;
}
EXPORT_SYMBOL(intel_enable_gtt);

static int i830_setup(void)
{
	phys_addr_t reg_addr;

	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;

	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	if (!intel_enable_gtt())
		return -EIO;

	intel_private.clear_fake_agp = true;
	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	return 0;
}
#endif

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

void intel_gtt_insert_sg_entries(struct sg_table *st,
				 unsigned int pg_start,
				 unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate the
	 * per-page addresses for the GTT */
	for_each_sg(st->sgl, sg, st->nents, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr, j, flags);
			j++;
		}
	}
	readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);

#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
				   unsigned int num_entries,
				   struct page **pages,
				   unsigned int flags)
{
	int i, j;

	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
		dma_addr_t addr = page_to_phys(pages[i]);
		intel_private.driver->write_entry(addr,
						  j, flags);
	}
	readl(intel_private.gtt+j-1);
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	if (intel_private.clear_fake_agp) {
		int start = intel_private.stolen_size / PAGE_SIZE;
		int end = intel_private.gtt_mappable_entries;
		intel_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.needs_dmar) {
		struct sg_table st;

		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(&st, pg_start, type);
		mem->sg_list = st.sgl;
		mem->num_sg = st.nents;
	} else
		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
#endif

void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
	unsigned int i;

	for (i = first_entry; i < (first_entry + num_entries); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);
}
EXPORT_SYMBOL(intel_gtt_clear_range);

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	if (mem->page_count == 0)
		return 0;

	intel_gtt_clear_range(pg_start, mem->page_count);

	if (intel_private.needs_dmar) {
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
		mem->sg_list = NULL;
		mem->num_sg = 0;
	}

	return 0;
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}
#endif

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus,
				     &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource,
				     intel_private.bridge_dev);

	return ret;
}

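/* IFPADDR holds the physical address of the chipset flush page; bit 0
 * set means a flush page has already been programmed (e.g. by the
 * BIOS), in which case we reuse it instead of allocating our own. */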
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR,
				       (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp region, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR,
				       (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp region, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down: bits 35:32 of the address go into PTE
	 * bits 7:4 */
	addr |= (addr >> 28) & 0xf0;
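	/* e.g. addr = 0x840000000: (addr >> 28) & 0xf0 = 0x80, so the
	 * 32-bit PTE written below is 0x40000080 | pte_flags (this
	 * worked example assumes the 36-bit dma_mask_size used by the
	 * gen4+ driver descriptions). */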
	writel(addr | pte_flags, intel_private.gtt + entry);
}

static int i9xx_setup(void)
{
	phys_addr_t reg_addr;
	int size = KB(512);

	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, size);
	if (!intel_private.registers)
		return -ENOMEM;

	switch (INTEL_GTT_GEN) {
	case 3:
		intel_private.gtt_phys_addr =
			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
		break;
	case 5:
		intel_private.gtt_phys_addr = reg_addr + MB(2);
		break;
	default:
		intel_private.gtt_phys_addr = reg_addr + KB(512);
		break;
	}

	intel_i9xx_setup_flush();

	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
};
#endif

static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
	.has_pgtbl_enable = 1,
	.dma_mask_size = 32,
	.setup = i810_setup,
	.cleanup = i810_cleanup,
	.check_flags = i830_check_flags,
	.write_entry = i810_write_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.has_pgtbl_enable = 1,
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers. Each entry
 * pairs a GMCH chip id with a non-null gtt_driver; intel_gmch_probe()
 * selects the matching entry by gmch_chip_id.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}

int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;

	/*
	 * Can be called from the fake agp driver but also directly from
	 * drm/i915.ko. Hence we need to check whether everything is set up
	 * already.
	 */
	if (intel_private.driver) {
		intel_private.refcount++;
		return 1;
	}

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (gpu_pdev) {
			if (gpu_pdev->device ==
			    intel_gtt_chipsets[i].gmch_chip_id) {
				intel_private.pcidev = pci_dev_get(gpu_pdev);
				intel_private.driver =
					intel_gtt_chipsets[i].gtt_driver;

				break;
			}
		} else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!intel_private.driver)
		return 0;

	intel_private.refcount++;

#if IS_ENABLED(CONFIG_AGP_INTEL)
	if (bridge) {
		bridge->driver = &intel_fake_agp_driver;
		bridge->dev_private_data = &intel_private;
		bridge->dev = bridge_pdev;
	}
#endif

	intel_private.bridge_dev = pci_dev_get(bridge_pdev);

	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	mask = intel_private.driver->dma_mask_size;
	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"failed to set %d-bit dma mask on gfx device\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	if (intel_gtt_init() != 0) {
		intel_gmch_remove();

		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
		   phys_addr_t *mappable_base, unsigned long *mappable_end)
{
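	/* Each GTT entry maps one page, so shifting entry counts left by
	 * PAGE_SHIFT converts them to sizes in bytes. */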
	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
	*stolen_size = intel_private.stolen_size;
	*mappable_base = intel_private.gma_bus_addr;
	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);

void intel_gtt_chipset_flush(void)
{
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);

void intel_gmch_remove(void)
{
	if (--intel_private.refcount)
		return;

	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
	intel_private.driver = NULL;
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");