/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, it just needlessly
 * complicates the code. But as long as the old graphics stack is still
 * supported, it's stuck here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <linux/intel-gtt.h>
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;

static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};

#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY		2
#define INTEL_AGP_CACHED_MEMORY 3

static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};

#define INTEL_AGP_UNCACHED_MEMORY              0
#define INTEL_AGP_CACHED_MEMORY_LLC            1
#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4

struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup(), except for unmapping
	 * the mmio register window - that is done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};

static struct _intel_private {
	struct intel_gtt base;
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_bus_addr;
	phys_addr_t gma_bus_addr;
	phys_addr_t pte_bus_addr;
	u32 __iomem *gtt;		/* I915G */
	int num_dcache_entries;
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	dma_addr_t scratch_page_dma;
} intel_private;

#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake

static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}

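/*
 * Wrap the memory object's page array in a scatterlist and map it for
 * DMA through the PCI layer (only used on the USE_PCI_DMA_API path).
 */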
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	if (mem->sg_list)
		return 0; /* already mapped (e.g. on resume) */

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		goto err;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}

static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(intel_private.bridge_dev,
			      I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}
}

static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}

static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}

static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}

static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

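/*
 * Allocate one zeroed, uncached page that unused GTT entries are pointed
 * at; on gen > 2 with the DMA API in use it is also mapped for DMA.
 */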
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
			return -EINVAL;

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}

static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

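/*
 * Work out how many GTT entries are consumed by BIOS-stolen memory (plus
 * GTT/popup overhead) by decoding the chipset-specific GMCH control bits.
 */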
static unsigned int intel_gtt_stolen_entries(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int overhead_entries, stolen_entries;
	unsigned int stolen_size = 0;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
		overhead_entries = 0;
	else
		overhead_entries = intel_private.base.gtt_mappable_entries
			/ 1024;

	overhead_entries += 1; /* BIOS popup */

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else if (INTEL_GTT_GEN == 6) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen_size = MB(192);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen_size = MB(288);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen_size = MB(320);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen_size = MB(384);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen_size = MB(416);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen_size = MB(448);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen_size = MB(480);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen_size = MB(512);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (!local && stolen_size > intel_max_stolen) {
		dev_info(&intel_private.bridge_dev->dev,
			 "detected %dK stolen memory, trimming to %dK\n",
			 stolen_size / KB(1), intel_max_stolen / KB(1));
		stolen_size = intel_max_stolen;
	} else if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
		       stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	stolen_entries = stolen_size/KB(4) - overhead_entries;

	return stolen_entries;
}

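/*
 * Size of the GTT itself: gen4/5 and g33 encode it in PGETBL_CTL, gen6 in
 * the GMCH control word; older chipsets size it to match the aperture.
 */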
static unsigned int intel_gtt_total_entries(void)
{
	int size;

	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = KB(128);
			break;
		case I965_PGETBL_SIZE_256KB:
			size = KB(256);
			break;
		case I965_PGETBL_SIZE_512KB:
			size = KB(512);
			break;
		case I965_PGETBL_SIZE_1MB:
			size = KB(1024);
			break;
		case I965_PGETBL_SIZE_2MB:
			size = KB(2048);
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = KB(1024 + 512);
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = KB(512);
		}

		return size/4;
	} else if (INTEL_GTT_GEN == 6) {
		u16 snb_gmch_ctl;

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			size = MB(2);
			break;
		}
		return size/4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.base.gtt_mappable_entries;
	}
}

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(intel_private.scratch_page);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

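/*
 * Common init: run the chipset ->setup() hook, size the GTT, map it and
 * account for the stolen-memory entries before installing the scratch page.
 */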
static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.base.gtt_total_entries = intel_gtt_total_entries();

	dev_info(&intel_private.bridge_dev->dev,
			"detected gtt size: %dK total, %dK mappable\n",
			intel_private.base.gtt_total_entries * 4,
			intel_private.base.gtt_mappable_entries * 4);

	gtt_map_size = intel_private.base.gtt_total_entries * 4;

	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
				    gtt_map_size);
	if (!intel_private.gtt) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	global_cache_flush();   /* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
	if (intel_private.base.gtt_stolen_entries == 0) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	return 0;
}

static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
		    / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}

static void i830_cleanup(void)
{
	if (intel_private.i8xx_flush_page) {
		kunmap(intel_private.i8xx_page);
		intel_private.i8xx_flush_page = NULL;
	}

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}

static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		i830_cleanup();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB.  So, to get the previous contents of
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out.  It appears to work.
 */
static void i830_chipset_flush(void)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel(addr | pte_flags, intel_private.gtt + entry);
}

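/*
 * Point the chipset at the page table and flip the enable bits in
 * GMCH_CTRL and PGETBL_CTL; the readback flushes the posted PCI write.
 */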
static void intel_enable_gtt(void)
{
	u32 gma_addr;
	u16 gmch_ctrl;

	if (INTEL_GTT_GEN == 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
}

static int i830_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i830_setup_flush();

	return 0;
}

static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	int i;

	intel_enable_gtt();

	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	for (i = intel_private.base.gtt_stolen_entries;
			i < intel_private.base.gtt_total_entries; i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);	/* PCI Posting. */

	global_cache_flush();

	return 0;
}

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
					unsigned int sg_len,
					unsigned int pg_start,
					unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg entries may merge pages, but the GTT needs a separate
	 * address for each page */
	for_each_sg(sg_list, sg, sg_len, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr,
							  j, flags);
			j++;
		}
	}
	readl(intel_private.gtt+j-1);
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i, j;
	int ret = -EINVAL;

	if (mem->page_count == 0)
		goto out;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
			   pg_start, intel_private.base.gtt_stolen_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
		ret = intel_agp_map_memory(mem);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	} else {
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			dma_addr_t addr = page_to_phys(mem->pages[i]);
			intel_private.driver->write_entry(addr,
							  j, type);
		}
		readl(intel_private.gtt+j-1);
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.base.gtt_stolen_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
		intel_agp_unmap_memory(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt+i-1);

	return 0;
}

static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
{
	intel_private.driver->chipset_flush();
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp resource, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {
		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp resource, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* i965 PTEs carry physical address bits 35:32 in bits 7:4 */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}

static bool gen6_check_flags(unsigned int flags)
{
	return true;
}

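/*
 * Gen6 PTEs encode a cacheability level (LLC, LLC+MLC or uncached) plus
 * the GFDT bit, instead of the single gen2-5 "system cached" flag.
 */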
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
	u32 pte_flags;

	if (type_mask == AGP_USER_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

	/* gen6 PTEs carry physical address bits 39:32 in bits 11:4 */
	addr |= (addr >> 28) & 0xff0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}

static void gen6_cleanup(void)
{
}

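/*
 * Shared MMIO/GTT setup for all 9xx-and-newer chipsets: gen3 has a
 * separate PTE base register, while gen4+ place the GTT at a fixed
 * offset into the register BAR.
 */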
static int i9xx_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	if (INTEL_GTT_GEN == 3) {
		u32 gtt_addr;

		pci_read_config_dword(intel_private.pcidev,
				      I915_PTEADDR, &gtt_addr);
		intel_private.gtt_bus_addr = gtt_addr;
	} else {
		u32 gtt_offset;

		switch (INTEL_GTT_GEN) {
		case 5:
		case 6:
			gtt_offset = MB(2);
			break;
		case 4:
		default:
			gtt_offset = KB(512);
			break;
		}
		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
	}

	intel_private.pte_bus_addr =
		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;

	intel_i9xx_setup_flush();

	return 0;
}

static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};

static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.chipset_flush		= intel_fake_agp_chipset_flush,
};

static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
	.dma_mask_size = 32,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
	.gen = 6,
	.setup = i9xx_setup,
	.cleanup = gen6_cleanup,
	.write_entry = gen6_write_entry,
	.dma_mask_size = 40,
	.check_flags = gen6_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * gmch_driver and gtt_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct agp_bridge_driver *gmch_driver;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&intel_fake_agp_driver, &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&intel_fake_agp_driver, &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&intel_fake_agp_driver, &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&intel_fake_agp_driver, &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&intel_fake_agp_driver, &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&intel_fake_agp_driver, &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
	{ 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}

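/*
 * Match the integrated graphics device against the table above, hook up
 * the right (fake-)agp bridge and gtt driver, and initialize the GTT.
 * Returns 1 on success, 0 if no supported GMCH was found or init failed.
 */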
int intel_gmch_probe(struct pci_dev *pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;
	bridge->driver = NULL;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			bridge->driver =
				intel_gtt_chipsets[i].gmch_driver;
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!bridge->driver)
		return 0;

	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	intel_private.bridge_dev = pci_dev_get(pdev);

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	mask = intel_private.driver->dma_mask_size;
	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"failed to set %d-bit dma mask on gfx device\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	if (bridge->driver == &intel_810_driver)
		return 1;

	if (intel_gtt_init() != 0)
		return 0;

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);
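
/*
 * Usage sketch (not part of this driver): after intel_gmch_probe() has
 * succeeded, a gem-based drm driver can read the probed GTT geometry
 * through intel_gtt_get(), roughly like below. setup_aperture() is a
 * hypothetical consumer shown only to illustrate the exported interface:
 *
 *	struct intel_gtt *gtt = intel_gtt_get();
 *
 *	setup_aperture(gtt->gtt_mappable_entries << PAGE_SHIFT,
 *		       gtt->gtt_stolen_entries << PAGE_SHIFT);
 */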

void intel_gmch_remove(struct pci_dev *pdev)
{
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");