/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = NULL;

	if (size <= 2*PAGE_SIZE)
		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (mem->pages == NULL) {
		mem->pages = vmalloc(size);
	}
}
EXPORT_SYMBOL(agp_alloc_page_array);
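
/*
 * Sizing note (illustrative figures, assuming 4 KiB pages and 8-byte
 * pointers): the page list needs page_count * sizeof(struct page *)
 * bytes, so anything up to 1024 pages (a 4 MiB region) fits within the
 * 2*PAGE_SIZE kmalloc limit above; only larger regions take the
 * vmalloc path.
 */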

static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
		return NULL;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 *	agp_free_memory - free memory associated with an agp_memory pointer.
 *
 *	@curr:		agp_memory pointer to be freed.
 *
 *	It is the only function that can be called when the backend is not owned
 *	by the caller.  (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
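
/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte longs):
 * ENTRIES_PER_PAGE is 512, so the scratch_pages computation in
 * agp_allocate_memory() below needs one scratch page for every 512
 * requested pages, e.g. a 1000-page request rounds up to 2.
 */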

/**
 *	agp_allocate_memory  -  allocate a group of pages of a certain type.
 *
 *	@bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 *	@page_count:	size_t argument of the number of pages
 *	@type:	u32 argument of the type of memory to be allocated.
 *
 *	Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 *	maps to physical ram.  Any other type is device dependent.
 *
 *	It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
					size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;
	int cur_memory;

	if (!bridge)
		return NULL;

	cur_memory = atomic_read(&bridge->current_memory_agp);
	if ((cur_memory + page_count > bridge->max_memory_agp) ||
	    (cur_memory + page_count < page_count))
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
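
/*
 * Usage sketch (illustrative only, not called from this file): a typical
 * consumer allocates normal memory against a bridge, binds it at an
 * aperture offset, and later unbinds and frees it.  The "bridge" pointer
 * and the offset 0 are assumptions made for the example.
 *
 *	struct agp_memory *mem;
 *	int ret;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *	ret = agp_bind_memory(mem, 0);
 *	if (ret != 0) {
 *		agp_free_memory(mem);
 *		return ret;
 *	}
 *	...
 *	agp_unbind_memory(mem);
 *	agp_free_memory(mem);
 */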


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);
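
/*
 * Worked example (illustrative): agp_return_size() reports the aperture
 * in megabytes and agp_num_entries() in PAGE_SIZE slots, both minus the
 * reserved region.  With a 64 MB aperture, 4 KiB pages and 8 MB reserved,
 * that is 64 - 8 = 56 MB and 16384 - 2048 = 14336 usable GATT entries.
 */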


/**
 *	agp_copy_info  -  copy bridge state information
 *
 *	@bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 *	@info:		agp_kern_info pointer.  The caller should ensure that this pointer is valid.
 *
 *	This function copies information about the agp bridge device and the state of
 *	the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);
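
/*
 * Usage sketch (illustrative only): a caller that just wants the aperture
 * geometry can do the following; the "bridge" pointer is an assumption
 * made for the example.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) != 0)
 *		return -EIO;
 *	pr_info("aperture at 0x%lx, %zu MB\n", info.aper_base, info.aper_size);
 */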

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 *	agp_bind_memory  -  Bind an agp_memory structure into the GATT.
 *
 *	@curr:		agp_memory pointer
 *	@pg_start:	an offset into the graphics aperture translation table
 *
 *	It returns -EINVAL if the pointer == NULL.
 *	It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 *	agp_unbind_memory  -  Removes an agp_memory structure from the GATT
 *
 * @curr:	agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);


/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *   The RATE field indicates the data transfer rates supported by this
	 *   device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
		case 0:
			printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
			*requested_mode |= AGPSTAT2_1X;
			break;
		case 1:
		case 2:
			break;
		case 3:
			*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
			break;
		case 4:
			break;
		case 5:
		case 6:
		case 7:
			*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4 */
			break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}
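
/*
 * Worked example (illustrative): the low three AGPSTAT bits encode the
 * supported rates (bit 0 = 1x, bit 1 = 2x, bit 2 = 4x).  A request of 7
 * (all rates) therefore has 1x and 2x masked off above and negotiates 4x,
 * provided both bridge and card advertise AGPSTAT2_4X.
 */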

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * The caller hasn't a clue what it is doing. The bridge is in
		 * 3.0 mode and has been passed a 3.0 mode, but with 2.x speed
		 * bits set. Convert AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
						current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
					current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If no AGP mode was specified, we check whether both the
		 * graphics card and the bridge can do x8, and use it if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * OK, here we have an AGP device. Disable impossible
	 * settings, and adjust the read queue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
	     min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
		 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
		 (vga_agpstat & AGPSTAT_FW) &&
		 (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);
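
/*
 * Worked example (illustrative): with agp_v3 set, a bridge_agpstat whose
 * low rate bits are 0x2 is reported as "8x mode" (2 * 4), while the same
 * value on an AGP 2.x bridge would mean 2x.  The full bridge_agpstat word
 * is written to PCI_AGP_COMMAND of every AGP-capable device either way.
 */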


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
		      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
		    /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
		    bridge_agpstat &= ~(7<<10);
		    pci_read_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, &temp);
		    temp |= (1<<9);
		    pci_write_config_dword(bridge->dev,
					bridge->capndx+AGPCTRL, temp);

		    dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
				/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	if (set_memory_uc((unsigned long)table, 1 << page_order))
		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");

	bridge->gatt_table = (u32 __iomem *)table;
#else
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
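
/*
 * Worked example (illustrative): each GATT entry is a 32-bit word covering
 * one PAGE_SIZE page of aperture, so a 256 MB aperture with 4 KiB pages
 * needs 65536 entries, i.e. a 256 KiB table; that matches the page_order
 * of 6 (64 pages) listed for 256 MB in agp3_generic_sizes[] below.
 */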

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0) num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type, num_entries;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	num_entries = agp_num_entries();
	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory.  They also handle incrementing the current_memory_agp value, which is checked
 * against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page * page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page * page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */


/**
 * agp_enable  -  initialise the agp point-to-point connection.
 *
 * @bridge:	an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode:	agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers,
 * agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);
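
/*
 * Wiring sketch (hypothetical driver, illustrative only): a chipset driver
 * typically plugs these generic helpers straight into its agp_bridge_driver
 * ops and only overrides what its hardware needs, along the lines of:
 *
 *	static const struct agp_bridge_driver example_driver = {
 *		.owner			= THIS_MODULE,
 *		.size_type		= U16_APER_SIZE,
 *		.aperture_sizes		= agp3_generic_sizes,
 *		.num_aperture_sizes	= AGP_GENERIC_SIZES_ENTRIES,
 *		.fetch_size		= agp3_generic_fetch_size,
 *		.configure		= agp3_generic_configure,
 *		.cleanup		= agp3_generic_cleanup,
 *		.tlb_flush		= agp3_generic_tlbflush,
 *		.cache_flush		= global_cache_flush,
 *		.create_gatt_table	= agp_generic_create_gatt_table,
 *		.free_gatt_table	= agp_generic_free_gatt_table,
 *		.insert_memory		= agp_generic_insert_memory,
 *		.remove_memory		= agp_generic_remove_memory,
 *		.alloc_by_type		= agp_generic_alloc_by_type,
 *		.free_by_type		= agp_generic_free_by_type,
 *		.agp_alloc_page		= agp_generic_alloc_page,
 *		.agp_alloc_pages	= agp_generic_alloc_pages,
 *		.agp_destroy_page	= agp_generic_destroy_page,
 *		.agp_destroy_pages	= agp_generic_destroy_pages,
 *		.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 *		.mask_memory		= agp_generic_mask_memory,
 *		.agp_enable		= agp_generic_enable,
 *	};
 */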

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
						    AGP_APERTURE_BAR);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288, 9, 0x800},
	{1024,  262144, 8, 0xc00},
	{ 512,  131072, 7, 0xe00},
	{ 256,   65536, 6, 0xf00},
	{ 128,   32768, 5, 0xf20},
	{  64,   16384, 4, 0xf30},
	{  32,    8192, 3, 0xf38},
	{  16,    4096, 2, 0xf3c},
	{   8,    2048, 1, 0xf3e},
	{   4,    1024, 0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
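
/*
 * Reading the table above (illustrative): each row is {size in MB,
 * num_entries, page_order, size_value}.  The 256 MB row, for instance,
 * has 65536 entries (256 MB / 4 KiB per GATT entry), a GATT spanning
 * 2^6 pages, and 0xf00 as the value agp3_generic_configure() writes to
 * the AGPAPSIZE register.
 */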
1425