/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = kvmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(agp_alloc_page_array);

static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
		return NULL;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
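/*
 * Number of page pointers that fit in one page; with 4 KiB pages and 8-byte
 * entries on a 64-bit kernel this works out to 512.  agp_allocate_memory()
 * uses it below to size the scratch page array.
 */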

/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages.
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY, which
 * maps to physical RAM. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
				       size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;
	int cur_memory;

	if (!bridge)
		return NULL;

	cur_memory = atomic_read(&bridge->current_memory_agp);
	if ((cur_memory + page_count > bridge->max_memory_agp) ||
	    (cur_memory + page_count < page_count))
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
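/*
 * Illustrative sketch (not part of the original driver): a typical caller
 * such as a DRM driver allocates, binds and eventually frees AGP memory
 * roughly as follows.  The bridge pointer, page count and pg_start offset
 * are assumptions made up for the example.
 *
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *	if (agp_bind_memory(mem, pg_start) != 0) {
 *		agp_free_memory(mem);
 *		return -EBUSY;
 *	}
 *	...
 *	agp_unbind_memory(mem);
 *	agp_free_memory(mem);
 */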


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);
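/*
 * Worked example (not in the original source): a 64 MB aperture with 4 KiB
 * pages corresponds to 16384 GATT entries.  If 1 MB of the aperture is
 * reserved, agp_return_size() reports 63 MB and agp_num_entries() reports
 * 16384 - 256 = 16128 entries.
 */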


/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);
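/*
 * Illustrative sketch (not from this file): a caller such as a DRM driver
 * typically fills an agp_kern_info on its stack to discover where the
 * aperture lives, e.g.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) == 0)
 *		pr_info("aperture at %#lx, %zu MB\n",
 *			info.aper_base, info.aper_size);
 */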

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL.
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);


/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 *	The RATE field indicates the data transfer rates supported by this
	 *	device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}
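/*
 * Worked example (not in the original source): with a bridge advertising
 * 1x/2x/4x, a card advertising 1x/2x and a requested mode of 1x|2x, the
 * request is first reduced to the single rate 2x, and the intersection above
 * then leaves bridge_agpstat programmed for 2x operation.
 */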

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware.
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * The caller hasn't a clue what it is doing. The bridge is in
		 * 3.0 mode and has been passed a 3.0 mode, but with 2.x speed
		 * bits set.  AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If we didn't specify an AGP mode, we see if both
		 * the graphics card and the bridge can do x8, and use it if so.
		 * If not, we fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}
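/*
 * Worked example (not in the original source): a request for x8 against a
 * bridge or card that only advertises x4 is logged and quietly downgraded,
 * leaving bridge_agpstat programmed for AGP3 x4.
 */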


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
	struct pci_dev *device = NULL;
	int mode;

	mode = bridge_agpstat & 0x7;
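	/*
	 * In the AGP3 status/command registers the rate field encodes x4 as 1
	 * and x8 as 2, so scale the value purely for the log message below.
	 */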
	if (agp_v3)
		mode *= 4;

	for_each_pci_dev(device) {
		u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;

		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
			 agp_v3 ? 3 : 2, mode);
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
	}
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
	u32 ncapid;

	/* Exit early if already set by errata workarounds. */
	if (bridge->major_version != 0)
		return;

	pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
	bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
	bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
	u32 bridge_agpstat, temp;

	get_agp_version(agp_bridge);

	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
		 agp_bridge->major_version, agp_bridge->minor_version);

	pci_read_config_dword(agp_bridge->dev,
			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

	bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
	if (bridge_agpstat == 0)
		/* Something bad happened. FIXME: Return error code? */
		return;

	bridge_agpstat |= AGPSTAT_AGP_ENABLE;

	/* Do AGP version specific frobbing. */
	if (bridge->major_version >= 3) {
		if (bridge->mode & AGPSTAT_MODE_3_0) {
			/* If we have 3.5, we can do the isoch stuff. */
			if (bridge->minor_version >= 5)
				agp_3_5_enable(bridge);
			agp_device_command(bridge_agpstat, true);
			return;
		} else {
			/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
			bridge_agpstat &= ~(7<<10);
			pci_read_config_dword(bridge->dev,
					      bridge->capndx+AGPCTRL, &temp);
			temp |= (1<<9);
			pci_write_config_dword(bridge->dev,
					       bridge->capndx+AGPCTRL, temp);

			dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}

	/* AGP v<3 */
	agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);

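/*
 * Allocate and map the GATT for bridges using a single-level table.  If the
 * table for the current aperture size cannot be allocated, progressively
 * smaller aperture sizes are tried.  The table is mapped uncached (or via
 * ioremap on non-x86) and every entry is initialised with the scratch page.
 */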
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
			/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				page_order = num_entries = 0;
				break;
			}

			table = alloc_gatt_pages(page_order);

			if (table == NULL) {
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
				/* These cases will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = alloc_gatt_pages(page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	bridge->driver->cache_flush();
#ifdef CONFIG_X86
	if (set_memory_uc((unsigned long)table, 1 << page_order))
		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");

	bridge->gatt_table = (u32 __iomem *)table;
#else
	bridge->gatt_table = ioremap(virt_to_phys(table),
				     (PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();
#endif

	if (bridge->gatt_table == NULL) {
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_gatt_pages(table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		page_order = A_SIZE_8(temp)->page_order;
		break;
	case U16_APER_SIZE:
		page_order = A_SIZE_16(temp)->page_order;
		break;
	case U32_APER_SIZE:
		page_order = A_SIZE_32(temp)->page_order;
		break;
	case FIXED_APER_SIZE:
		page_order = A_SIZE_FIX(temp)->page_order;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		page_order = 0;
		break;
	}

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table. */

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
	iounmap(bridge->gatt_table);
#endif
	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_gatt_pages(bridge->gatt_table_real, page_order);

	agp_gatt_table = NULL;
	bridge->gatt_table = NULL;
	bridge->gatt_table_real = NULL;
	bridge->gatt_bus_addr = 0;

	return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	struct agp_bridge_data *bridge;
	int mask_type, num_entries;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if (type != mem->type)
		return -EINVAL;

	num_entries = agp_num_entries();
	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: bogus, should encode addresses > 4GB */
	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
	}
	readl(bridge->gatt_table+i-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
	agp_free_page_array(curr);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
	struct agp_memory *new;
	int i;
	int pages;

	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
	new = agp_create_user_memory(page_count);
	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++)
		new->pages[i] = NULL;
	new->page_count = 0;
	new->type = type;
	new->num_scratch_pages = pages;

	return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory. They also handle incrementing the current_memory_agp value, which is checked
 * against a maximum value.
 */

int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
	struct page *page;
	int i, ret = -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		/* agp_free_memory() needs gart address */
		if (page == NULL)
			goto out;

#ifndef CONFIG_X86
		map_page_into_agp(page);
#endif
		get_page(page);
		atomic_inc(&agp_bridge->current_memory_agp);

		mem->pages[i] = page;
		mem->page_count++;
	}

#ifdef CONFIG_X86
	set_pages_array_uc(mem->pages, num_pages);
#endif
	ret = 0;
out:
	return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);

struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return NULL;

	map_page_into_agp(page);

	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);

void agp_generic_destroy_pages(struct agp_memory *mem)
{
	int i;
	struct page *page;

	if (!mem)
		return;

#ifdef CONFIG_X86
	set_pages_array_wb(mem->pages, mem->page_count);
#endif

	for (i = 0; i < mem->page_count; i++) {
		page = mem->pages[i];

#ifndef CONFIG_X86
		unmap_page_from_agp(page);
#endif
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
		mem->pages[i] = NULL;
	}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);

void agp_generic_destroy_page(struct page *page, int flags)
{
	if (page == NULL)
		return;

	if (flags & AGP_PAGE_DESTROY_UNMAP)
		unmap_page_from_agp(page);

	if (flags & AGP_PAGE_DESTROY_FREE) {
		put_page(page);
		__free_page(page);
		atomic_dec(&agp_bridge->current_memory_agp);
	}
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */


/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	if (!bridge)
		return;
	bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
	if (list_empty(&agp_bridges))
		return NULL;

	return agp_bridge;
}

static void ipi_handler(void *null)
{
	flush_agp_cache();
}

void global_cache_flush(void)
{
	on_each_cpu(ipi_handler, NULL, 1);
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
				      dma_addr_t addr, int type)
{
	/* memory type is ignored in the generic routine */
	if (bridge->driver->masks)
		return addr | bridge->driver->masks[0].mask;
	else
		return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);
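/*
 * Illustrative note (not in the original source): for many chipsets the
 * driver mask simply sets a "valid" bit, so a page at physical address
 * 0x12345000 with mask 0x00000001 ends up in the GATT as 0x12345001.
 */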

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
				  int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
	u16 temp_size;
	int i;
	struct aper_size_info_16 *values;

	pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp_size == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
						    AGP_APERTURE_BAR);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

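/*
 * Aperture size table for generic AGP3 bridges.  Each entry lists the
 * aperture size in MB, the number of GATT entries, the page order of the
 * GATT allocation, and the value written to the APSIZE register.
 */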
const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10, 0x000},
	{2048,  524288,  9, 0x800},
	{1024,  262144,  8, 0xc00},
	{ 512,  131072,  7, 0xe00},
	{ 256,   65536,  6, 0xf00},
	{ 128,   32768,  5, 0xf20},
	{  64,   16384,  4, 0xf30},
	{  32,    8192,  3, 0xf38},
	{  16,    4096,  2, 0xf3c},
	{   8,    2048,  1, 0xf3e},
	{   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
