Lines Matching +full:4 +full:- +full:temp

8  * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
28 * The i460 can operate with large (4MB) pages, but there is no sane way to support this
41 #define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
42 #define I460_SRAM_IO_DISABLE (1 << 4)
47 /* Control bits for Out-Of-GART coherency and Burst Write Combining */
52 * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the
75 /* structure for tracking partial use of 4MB GART pages: */
77 unsigned long *alloced_map; /* bitmap of kernel-pages in use */
87 * The 32GB aperture is only available with a 4M GART page size. Due to the
91 {32768, 0, 0, 4},
107 u8 temp; in i460_fetch_size() local
111 pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp); in i460_fetch_size()
112 i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12; in i460_fetch_size()
117 "I/O (GART) page-size %luKB doesn't match expected " in i460_fetch_size()
119 1UL << (i460.io_page_shift - 10), in i460_fetch_size()
124 values = A_SIZE_8(agp_bridge->driver->aperture_sizes); in i460_fetch_size()
126 pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); in i460_fetch_size()
129 if (temp & I460_SRAM_IO_DISABLE) { in i460_fetch_size()
136 if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { in i460_fetch_size()
137 printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); in i460_fetch_size()
142 if (temp & I460_BAPBASE_ENABLE) in i460_fetch_size()
147 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { in i460_fetch_size()
153 values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12); in i460_fetch_size()
157 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { in i460_fetch_size()
159 if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { in i460_fetch_size()
160 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); in i460_fetch_size()
161 agp_bridge->aperture_size_idx = i; in i460_fetch_size()
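
The num_entries assignment at file line 153 turns an aperture size in megabytes into a GATT entry count: size << 8 gives the number of 4KB pages (256 per MB), and the right shift by I460_IO_PAGE_SHIFT - 12 folds that down to one entry per GART I/O page. A minimal user-space sketch of the same arithmetic, with the two io_page_shift values (12 for 4KB, 22 for 4MB GART pages) taken from the GXBCTL decode above:

#include <stdio.h>

/* Entry count for an aperture of size_mb megabytes, given the GART I/O
 * page shift (12 for 4KB pages, 22 for 4MB pages). */
static unsigned long num_entries(unsigned long size_mb, unsigned int io_page_shift)
{
	/* size_mb << 8 == number of 4KB pages; shift down to I/O-page granularity. */
	return (size_mb << 8) >> (io_page_shift - 12);
}

int main(void)
{
	printf("%lu\n", num_entries(256, 12));   /* 256MB, 4KB pages -> 65536 entries */
	printf("%lu\n", num_entries(32768, 22)); /* 32GB,  4MB pages -> 8192 entries  */
	return 0;
}
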
181 u8 temp; in i460_write_agpsiz() local
183 pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp); in i460_write_agpsiz()
184 pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, in i460_write_agpsiz()
185 ((temp & ~I460_AGPSIZ_MASK) | size_value)); in i460_write_agpsiz()
192 previous_size = A_SIZE_8(agp_bridge->previous_size); in i460_cleanup()
193 i460_write_agpsiz(previous_size->size_value); in i460_cleanup()
204 } temp; in i460_configure() local
209 temp.large = 0; in i460_configure()
211 current_size = A_SIZE_8(agp_bridge->current_size); in i460_configure()
212 i460_write_agpsiz(current_size->size_value); in i460_configure()
216 * This has to be done since the AGP aperture can be above 4GB on in i460_configure()
219 pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0])); in i460_configure()
220 pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1])); in i460_configure()
223 agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1); in i460_configure()
225 pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch); in i460_configure()
226 pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, in i460_configure()
234 size = current_size->num_entries * sizeof(i460.lp_desc[0]); in i460_configure()
237 return -ENOMEM; in i460_configure()
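
File lines 219-223 read the 64-bit aperture base as two 32-bit config-space dwords and combine them through the small[]/large union before clearing the low control bits. A stand-alone sketch of that combination, assuming a little-endian host and using made-up values in place of the PCI config reads:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Stand-ins for the two 32-bit reads of the dynamic APBASE/BAPBASE
	 * register pair; the values here are made up for illustration. */
	uint32_t lo = 0x00000008, hi = 0x00000001;

	union {
		uint64_t large;
		uint32_t small[2];
	} temp;

	/* Assumes a little-endian host, as on the ia64 systems the 460GX serves. */
	temp.small[0] = lo;
	temp.small[1] = hi;

	/* Clear the low three bits, as the driver does with ~((1UL << 3) - 1). */
	uint64_t gart_bus_addr = temp.large & ~((1ULL << 3) - 1);

	printf("aperture base: 0x%llx\n", (unsigned long long)gart_bus_addr);
	return 0;
}
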
245 void *temp; in i460_create_gatt_table() local
250 temp = agp_bridge->current_size; in i460_create_gatt_table()
251 page_order = A_SIZE_8(temp)->page_order; in i460_create_gatt_table()
252 num_entries = A_SIZE_8(temp)->num_entries; in i460_create_gatt_table()
257 return -ENOMEM; in i460_create_gatt_table()
261 agp_bridge->gatt_table_real = NULL; in i460_create_gatt_table()
262 agp_bridge->gatt_table = NULL; in i460_create_gatt_table()
263 agp_bridge->gatt_bus_addr = 0; in i460_create_gatt_table()
267 WR_FLUSH_GATT(i - 1); in i460_create_gatt_table()
274 void *temp; in i460_free_gatt_table() local
276 temp = agp_bridge->current_size; in i460_free_gatt_table()
278 num_entries = A_SIZE_8(temp)->num_entries; in i460_free_gatt_table()
282 WR_FLUSH_GATT(num_entries - 1); in i460_free_gatt_table()
298 void *temp; in i460_insert_memory_small_io_page() local
301 mem, pg_start, type, page_to_phys(mem->pages[0])); in i460_insert_memory_small_io_page()
303 if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) in i460_insert_memory_small_io_page()
304 return -EINVAL; in i460_insert_memory_small_io_page()
308 temp = agp_bridge->current_size; in i460_insert_memory_small_io_page()
309 num_entries = A_SIZE_8(temp)->num_entries; in i460_insert_memory_small_io_page()
311 if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { in i460_insert_memory_small_io_page()
313 return -EINVAL; in i460_insert_memory_small_io_page()
317 while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { in i460_insert_memory_small_io_page()
321 return -EBUSY; in i460_insert_memory_small_io_page()
327 for (i = 0, j = io_pg_start; i < mem->page_count; i++) { in i460_insert_memory_small_io_page()
328 paddr = page_to_phys(mem->pages[i]); in i460_insert_memory_small_io_page()
330 WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type)); in i460_insert_memory_small_io_page()
332 WR_FLUSH_GATT(j - 1); in i460_insert_memory_small_io_page()
346 for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) in i460_remove_memory_small_io_page()
348 WR_FLUSH_GATT(i - 1); in i460_remove_memory_small_io_page()
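
In the small-page path a kernel page is larger than the 4KB GART page, so each one occupies I460_IOPAGES_PER_KPAGE consecutive GATT slots; the range check at file line 311 therefore scales mem->page_count by that factor, and the driver scales pg_start the same way when computing io_pg_start. A sketch of the index math, assuming 16KB kernel pages as on ia64:

#include <stdio.h>

#define PAGE_SHIFT          14  /* assumed 16KB kernel pages (ia64) */
#define I460_IO_PAGE_SHIFT  12  /* 4KB GART pages in the small-page build */
#define I460_IOPAGES_PER_KPAGE (1UL << (PAGE_SHIFT - I460_IO_PAGE_SHIFT))

int main(void)
{
	unsigned long pg_start = 3, page_count = 2;

	/* Scale the kernel-page index and count into GATT-slot units. */
	unsigned long io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
	unsigned long io_pg_end   = io_pg_start + I460_IOPAGES_PER_KPAGE * page_count;

	printf("GATT slots [%lu, %lu) for %lu kernel pages starting at %lu\n",
	       io_pg_start, io_pg_end, page_count, pg_start);
	return 0;
}
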
368 unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT; in i460_alloc_large_page()
371 lp->page = alloc_pages(GFP_KERNEL, order); in i460_alloc_large_page()
372 if (!lp->page) { in i460_alloc_large_page()
373 printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); in i460_alloc_large_page()
374 return -ENOMEM; in i460_alloc_large_page()
377 map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8; in i460_alloc_large_page()
378 lp->alloced_map = kzalloc(map_size, GFP_KERNEL); in i460_alloc_large_page()
379 if (!lp->alloced_map) { in i460_alloc_large_page()
380 __free_pages(lp->page, order); in i460_alloc_large_page()
382 return -ENOMEM; in i460_alloc_large_page()
385 lp->paddr = page_to_phys(lp->page); in i460_alloc_large_page()
386 lp->refcount = 0; in i460_alloc_large_page()
387 atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); in i460_alloc_large_page()
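
The map_size expression at file line 377 rounds the per-GART-page allocation bitmap up to a whole number of longs (in bits) and then converts bits to bytes for kzalloc(). A small sketch of the rounding, assuming 64-bit longs and 16KB kernel pages so that I460_KPAGES_PER_IOPAGE comes out to 256:

#include <stdio.h>

#define BITS_PER_LONG 64        /* assumed 64-bit host */
#define I460_IO_PAGE_SHIFT 22   /* 4MB GART pages in the large-page build */
#define PAGE_SHIFT 14           /* assumed 16KB kernel pages */
#define I460_KPAGES_PER_IOPAGE (1UL << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))

int main(void)
{
	/* Round the bit count up to a multiple of BITS_PER_LONG, then to bytes. */
	unsigned long map_size =
		((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG) / 8;

	/* 256 bits -> 4 longs -> 32 bytes of bitmap per 4MB GART page. */
	printf("bitmap bytes per 4MB GART page: %lu\n", map_size);
	return 0;
}
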
393 kfree(lp->alloced_map); in i460_free_large_page()
394 lp->alloced_map = NULL; in i460_free_large_page()
396 __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT); in i460_free_large_page()
397 atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); in i460_free_large_page()
405 void *temp; in i460_insert_memory_large_io_page() local
407 if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) in i460_insert_memory_large_io_page()
408 return -EINVAL; in i460_insert_memory_large_io_page()
410 temp = agp_bridge->current_size; in i460_insert_memory_large_io_page()
411 num_entries = A_SIZE_8(temp)->num_entries; in i460_insert_memory_large_io_page()
415 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_insert_memory_large_io_page()
417 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; in i460_insert_memory_large_io_page()
421 return -EINVAL; in i460_insert_memory_large_io_page()
426 if (!lp->alloced_map) in i460_insert_memory_large_io_page()
433 if (test_bit(idx, lp->alloced_map)) in i460_insert_memory_large_io_page()
434 return -EBUSY; in i460_insert_memory_large_io_page()
439 if (!lp->alloced_map) { in i460_insert_memory_large_io_page()
442 return -ENOMEM; in i460_insert_memory_large_io_page()
443 pg = lp - i460.lp_desc; in i460_insert_memory_large_io_page()
445 lp->paddr, 0)); in i460_insert_memory_large_io_page()
453 mem->pages[i] = lp->page; in i460_insert_memory_large_io_page()
454 __set_bit(idx, lp->alloced_map); in i460_insert_memory_large_io_page()
455 ++lp->refcount; in i460_insert_memory_large_io_page()
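
A mapping request in the large-page path can straddle several 4MB GART pages; the divisions at file lines 415-417 pick the first and last lp_desc descriptor touched, and the remainders give the kernel-page offset inside each (the start-side computations mirror the quoted end-side ones). A short sketch of that bookkeeping, assuming 256 kernel pages per 4MB GART page:

#include <stdio.h>

#define I460_KPAGES_PER_IOPAGE 256UL  /* assumed: 4MB GART page / 16KB kernel page */

int main(void)
{
	unsigned long pg_start = 200, page_count = 120;

	/* First and last 4MB descriptor touched by the request. */
	unsigned long first = pg_start / I460_KPAGES_PER_IOPAGE;
	unsigned long last  = (pg_start + page_count - 1) / I460_KPAGES_PER_IOPAGE;

	/* Kernel-page offsets inside the first and last descriptor. */
	unsigned long start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
	unsigned long end_offset   = (pg_start + page_count - 1) % I460_KPAGES_PER_IOPAGE;

	printf("descriptors %lu..%lu, offsets %lu..%lu\n",
	       first, last, start_offset, end_offset);
	return 0;
}
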
466 void *temp; in i460_remove_memory_large_io_page() local
468 temp = agp_bridge->current_size; in i460_remove_memory_large_io_page()
469 num_entries = A_SIZE_8(temp)->num_entries; in i460_remove_memory_large_io_page()
473 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; in i460_remove_memory_large_io_page()
475 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; in i460_remove_memory_large_io_page()
482 mem->pages[i] = NULL; in i460_remove_memory_large_io_page()
483 __clear_bit(idx, lp->alloced_map); in i460_remove_memory_large_io_page()
484 --lp->refcount; in i460_remove_memory_large_io_page()
488 if (lp->refcount == 0) { in i460_remove_memory_large_io_page()
489 pg = lp - i460.lp_desc; in i460_remove_memory_large_io_page()
521 * multi-kernel-page alloc might fit inside of an already allocated GART page).
552 return bridge->driver->masks[0].mask in i460_mask_memory()
553 | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12); in i460_mask_memory()
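
i460_mask_memory() packs the 4KB frame number of the mapped physical address into the low bits of a 32-bit GATT entry: the address is truncated to the GART page boundary, the frame bits are kept with 0xfffff000, shifted down by 12, and OR'd with the driver's GATT mask. A worked sketch with a placeholder mask value (the real masks[0].mask is not shown in the lines above):

#include <stdio.h>
#include <stdint.h>

#define I460_IO_PAGE_SHIFT 12  /* assumed 4KB GART pages */

/* Build a GATT entry from a valid/coherency mask and a physical address. */
static uint32_t mask_memory(uint32_t gatt_mask, uint64_t addr)
{
	return gatt_mask |
	       (uint32_t)(((addr & ~((1ULL << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
}

int main(void)
{
	/* 0x03000000 is an illustrative mask, not the hardware's real bits. */
	printf("GATT entry: 0x%08x\n", mask_memory(0x03000000u, 0x12345678ULL));
	/* frame 0x12345 OR'd with the mask -> 0x03012345 */
	return 0;
}
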
598 return -ENODEV; in agp_intel_i460_probe()
602 return -ENOMEM; in agp_intel_i460_probe()
604 bridge->driver = &intel_i460_driver; in agp_intel_i460_probe()
605 bridge->dev = pdev; in agp_intel_i460_probe()
606 bridge->capndx = cap_ptr; in agp_intel_i460_probe()
637 .name = "agpgart-intel-i460",
646 return -EINVAL; in agp_intel_i460_init()