/*
 *	linux/arch/alpha/kernel/core_titan.c
 *
 * Code common to all TITAN core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_titan.h>
#undef __EXTERN_INLINE

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "pci_impl.h"

/* Save Titan configuration data as the console had it set up.  */

struct
{
	unsigned long wsba[4];
	unsigned long wsm[4];
	unsigned long tba[4];
} saved_config[4] __attribute__((common));

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif


/*
 * Routines to access TIG registers.
 */
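/*
 * Each TIG register appears to occupy its own 64-byte-spaced slot in
 * TITAN_TIG_SPACE (hence the "offset << 6" below), with only the low
 * byte of the quadword carrying data.
 */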
static inline volatile unsigned long *
mk_tig_addr(int offset)
{
	return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
}

static inline u8
titan_read_tig(int offset, u8 value)
{
	volatile unsigned long *tig_addr = mk_tig_addr(offset);
	return (u8)(*tig_addr & 0xff);
}

static inline void
titan_write_tig(int offset, u8 value)
{
	volatile unsigned long *tig_addr = mk_tig_addr(offset);
	*tig_addr = (unsigned long)value;
}


/*
 * Given a bus, device, and function number, compute the resulting
 * configuration space address.  It is not safe to have concurrent
 * invocations of the configuration space access routines, but there
 * really shouldn't be any need for this.
 *
 * Note that all config space accesses use Type 1 address format.
 *
 * Note also that type 1 is determined by non-zero bus number.
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register number selects a DWORD (32 bit) register offset.  It is
 *	not shifted left by 2 bits here because "where" is already a byte
 *	offset; its bottom two bits select the byte within the DWORD.
 */

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	struct pci_controller *hose = pbus->sysdata;
	unsigned long addr;
	u8 bus = pbus->number;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
117 		 "pci_addr=0x%p, type1=0x%p)\n",
118 		 bus, device_fn, where, pci_addr, type1));
119 
120 	if (!pbus->parent) /* No parent means peer PCI bus. */
121 		bus = 0;
122         *type1 = (bus != 0);
123 
124         addr = (bus << 16) | (device_fn << 8) | where;
125 	addr |= hose->config_space_base;
126 
127 	*pci_addr = addr;
128 	DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
129 	return 0;
130 }
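
/*
 * For illustration (example values, not taken from real hardware): with
 * bus 1, device 2, function 0 (device_fn = 0x10) and where = 0x10, the
 * computation above yields (1 << 16) | (0x10 << 8) | 0x10 = 0x11010,
 * which is then OR'd with the hose's config_space_base to form the
 * final config space address.
 */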

static int
titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		  int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

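/*
 * The write paths below follow the usual Alpha MMIO idiom: store the
 * value, issue mb() to order it, then read the location back, which
 * (presumably) forces the posted write out to the device before the
 * routine returns.
 */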
static int
titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops titan_pci_ops =
{
	.read =		titan_read_config,
	.write =	titan_write_config,
};


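/*
 * Invalidate the PA chip's scatter-gather TLB for the DMA range
 * [start, end].  If start and end share bits <31:16> (i.e. fall within
 * the same 64KB-tagged region), a single targeted invalidate
 * (gtlbiv/atlbiv) suffices; otherwise the whole TLB is flushed via
 * gtlbia/atlbia.
 */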
void
titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	titan_pachip *pachip =
	  (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
	titan_pachip_port *port;
	volatile unsigned long *csr;
	unsigned long value;

	/* Get the right hose.  */
	port = &pachip->g_port;
	if (hose->index & 2)
		port = &pachip->a_port;

	/* We can invalidate up to 8 tlb entries in a go.  The flush
	   matches against <31:16> in the pci address.
	   Note that gtlbi* and atlbi* are in the same place in the g_port
	   and a_port, respectively, so the g_port offset can be used
	   even if hose is an a_port */
	csr = &port->port_specific.g.gtlbia.csr;
	if (((start ^ end) & 0xffff0000) == 0)
		csr = &port->port_specific.g.gtlbiv.csr;

	/* For TBIA, it doesn't matter what value we write.  For TBI,
	   it's the shifted tag bits.  */
	value = (start & 0xffff0000) >> 12;

	wmb();
	*csr = value;
	mb();
	*csr;
}

static int
titan_query_agp(titan_pachip_port *port)
{
	union TPAchipPCTL pctl;

	/* set up APCTL */
	pctl.pctl_q_whole = port->pctl.csr;

	return pctl.pctl_r_bits.apctl_v_agp_present;
}

static void __init
titan_init_one_pachip_port(titan_pachip_port *port, int index)
{
	struct pci_controller *hose;

	hose = alloc_pci_controller();
	if (index == 0)
		pci_isa_hose = hose;
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/*
	 * This is for userland consumption.  The 40-bit PIO bias that we
	 * use in the kernel through KSEG doesn't work in the page table
	 * based user mappings. (43-bit KSEG sign extends the physical
	 * address from bit 40 to hit the I/O bit - mapped addresses don't).
	 * So make sure we get the 43-bit PIO bias.
	 */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base
	  = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
	hose->dense_io_base
	  = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;

	hose->config_space_base = TITAN_CONF(index);
	hose->index = index;

	hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
	hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[index];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[index];
	hose->mem_space->flags = IORESOURCE_MEM;

	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on hose %d\n", index);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on hose %d\n", index);

	/*
	 * Save the existing PCI window translations.  SRM will
	 * need them when we go to reboot.
	 */
	saved_config[index].wsba[0] = port->wsba[0].csr;
	saved_config[index].wsm[0]  = port->wsm[0].csr;
	saved_config[index].tba[0]  = port->tba[0].csr;

	saved_config[index].wsba[1] = port->wsba[1].csr;
	saved_config[index].wsm[1]  = port->wsm[1].csr;
	saved_config[index].tba[1]  = port->tba[1].csr;

	saved_config[index].wsba[2] = port->wsba[2].csr;
	saved_config[index].wsm[2]  = port->wsm[2].csr;
	saved_config[index].tba[2]  = port->tba[2].csr;

	saved_config[index].wsba[3] = port->wsba[3].csr;
	saved_config[index].wsm[3]  = port->wsm[3].csr;
	saved_config[index].tba[3]  = port->tba[3].csr;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 on Titan is Scatter-Gather ONLY.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 2GB
	 * Window 2 is scatter-gather 1GB at 3GB
	 */
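	/*
	 * For reference (inferred from the values programmed below, not
	 * from chip documentation): WSBA appears to hold the window base
	 * plus enable bits (bit 0 = enable, bit 1 = scatter-gather), WSM
	 * the size mask, and TBA the translated base.  E.g. the 1GB
	 * direct window gets (0x40000000 - 1) & 0xfff00000 = 0x3ff00000
	 * as its WSM value.
	 */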
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_isa->align_entry = 8; /* 64KB for ISA */

	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
	hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */

	port->wsba[0].csr = hose->sg_isa->dma_base | 3;
	port->wsm[0].csr  = (hose->sg_isa->size - 1) & 0xfff00000;
	port->tba[0].csr  = virt_to_phys(hose->sg_isa->ptes);

	port->wsba[1].csr = __direct_map_base | 1;
	port->wsm[1].csr  = (__direct_map_size - 1) & 0xfff00000;
	port->tba[1].csr  = 0;

	port->wsba[2].csr = hose->sg_pci->dma_base | 3;
	port->wsm[2].csr  = (hose->sg_pci->size - 1) & 0xfff00000;
	port->tba[2].csr  = virt_to_phys(hose->sg_pci->ptes);

	port->wsba[3].csr = 0;

	/* Enable the Monster Window to make DAC pci64 possible.  */
	port->pctl.csr |= pctl_m_mwin;

	/*
	 * If it's an AGP port, initialize agplastwr.
	 */
	if (titan_query_agp(port))
		port->port_specific.a.agplastwr.csr = __direct_map_base;

	titan_pci_tbi(hose, 0, -1);
}

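/*
 * The hose numbering below encodes the port location: bit 0 selects the
 * PA chip (0 or 1) and bit 1 selects the G vs. A port, which is exactly
 * what titan_pci_tbi() relies on when it tests hose->index & 1 and & 2.
 */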
static void __init
titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

	/* Init the ports in hose order... */
	titan_init_one_pachip_port(&pachip0->g_port, 0);	/* hose 0 */
	if (pchip1_present)
		titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
	titan_init_one_pachip_port(&pachip0->a_port, 2);	/* hose 2 */
	if (pchip1_present)
		titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
}

static void __init
titan_init_vga_hose(void)
{
#ifdef CONFIG_VGA_HOSE
	u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);

	if (pu64[7] == 3) {	/* TERM_TYPE == graphics */
		struct pci_controller *hose;
		int h = (pu64[30] >> 24) & 0xff;	/* console hose # */

		/*
		 * Our hose numbering matches the console's, so just find
		 * the right one...
		 */
		for (hose = hose_head; hose; hose = hose->next) {
			if (hose->index == h) break;
		}

		if (hose) {
			printk("Console graphics on hose %d\n", hose->index);
			pci_vga_hose = hose;
		}
	}
#endif /* CONFIG_VGA_HOSE */
}

void __init
titan_init_arch(void)
{
#if 0
	printk("%s: titan_init_arch()\n", __FUNCTION__);
	printk("%s: CChip registers:\n", __FUNCTION__);
	printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
	printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
	printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
	printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
	printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
	printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
	printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
	printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);

	printk("%s: DChip registers:\n", __FUNCTION__);
	printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
	printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
	printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);
#endif

	boot_cpuid = __hard_smp_processor_id();

	/* With multiple PCI busses, we play with I/O as physical addrs.  */
	ioport_resource.end = ~0UL;

	/* PCI DMA Direct Mapping is 1GB at 2GB.  */
	__direct_map_base = 0x80000000;
	__direct_map_size = 0x40000000;

	/* Init the PA chip(s).  */
	titan_init_pachips(TITAN_pachip0, TITAN_pachip1);

	/* Check for graphic console location (if any).  */
	titan_init_vga_hose();
}

static void
titan_kill_one_pachip_port(titan_pachip_port *port, int index)
{
	port->wsba[0].csr = saved_config[index].wsba[0];
	port->wsm[0].csr  = saved_config[index].wsm[0];
	port->tba[0].csr  = saved_config[index].tba[0];

	port->wsba[1].csr = saved_config[index].wsba[1];
	port->wsm[1].csr  = saved_config[index].wsm[1];
	port->tba[1].csr  = saved_config[index].tba[1];

	port->wsba[2].csr = saved_config[index].wsba[2];
	port->wsm[2].csr  = saved_config[index].wsm[2];
	port->tba[2].csr  = saved_config[index].tba[2];

	port->wsba[3].csr = saved_config[index].wsba[3];
	port->wsm[3].csr  = saved_config[index].wsm[3];
	port->tba[3].csr  = saved_config[index].tba[3];
}

static void
titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
{
	int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;

	if (pchip1_present) {
		titan_kill_one_pachip_port(&pachip1->g_port, 1);
		titan_kill_one_pachip_port(&pachip1->a_port, 3);
	}
	titan_kill_one_pachip_port(&pachip0->g_port, 0);
	titan_kill_one_pachip_port(&pachip0->a_port, 2);
}

void
titan_kill_arch(int mode)
{
	titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
}


/*
 * IO map support.
 */

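/*
 * Rough summary (follows from the code below): the hose number is
 * carried in the high bits of the cookie (TITAN_HOSE_MASK).  Addresses
 * inside the 1GB direct-map window get a simple bias translation;
 * addresses inside the hose's scatter-gather arena are rebuilt page by
 * page from the arena's PTEs via get_vm_area(); anything else fails.
 */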
void __iomem *
titan_ioremap(unsigned long addr, unsigned long size)
{
	int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
	unsigned long baddr = addr & ~TITAN_HOSE_MASK;
	unsigned long last = baddr + size - 1;
	struct pci_controller *hose;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the addr.
	 */
#ifdef CONFIG_VGA_HOSE
	if (pci_vga_hose && __titan_is_mem_vga(addr)) {
		h = pci_vga_hose->index;
		addr += pci_vga_hose->mem_space->start;
	}
#endif

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == h)
			break;
	if (!hose)
		return NULL;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
		return (void __iomem *) vaddr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		    baddr <= last;
		    baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
		return (void __iomem *) vaddr;
	}

	return NULL;
}

void
titan_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr >= VMALLOC_START)
		vfree((void *)(PAGE_MASK & addr));
}

int
titan_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		return 1;
	else
		return (addr & 0x100000000UL) == 0;
}

#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(titan_ioremap);
EXPORT_SYMBOL(titan_iounmap);
EXPORT_SYMBOL(titan_is_mmio);
#endif

/*
 * AGP GART Support.
 */
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct titan_agp_aperture {
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;
};

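/*
 * Carve the AGP aperture out of the hose's existing scatter-gather
 * arena.  The third argument passed to iommu_reserve() below appears to
 * be an alignment mask, so pg_count - 1 aligns the aperture to its own
 * size.
 */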
static int
titan_agp_setup(alpha_agp_info *agp)
{
	struct titan_agp_aperture *aper;

	if (!alpha_agpgart_size)
		return -ENOMEM;

	aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
	if (aper == NULL)
		return -ENOMEM;

	aper->arena = agp->hose->sg_pci;
	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);
	if (aper->pg_start < 0) {
		printk(KERN_ERR "Failed to reserve AGP memory\n");
		kfree(aper);
		return -ENOMEM;
	}

	agp->aperture.bus_base =
		aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
	agp->aperture.size = aper->pg_count * PAGE_SIZE;
	agp->aperture.sysdata = aper;

	return 0;
}

static void
titan_agp_cleanup(alpha_agp_info *agp)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}

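/*
 * The rate handling below maps the generic AGP mode bits onto the
 * port's APCTL rate field: bit 1 of mode.bits.rate selects 2x, and the
 * 4x case is left disabled in the #if 0 block.  The hp_rd/lp_rd
 * request-queue depths are simply hard-coded.
 */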
static int
titan_agp_configure(alpha_agp_info *agp)
{
	union TPAchipPCTL pctl;
	titan_pachip_port *port = agp->private;
	pctl.pctl_q_whole = port->pctl.csr;

	/* Side-Band Addressing? */
	pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

	/* AGP Rate? */
	pctl.pctl_r_bits.apctl_v_agp_rate = 0;		/* 1x */
	if (agp->mode.bits.rate & 2)
		pctl.pctl_r_bits.apctl_v_agp_rate = 1;	/* 2x */
#if 0
	if (agp->mode.bits.rate & 4)
		pctl.pctl_r_bits.apctl_v_agp_rate = 2;	/* 4x */
#endif

	/* RQ Depth? */
	pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
	pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

	/*
	 * AGP Enable.
	 */
	pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

	/* Tell the user.  */
	printk("Enabling AGP: %dX%s\n",
	       1 << pctl.pctl_r_bits.apctl_v_agp_rate,
	       pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

	/* Write it.  */
	port->pctl.csr = pctl.pctl_q_whole;

	/* And wait at least 5000 66MHz cycles (per Titan spec).  */
	udelay(100);
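	/* 5000 cycles / 66 MHz is roughly 76 usec, so 100 usec gives margin. */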

	return 0;
}

static int
titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_bind(aper->arena, aper->pg_start + pg_start,
			  mem->page_count, mem->memory);
}

static int
titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
			    mem->page_count);
}

static unsigned long
titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __FUNCTION__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {
		printk("%s: pte not valid\n", __FUNCTION__);
		return -EINVAL;
	}

	return (pte >> 1) << PAGE_SHIFT;
}

struct alpha_agp_ops titan_agp_ops =
{
	.setup		= titan_agp_setup,
	.cleanup	= titan_agp_cleanup,
	.configure	= titan_agp_configure,
	.bind		= titan_agp_bind_memory,
	.unbind		= titan_agp_unbind_memory,
	.translate	= titan_agp_translate
};

alpha_agp_info *
titan_agp_info(void)
{
	alpha_agp_info *agp;
	struct pci_controller *hose;
	titan_pachip_port *port;
	int hosenum = -1;
	union TPAchipPCTL pctl;

	/*
	 * Find the AGP port.
	 */
	port = &TITAN_pachip0->a_port;
	if (titan_query_agp(port))
		hosenum = 2;
	if (hosenum < 0 &&
	    titan_query_agp(port = &TITAN_pachip1->a_port))
		hosenum = 3;

	/*
	 * Find the hose the port is on.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == hosenum)
			break;

	if (!hose || !hose->sg_pci)
		return NULL;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
	if (!agp)
		return NULL;

	/*
	 * Fill it in.
	 */
	agp->hose = hose;
	agp->private = port;
	agp->ops = &titan_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 *
	 * FIXME - should we go ahead and allocate it here?
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * Capabilities.
	 */
	agp->capability.lw = 0;
	agp->capability.bits.rate = 3; 	/* 2x, 1x */
	agp->capability.bits.sba = 1;
	agp->capability.bits.rq = 7;	/* 8 - 1 */

	/*
	 * Mode.
	 */
	pctl.pctl_q_whole = port->pctl.csr;
	agp->mode.lw = 0;
	agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
	agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
	agp->mode.bits.rq = 7;	/* RQ Depth? */
	agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;

	return agp;
}
807