xref: /openbmc/linux/drivers/char/agp/sworks-agp.c (revision 87c2ce3b)
/*
 * Serverworks AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/agp_backend.h>
#include "agp.h"

#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10
#define SVWRKS_MMBASE		0x14
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c


struct serverworks_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device one */
	volatile u8 __iomem *registers;
	struct serverworks_page_map **gatt_pages;
	int num_tables;
	struct serverworks_page_map scratch_dir;

	int gart_addr_ofs;
	int mm_addr_ofs;
} serverworks_private;

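/*
 * Allocate a single kernel page for use as a GATT page (or as the page
 * directory), mark it reserved, and remap it with ioremap_nocache() so
 * table updates bypass the CPU cache.  Every entry is initialised to
 * the bridge's scratch page so an unused slot never points at random
 * memory.
 */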
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);

	return 0;
}

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}

static void serverworks_free_gatt_pages(void)
{
	int i;
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;

	tables = serverworks_private.gatt_pages;
	for(i = 0; i < serverworks_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				serverworks_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}

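/*
 * Allocate nr_tables second-level GATT pages (one per 4 MB of
 * aperture, since each page holds 1024 four-byte entries).  On any
 * failure the pages allocated so far are torn down again via
 * serverworks_free_gatt_pages().
 */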
static int serverworks_create_gatt_pages(int nr_tables)
{
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;
	int retval = 0;
	int i;

	tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
			 GFP_KERNEL);
	if (tables == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_tables; i++) {
		entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		tables[i] = entry;
		retval = serverworks_create_page_map(entry);
		if (retval != 0) break;
	}
	serverworks_private.num_tables = nr_tables;
	serverworks_private.gatt_pages = tables;

	if (retval != 0) serverworks_free_gatt_pages();

	return retval;
}

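/*
 * The GATT is a two-level structure: bits 31..22 of an aperture
 * address select a page-directory slot (and therefore a GATT page),
 * bits 21..12 select the entry within that page.  SVRWRKS_GET_GATT
 * returns the remapped GATT page covering a given bus address.
 */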
#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif

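/*
 * Build the two-level translation table: a page directory whose
 * entries point at the per-4MB GATT pages, plus a "scratch" directory
 * that gives every unused directory slot something valid to point at.
 * The low bit set on each directory entry appears to act as a valid
 * flag for the hardware.
 */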
static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	for(i = 0; i < 1024; i++) {
		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
		writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
	}

	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, because it's
	 * used to program the agp master, not the cpu.
	 */

	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Point each page-directory entry at its GATT page */

	for(i = 0; i < value->num_entries / 1024; i++)
		writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);

	return 0;
}

static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct serverworks_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	serverworks_free_gatt_pages();
	serverworks_free_page_map(&page_dir);
	serverworks_free_page_map(&serverworks_private.scratch_dir);
	return 0;
}

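/*
 * Determine the aperture size with the usual BAR sizing trick: write
 * the size mask to the aperture base register, read it back to see
 * which bits stick, restore the original value, and look the result up
 * in the aperture size table.
 */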
static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;
	u32 temp2;
	struct aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
					SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT and flushing them individually.  However,
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can contain a large
 * number of entries.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	unsigned long timeout;

	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
	timeout = jiffies + 3*HZ;
	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n");
			break;
		}
	}

	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
	timeout = jiffies + 3*HZ;
	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n");
			break;
		}
	}
}

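/*
 * One-time hardware setup: map the chipset's MMIO register window,
 * program the GATT base address and cache control, enable AGP on the
 * secondary (function 1) device, and latch the AGP status register
 * into agp_bridge->mode.  The CACHING and FEATURE config bytes are
 * also adjusted; the exact meaning of those bits is chipset-specific.
 */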
static int serverworks_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		printk (KERN_ERR PFX "Unable to ioremap() memory.\n");
		return -ENOMEM;
	}

	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
	readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */

	writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
	readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */

	cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
	cap_reg &= ~0x0007;
	cap_reg |= 0x4;
	writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
	readw(serverworks_private.registers+SVWRKS_COMMAND);

	pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
	enable_reg |= 0x1; /* Agp Enable bit */
	pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
	serverworks_tlbflush(NULL);

	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

	/* Fill in the mode register */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
	enable_reg &= ~0x3;
	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
	enable_reg |= (1<<6);
	pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg);

	return 0;
}

static void serverworks_cleanup(void)
{
	iounmap((void __iomem *) serverworks_private.registers);
}

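/*
 * Bind an agp_memory block at page offset pg_start: reject unknown
 * memory types and out-of-range requests, make sure the target GATT
 * entries are still free, flush the CPU caches once, then write the
 * masked physical address of every page and flush the chipset TLB.
 */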
static int serverworks_insert_memory(struct agp_memory *mem,
			     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
	}
	serverworks_tlbflush(mem);
	return 0;
}

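/*
 * Unbind an agp_memory block: write the scratch page back into each
 * affected GATT entry so the aperture never points at freed memory,
 * flushing the chipset TLB before and after the update.
 */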
static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
			     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	global_cache_flush();
	serverworks_tlbflush(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	serverworks_tlbflush(mem);
	return 0;
}

static struct gatt_mask serverworks_masks[] =
{
	{.mask = 1, .type = 0}
};

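/*
 * Supported aperture sizes: {size in MB, number of GATT entries,
 * value programmed into / read back from the aperture size register}.
 */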
static struct aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};

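/*
 * Negotiate and enable AGP.  The device status is collected by the
 * generic helper, fast writes (bit 4) and bit 3 are masked off, and
 * the AGP enable bit (bit 8) is set before the command is written to
 * the bridge's function 1 and broadcast to all AGP devices.
 */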
static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command;

	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &command);

	command = agp_collect_device_status(bridge, mode, command);

	command &= ~0x10;	/* disable FW */
	command &= ~0x08;

	command |= 0x100;

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       bridge->capndx + PCI_AGP_COMMAND,
			       command);

	agp_device_command(command, 0);
}

static struct agp_bridge_driver sworks_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= serverworks_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= serverworks_configure,
	.fetch_size		= serverworks_fetch_size,
	.cleanup		= serverworks_cleanup,
	.tlb_flush		= serverworks_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= serverworks_masks,
	.agp_enable		= serverworks_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= serverworks_create_gatt_table,
	.free_gatt_table	= serverworks_free_gatt_table,
	.insert_memory		= serverworks_insert_memory,
	.remove_memory		= serverworks_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};

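/*
 * Probe routine.  On these chipsets the host bridge proper is
 * function 0, while the AGP logic sits on function 1 of the same
 * slot, so the secondary device is looked up explicitly.  Chipsets
 * without working AGP (e.g. the CNB20HE) are rejected, and 64-bit
 * aperture/MMIO BARs are only accepted if the upper dword is zero.
 */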
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
			PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		printk(KERN_INFO PFX "Detected a Serverworks chipset "
		       "but could not find the secondary device.\n");
		return -ENODEV;
	}

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		/* ServerWorks CNB20HE: no AGP support. */
		printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			printk(KERN_ERR PFX "Unsupported Serverworks chipset "
					"(device id: %04x)\n", pdev->device);
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit aperture address, "
			       "but top bits are not zero.  Disabling agp\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
			       "but top bits are not zero.  Disabling agp\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private;
	bridge->dev = pdev;

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

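/*
 * Match any ServerWorks host bridge by class; chipsets this driver
 * does not actually support are filtered out again in the probe
 * routine above.
 */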
static struct pci_device_id agp_serverworks_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SERVERWORKS,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);

static struct pci_driver agp_serverworks_pci_driver = {
	.name		= "agpgart-serverworks",
	.id_table	= agp_serverworks_pci_table,
	.probe		= agp_serverworks_probe,
	.remove		= agp_serverworks_remove,
};

static int __init agp_serverworks_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_serverworks_pci_driver);
}

static void __exit agp_serverworks_cleanup(void)
{
	pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");