1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support for the Tundra TSI148 VME-PCI Bridge Chip
4  *
5  * Author: Martyn Welch <martyn.welch@ge.com>
6  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7  *
8  * Based on work by Tom Armistead and Ajit Prem
9  * Copyright 2004 Motorola Inc.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/mm.h>
15 #include <linux/types.h>
16 #include <linux/errno.h>
17 #include <linux/proc_fs.h>
18 #include <linux/pci.h>
19 #include <linux/poll.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26 #include <linux/io.h>
27 #include <linux/uaccess.h>
28 #include <linux/byteorder/generic.h>
29 
30 #include "vme.h"
31 #include "vme_bridge.h"
32 #include "vme_tsi148.h"
33 
34 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
35 static void tsi148_remove(struct pci_dev *);
36 
37 /* Module parameter */
38 static bool err_chk;
39 static int geoid;
40 
41 static const char driver_name[] = "vme_tsi148";
42 
43 static const struct pci_device_id tsi148_ids[] = {
44 	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
45 	{ },
46 };
47 
48 MODULE_DEVICE_TABLE(pci, tsi148_ids);
49 
50 static struct pci_driver tsi148_driver = {
51 	.name = driver_name,
52 	.id_table = tsi148_ids,
53 	.probe = tsi148_probe,
54 	.remove = tsi148_remove,
55 };
56 
57 static void reg_join(unsigned int high, unsigned int low,
58 	unsigned long long *variable)
59 {
60 	*variable = (unsigned long long)high << 32;
61 	*variable |= (unsigned long long)low;
62 }
63 
64 static void reg_split(unsigned long long variable, unsigned int *high,
65 	unsigned int *low)
66 {
67 	*low = (unsigned int)variable & 0xFFFFFFFF;
68 	*high = (unsigned int)(variable >> 32);
69 }
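
/*
 * Example (illustrative only): the TSI148 exposes 64-bit quantities as
 * pairs of 32-bit big-endian registers (e.g. xxxAU/xxxAL), which these
 * helpers split and join.  With a hypothetical address 0x0000000123456000:
 *
 *	unsigned int high, low;
 *	unsigned long long addr;
 *
 *	reg_split(0x0000000123456000ULL, &high, &low);
 *	// high == 0x00000001, low == 0x23456000
 *	reg_join(high, low, &addr);
 *	// addr == 0x0000000123456000
 */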
70 
/*
 * Wake up the DMA queue.
 */
74 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
75 	int channel_mask)
76 {
77 	u32 serviced = 0;
78 
79 	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
80 		wake_up(&bridge->dma_queue[0]);
81 		serviced |= TSI148_LCSR_INTC_DMA0C;
82 	}
83 	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
84 		wake_up(&bridge->dma_queue[1]);
85 		serviced |= TSI148_LCSR_INTC_DMA1C;
86 	}
87 
88 	return serviced;
89 }
90 
/*
 * Call location monitor callbacks.
 */
94 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
95 {
96 	int i;
97 	u32 serviced = 0;
98 
99 	for (i = 0; i < 4; i++) {
100 		if (stat & TSI148_LCSR_INTS_LMS[i]) {
101 			/* We only enable interrupts if the callback is set */
102 			bridge->lm_callback[i](bridge->lm_data[i]);
103 			serviced |= TSI148_LCSR_INTC_LMC[i];
104 		}
105 	}
106 
107 	return serviced;
108 }
109 
/*
 * Handle mailbox interrupts, reporting any received values.
 *
 * XXX This functionality is not exposed through the API.
 */
115 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
116 {
117 	int i;
118 	u32 val;
119 	u32 serviced = 0;
120 	struct tsi148_driver *bridge;
121 
122 	bridge = tsi148_bridge->driver_priv;
123 
124 	for (i = 0; i < 4; i++) {
125 		if (stat & TSI148_LCSR_INTS_MBS[i]) {
			val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
127 			dev_err(tsi148_bridge->parent, "VME Mailbox %d received: 0x%x\n",
128 				i, val);
129 			serviced |= TSI148_LCSR_INTC_MBC[i];
130 		}
131 	}
132 
133 	return serviced;
134 }
135 
136 /*
137  * Display error & status message when PERR (PCI) exception interrupt occurs.
138  */
139 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
140 {
141 	struct tsi148_driver *bridge;
142 
143 	bridge = tsi148_bridge->driver_priv;
144 
145 	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
146 		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
147 		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
148 		ioread32be(bridge->base + TSI148_LCSR_EDPAT));
149 
150 	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
151 		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
152 		ioread32be(bridge->base + TSI148_LCSR_EDPXS));
153 
154 	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
155 
156 	return TSI148_LCSR_INTC_PERRC;
157 }
158 
159 /*
160  * Save address and status when VME error interrupt occurs.
161  */
162 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
163 {
164 	unsigned int error_addr_high, error_addr_low;
165 	unsigned long long error_addr;
166 	u32 error_attrib;
167 	int error_am;
168 	struct tsi148_driver *bridge;
169 
170 	bridge = tsi148_bridge->driver_priv;
171 
172 	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
173 	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
174 	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
175 	error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
176 
177 	reg_join(error_addr_high, error_addr_low, &error_addr);
178 
179 	/* Check for exception register overflow (we have lost error data) */
180 	if (error_attrib & TSI148_LCSR_VEAT_VEOF)
181 		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow Occurred\n");
182 
183 	if (err_chk)
184 		vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
185 	else
186 		dev_err(tsi148_bridge->parent,
187 			"VME Bus Error at address: 0x%llx, attributes: %08x\n",
188 			error_addr, error_attrib);
189 
190 	/* Clear Status */
191 	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
192 
193 	return TSI148_LCSR_INTC_VERRC;
194 }
195 
196 /*
197  * Wake up IACK queue.
198  */
199 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
200 {
201 	wake_up(&bridge->iack_queue);
202 
203 	return TSI148_LCSR_INTC_IACKC;
204 }
205 
/*
 * Call the VME bus interrupt callbacks if provided.
 */
209 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
210 	u32 stat)
211 {
212 	int vec, i, serviced = 0;
213 	struct tsi148_driver *bridge;
214 
215 	bridge = tsi148_bridge->driver_priv;
216 
217 	for (i = 7; i > 0; i--) {
218 		if (stat & (1 << i)) {
219 			/*
220 			 * Note: Even though the registers are defined as
221 			 * 32-bits in the spec, we only want to issue 8-bit
222 			 * IACK cycles on the bus, read from offset 3.
223 			 */
224 			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
225 
226 			vme_irq_handler(tsi148_bridge, i, vec);
227 
228 			serviced |= (1 << i);
229 		}
230 	}
231 
232 	return serviced;
233 }
234 
235 /*
236  * Top level interrupt handler.  Clears appropriate interrupt status bits and
237  * then calls appropriate sub handler(s).
238  */
239 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
240 {
241 	u32 stat, enable, serviced = 0;
242 	struct vme_bridge *tsi148_bridge;
243 	struct tsi148_driver *bridge;
244 
245 	tsi148_bridge = ptr;
246 
247 	bridge = tsi148_bridge->driver_priv;
248 
249 	/* Determine which interrupts are unmasked and set */
250 	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
251 	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
252 
253 	/* Only look at unmasked interrupts */
254 	stat &= enable;
255 
256 	if (unlikely(!stat))
257 		return IRQ_NONE;
258 
259 	/* Call subhandlers as appropriate */
260 	/* DMA irqs */
261 	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
262 		serviced |= tsi148_DMA_irqhandler(bridge, stat);
263 
264 	/* Location monitor irqs */
265 	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
266 			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
267 		serviced |= tsi148_LM_irqhandler(bridge, stat);
268 
269 	/* Mail box irqs */
270 	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
271 			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
272 		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
273 
274 	/* PCI bus error */
275 	if (stat & TSI148_LCSR_INTS_PERRS)
276 		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
277 
278 	/* VME bus error */
279 	if (stat & TSI148_LCSR_INTS_VERRS)
280 		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
281 
282 	/* IACK irq */
283 	if (stat & TSI148_LCSR_INTS_IACKS)
284 		serviced |= tsi148_IACK_irqhandler(bridge);
285 
286 	/* VME bus irqs */
287 	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
288 			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
289 			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
290 			TSI148_LCSR_INTS_IRQ1S))
291 		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
292 
293 	/* Clear serviced interrupts */
294 	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
295 
296 	return IRQ_HANDLED;
297 }
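
/*
 * Note (illustrative): the INTC register is write-1-to-clear, so the write
 * at the end of tsi148_irqhandler() acknowledges only the sources that were
 * actually handled.  For example, if a DMA channel 0 completion and an IACK
 * interrupt were pending on the same invocation, the sub-handlers would
 * return
 *
 *	serviced = TSI148_LCSR_INTC_DMA0C | TSI148_LCSR_INTC_IACKC;
 *
 * and the single iowrite32be() clears exactly those two bits, leaving any
 * interrupt that became pending in the meantime asserted.
 */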
298 
299 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
300 {
301 	int result;
302 	unsigned int tmp;
303 	struct pci_dev *pdev;
304 	struct tsi148_driver *bridge;
305 
306 	pdev = to_pci_dev(tsi148_bridge->parent);
307 
308 	bridge = tsi148_bridge->driver_priv;
309 
310 	result = request_irq(pdev->irq,
311 			     tsi148_irqhandler,
312 			     IRQF_SHARED,
313 			     driver_name, tsi148_bridge);
314 	if (result) {
315 		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq vector %02X\n",
316 			pdev->irq);
317 		return result;
318 	}
319 
320 	/* Enable and unmask interrupts */
321 	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
322 		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
323 		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
324 		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
325 		TSI148_LCSR_INTEO_IACKEO;
326 
327 	/* This leaves the following interrupts masked.
328 	 * TSI148_LCSR_INTEO_VIEEO
329 	 * TSI148_LCSR_INTEO_SYSFLEO
330 	 * TSI148_LCSR_INTEO_ACFLEO
331 	 */
332 
333 	/* Don't enable Location Monitor interrupts here - they will be
334 	 * enabled when the location monitors are properly configured and
335 	 * a callback has been attached.
336 	 * TSI148_LCSR_INTEO_LM0EO
337 	 * TSI148_LCSR_INTEO_LM1EO
338 	 * TSI148_LCSR_INTEO_LM2EO
339 	 * TSI148_LCSR_INTEO_LM3EO
340 	 */
341 
342 	/* Don't enable VME interrupts until we add a handler, else the board
343 	 * will respond to it and we don't want that unless it knows how to
344 	 * properly deal with it.
345 	 * TSI148_LCSR_INTEO_IRQ7EO
346 	 * TSI148_LCSR_INTEO_IRQ6EO
347 	 * TSI148_LCSR_INTEO_IRQ5EO
348 	 * TSI148_LCSR_INTEO_IRQ4EO
349 	 * TSI148_LCSR_INTEO_IRQ3EO
350 	 * TSI148_LCSR_INTEO_IRQ2EO
351 	 * TSI148_LCSR_INTEO_IRQ1EO
352 	 */
353 
354 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
355 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
356 
357 	return 0;
358 }
359 
360 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
361 	struct pci_dev *pdev)
362 {
363 	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
364 
365 	/* Turn off interrupts */
366 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
367 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
368 
369 	/* Clear all interrupts */
370 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
371 
372 	/* Detach interrupt handler */
373 	free_irq(pdev->irq, tsi148_bridge);
374 }
375 
/*
 * Check to see if an IACK has been received; return true (1) or false (0).
 */
379 static int tsi148_iack_received(struct tsi148_driver *bridge)
380 {
381 	u32 tmp;
382 
383 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
384 
385 	if (tmp & TSI148_LCSR_VICR_IRQS)
386 		return 0;
387 	else
388 		return 1;
389 }
390 
391 /*
392  * Configure VME interrupt
393  */
394 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
395 	int state, int sync)
396 {
397 	struct pci_dev *pdev;
398 	u32 tmp;
399 	struct tsi148_driver *bridge;
400 
401 	bridge = tsi148_bridge->driver_priv;
402 
403 	/* We need to do the ordering differently for enabling and disabling */
404 	if (state == 0) {
405 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
406 		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
407 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
408 
409 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
410 		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
411 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
412 
413 		if (sync != 0) {
414 			pdev = to_pci_dev(tsi148_bridge->parent);
415 			synchronize_irq(pdev->irq);
416 		}
417 	} else {
418 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
419 		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
420 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
421 
422 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
423 		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
424 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
425 	}
426 }
427 
428 /*
429  * Generate a VME bus interrupt at the requested level & vector. Wait for
430  * interrupt to be acked.
431  */
432 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
433 	int statid)
434 {
435 	u32 tmp;
436 	struct tsi148_driver *bridge;
437 
438 	bridge = tsi148_bridge->driver_priv;
439 
440 	mutex_lock(&bridge->vme_int);
441 
442 	/* Read VICR register */
443 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
444 
445 	/* Set Status/ID */
446 	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
447 		(statid & TSI148_LCSR_VICR_STID_M);
448 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
449 
450 	/* Assert VMEbus IRQ */
451 	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
452 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
453 
454 	/* XXX Consider implementing a timeout? */
455 	wait_event_interruptible(bridge->iack_queue,
456 		tsi148_iack_received(bridge));
457 
458 	mutex_unlock(&bridge->vme_int);
459 
460 	return 0;
461 }
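
/*
 * Illustrative use through the VME core (assuming the vme_irq_generate()
 * wrapper in drivers/vme/vme.c, which reaches the function above via the
 * bridge's irq_generate hook); the level and status/ID are hypothetical:
 *
 *	// Assert a level 3 VME interrupt with status/ID 0xAA
 *	err = vme_irq_generate(vdev, 3, 0xAA);
 *
 * The call blocks until the interrupt has been acknowledged (IACK received).
 */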
462 
463 /*
464  * Initialize a slave window with the requested attributes.
465  */
466 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
467 	unsigned long long vme_base, unsigned long long size,
468 	dma_addr_t pci_base, u32 aspace, u32 cycle)
469 {
470 	unsigned int i, addr = 0, granularity = 0;
471 	unsigned int temp_ctl = 0;
472 	unsigned int vme_base_low, vme_base_high;
473 	unsigned int vme_bound_low, vme_bound_high;
474 	unsigned int pci_offset_low, pci_offset_high;
475 	unsigned long long vme_bound, pci_offset;
476 	struct vme_bridge *tsi148_bridge;
477 	struct tsi148_driver *bridge;
478 
479 	tsi148_bridge = image->parent;
480 	bridge = tsi148_bridge->driver_priv;
481 
482 	i = image->number;
483 
484 	switch (aspace) {
485 	case VME_A16:
486 		granularity = 0x10;
487 		addr |= TSI148_LCSR_ITAT_AS_A16;
488 		break;
489 	case VME_A24:
490 		granularity = 0x1000;
491 		addr |= TSI148_LCSR_ITAT_AS_A24;
492 		break;
493 	case VME_A32:
494 		granularity = 0x10000;
495 		addr |= TSI148_LCSR_ITAT_AS_A32;
496 		break;
497 	case VME_A64:
498 		granularity = 0x10000;
499 		addr |= TSI148_LCSR_ITAT_AS_A64;
500 		break;
501 	default:
502 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
503 		return -EINVAL;
504 	}
505 
506 	/* Convert 64-bit variables to 2x 32-bit variables */
507 	reg_split(vme_base, &vme_base_high, &vme_base_low);
508 
	/*
	 * The bound address is the last valid address for the window, so
	 * step back one granule from the end of the requested range.
	 */
513 	vme_bound = vme_base + size - granularity;
514 	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
515 	pci_offset = (unsigned long long)pci_base - vme_base;
516 	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
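
	/*
	 * Worked example (hypothetical values): a 1 MB A32 window at VME
	 * address 0x20000000 backed by a PCI buffer at 0x80000000 gives,
	 * with the A32 granularity of 0x10000:
	 *
	 *	vme_bound  = 0x20000000 + 0x100000 - 0x10000 = 0x200F0000
	 *	pci_offset = 0x80000000 - 0x20000000          = 0x60000000
	 *
	 * i.e. the bound register holds the start of the last granule in the
	 * window, and the offset is added to incoming VME addresses to form
	 * the PCI address.
	 */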
517 
518 	if (vme_base_low & (granularity - 1)) {
519 		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
520 		return -EINVAL;
521 	}
522 	if (vme_bound_low & (granularity - 1)) {
523 		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
524 		return -EINVAL;
525 	}
526 	if (pci_offset_low & (granularity - 1)) {
527 		dev_err(tsi148_bridge->parent, "Invalid PCI Offset alignment\n");
528 		return -EINVAL;
529 	}
530 
	/* Disable while we are mucking around */
532 	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
533 		TSI148_LCSR_OFFSET_ITAT);
534 	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
535 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
536 		TSI148_LCSR_OFFSET_ITAT);
537 
538 	/* Setup mapping */
539 	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
540 		TSI148_LCSR_OFFSET_ITSAU);
541 	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
542 		TSI148_LCSR_OFFSET_ITSAL);
543 	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
544 		TSI148_LCSR_OFFSET_ITEAU);
545 	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
546 		TSI148_LCSR_OFFSET_ITEAL);
547 	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
548 		TSI148_LCSR_OFFSET_ITOFU);
549 	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
550 		TSI148_LCSR_OFFSET_ITOFL);
551 
552 	/* Setup 2eSST speeds */
553 	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
554 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
555 	case VME_2eSST160:
556 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
557 		break;
558 	case VME_2eSST267:
559 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
560 		break;
561 	case VME_2eSST320:
562 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
563 		break;
564 	}
565 
566 	/* Setup cycle types */
567 	temp_ctl &= ~(0x1F << 7);
568 	if (cycle & VME_BLT)
569 		temp_ctl |= TSI148_LCSR_ITAT_BLT;
570 	if (cycle & VME_MBLT)
571 		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
572 	if (cycle & VME_2eVME)
573 		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
574 	if (cycle & VME_2eSST)
575 		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
576 	if (cycle & VME_2eSSTB)
577 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
578 
579 	/* Setup address space */
580 	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
581 	temp_ctl |= addr;
582 
583 	temp_ctl &= ~0xF;
584 	if (cycle & VME_SUPER)
585 		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
586 	if (cycle & VME_USER)
587 		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
588 	if (cycle & VME_PROG)
589 		temp_ctl |= TSI148_LCSR_ITAT_PGM;
590 	if (cycle & VME_DATA)
591 		temp_ctl |= TSI148_LCSR_ITAT_DATA;
592 
593 	/* Write ctl reg without enable */
594 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
595 		TSI148_LCSR_OFFSET_ITAT);
596 
597 	if (enabled)
598 		temp_ctl |= TSI148_LCSR_ITAT_EN;
599 
600 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
601 		TSI148_LCSR_OFFSET_ITAT);
602 
603 	return 0;
604 }
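
/*
 * Sketch of how a VME device driver would reach the function above through
 * the VME core API (see drivers/vme/vme.c); the window geometry is made up,
 * error handling is omitted and the exact signatures should be checked
 * against the core:
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf_bus;
 *	void *buf;
 *
 *	res = vme_slave_request(vdev, VME_A24, VME_SCT);
 *	buf = vme_alloc_consistent(res, 0x10000, &buf_bus);
 *	err = vme_slave_set(res, 1, 0x00400000, 0x10000, buf_bus,
 *			    VME_A24, VME_SCT | VME_USER | VME_DATA);
 */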
605 
606 /*
607  * Get slave window configuration.
608  */
609 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
610 	unsigned long long *vme_base, unsigned long long *size,
611 	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
612 {
613 	unsigned int i, granularity = 0, ctl = 0;
614 	unsigned int vme_base_low, vme_base_high;
615 	unsigned int vme_bound_low, vme_bound_high;
616 	unsigned int pci_offset_low, pci_offset_high;
617 	unsigned long long vme_bound, pci_offset;
618 	struct tsi148_driver *bridge;
619 
620 	bridge = image->parent->driver_priv;
621 
622 	i = image->number;
623 
624 	/* Read registers */
625 	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
626 		TSI148_LCSR_OFFSET_ITAT);
627 
628 	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
629 		TSI148_LCSR_OFFSET_ITSAU);
630 	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
631 		TSI148_LCSR_OFFSET_ITSAL);
632 	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
633 		TSI148_LCSR_OFFSET_ITEAU);
634 	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
635 		TSI148_LCSR_OFFSET_ITEAL);
636 	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
637 		TSI148_LCSR_OFFSET_ITOFU);
638 	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
639 		TSI148_LCSR_OFFSET_ITOFL);
640 
	/* Convert 2x 32-bit variables to a 64-bit variable */
642 	reg_join(vme_base_high, vme_base_low, vme_base);
643 	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
644 	reg_join(pci_offset_high, pci_offset_low, &pci_offset);
645 
646 	*pci_base = (dma_addr_t)(*vme_base + pci_offset);
647 
648 	*enabled = 0;
649 	*aspace = 0;
650 	*cycle = 0;
651 
652 	if (ctl & TSI148_LCSR_ITAT_EN)
653 		*enabled = 1;
654 
655 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
656 		granularity = 0x10;
657 		*aspace |= VME_A16;
658 	}
659 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
660 		granularity = 0x1000;
661 		*aspace |= VME_A24;
662 	}
663 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
664 		granularity = 0x10000;
665 		*aspace |= VME_A32;
666 	}
667 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
668 		granularity = 0x10000;
669 		*aspace |= VME_A64;
670 	}
671 
672 	/* Need granularity before we set the size */
673 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
674 
675 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
676 		*cycle |= VME_2eSST160;
677 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
678 		*cycle |= VME_2eSST267;
679 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
680 		*cycle |= VME_2eSST320;
681 
682 	if (ctl & TSI148_LCSR_ITAT_BLT)
683 		*cycle |= VME_BLT;
684 	if (ctl & TSI148_LCSR_ITAT_MBLT)
685 		*cycle |= VME_MBLT;
686 	if (ctl & TSI148_LCSR_ITAT_2eVME)
687 		*cycle |= VME_2eVME;
688 	if (ctl & TSI148_LCSR_ITAT_2eSST)
689 		*cycle |= VME_2eSST;
690 	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
691 		*cycle |= VME_2eSSTB;
692 
693 	if (ctl & TSI148_LCSR_ITAT_SUPR)
694 		*cycle |= VME_SUPER;
695 	if (ctl & TSI148_LCSR_ITAT_NPRIV)
696 		*cycle |= VME_USER;
697 	if (ctl & TSI148_LCSR_ITAT_PGM)
698 		*cycle |= VME_PROG;
699 	if (ctl & TSI148_LCSR_ITAT_DATA)
700 		*cycle |= VME_DATA;
701 
702 	return 0;
703 }
704 
705 /*
706  * Allocate and map PCI Resource
707  */
708 static int tsi148_alloc_resource(struct vme_master_resource *image,
709 	unsigned long long size)
710 {
711 	unsigned long long existing_size;
712 	int retval = 0;
713 	struct pci_dev *pdev;
714 	struct vme_bridge *tsi148_bridge;
715 
716 	tsi148_bridge = image->parent;
717 
718 	pdev = to_pci_dev(tsi148_bridge->parent);
719 
720 	existing_size = (unsigned long long)(image->bus_resource.end -
721 		image->bus_resource.start);
722 
	/* If the existing size is OK, return (resource end is inclusive) */
724 	if ((size != 0) && (existing_size == (size - 1)))
725 		return 0;
726 
727 	if (existing_size != 0) {
728 		iounmap(image->kern_base);
729 		image->kern_base = NULL;
730 		kfree(image->bus_resource.name);
731 		release_resource(&image->bus_resource);
732 		memset(&image->bus_resource, 0, sizeof(image->bus_resource));
733 	}
734 
735 	/* Exit here if size is zero */
736 	if (size == 0)
737 		return 0;
738 
739 	if (!image->bus_resource.name) {
740 		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
741 		if (!image->bus_resource.name) {
742 			retval = -ENOMEM;
743 			goto err_name;
744 		}
745 	}
746 
747 	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
748 		image->number);
749 
750 	image->bus_resource.start = 0;
751 	image->bus_resource.end = (unsigned long)size;
752 	image->bus_resource.flags = IORESOURCE_MEM;
753 
754 	retval = pci_bus_alloc_resource(pdev->bus,
755 		&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
756 		0, NULL, NULL);
757 	if (retval) {
758 		dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
759 			image->number, (unsigned long)size,
760 			(unsigned long)image->bus_resource.start);
761 		goto err_resource;
762 	}
763 
	image->kern_base = ioremap(image->bus_resource.start, size);
766 	if (!image->kern_base) {
767 		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
768 		retval = -ENOMEM;
769 		goto err_remap;
770 	}
771 
772 	return 0;
773 
774 err_remap:
775 	release_resource(&image->bus_resource);
776 err_resource:
777 	kfree(image->bus_resource.name);
778 	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
779 err_name:
780 	return retval;
781 }
782 
783 /*
784  * Free and unmap PCI Resource
785  */
786 static void tsi148_free_resource(struct vme_master_resource *image)
787 {
788 	iounmap(image->kern_base);
789 	image->kern_base = NULL;
790 	release_resource(&image->bus_resource);
791 	kfree(image->bus_resource.name);
792 	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
793 }
794 
795 /*
796  * Set the attributes of an outbound window.
797  */
798 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
799 	unsigned long long vme_base, unsigned long long size, u32 aspace,
800 	u32 cycle, u32 dwidth)
801 {
802 	int retval = 0;
803 	unsigned int i;
804 	unsigned int temp_ctl = 0;
805 	unsigned int pci_base_low, pci_base_high;
806 	unsigned int pci_bound_low, pci_bound_high;
807 	unsigned int vme_offset_low, vme_offset_high;
808 	unsigned long long pci_bound, vme_offset, pci_base;
809 	struct vme_bridge *tsi148_bridge;
810 	struct tsi148_driver *bridge;
811 	struct pci_bus_region region;
812 	struct pci_dev *pdev;
813 
814 	tsi148_bridge = image->parent;
815 
816 	bridge = tsi148_bridge->driver_priv;
817 
818 	pdev = to_pci_dev(tsi148_bridge->parent);
819 
820 	/* Verify input data */
821 	if (vme_base & 0xFFFF) {
822 		dev_err(tsi148_bridge->parent, "Invalid VME Window alignment\n");
823 		retval = -EINVAL;
824 		goto err_window;
825 	}
826 
827 	if ((size == 0) && (enabled != 0)) {
828 		dev_err(tsi148_bridge->parent, "Size must be non-zero for enabled windows\n");
829 		retval = -EINVAL;
830 		goto err_window;
831 	}
832 
833 	spin_lock(&image->lock);
834 
	/* Allocate the resource here rather than further up the stack, as it
	 * avoids pushing loads of bus-dependent handling up there. If size is
	 * zero, any existing resource will be freed.
	 */
839 	retval = tsi148_alloc_resource(image, size);
840 	if (retval) {
841 		spin_unlock(&image->lock);
842 		dev_err(tsi148_bridge->parent, "Unable to allocate memory for resource\n");
843 		goto err_res;
844 	}
845 
846 	if (size == 0) {
847 		pci_base = 0;
848 		pci_bound = 0;
849 		vme_offset = 0;
850 	} else {
851 		pcibios_resource_to_bus(pdev->bus, &region,
852 					&image->bus_resource);
853 		pci_base = region.start;
854 
855 		/*
856 		 * Bound address is a valid address for the window, adjust
857 		 * according to window granularity.
858 		 */
859 		pci_bound = pci_base + (size - 0x10000);
860 		vme_offset = vme_base - pci_base;
861 	}
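
	/*
	 * Worked example (hypothetical values): a 128 kB outbound window
	 * whose PCI resource was placed at bus address 0x90000000 and which
	 * should hit VME address 0xC0000000 ends up with:
	 *
	 *	pci_bound  = 0x90000000 + (0x20000 - 0x10000) = 0x90010000
	 *	vme_offset = 0xC0000000 - 0x90000000          = 0x30000000
	 *
	 * All three values must be 64 kB aligned, which is what the checks
	 * below enforce.
	 */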
862 
863 	/* Convert 64-bit variables to 2x 32-bit variables */
864 	reg_split(pci_base, &pci_base_high, &pci_base_low);
865 	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
866 	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
867 
868 	if (pci_base_low & 0xFFFF) {
869 		spin_unlock(&image->lock);
870 		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
871 		retval = -EINVAL;
872 		goto err_gran;
873 	}
874 	if (pci_bound_low & 0xFFFF) {
875 		spin_unlock(&image->lock);
876 		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
877 		retval = -EINVAL;
878 		goto err_gran;
879 	}
880 	if (vme_offset_low & 0xFFFF) {
881 		spin_unlock(&image->lock);
882 		dev_err(tsi148_bridge->parent, "Invalid VME Offset alignment\n");
883 		retval = -EINVAL;
884 		goto err_gran;
885 	}
886 
887 	i = image->number;
888 
889 	/* Disable while we are mucking around */
890 	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
891 		TSI148_LCSR_OFFSET_OTAT);
892 	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
893 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
894 		TSI148_LCSR_OFFSET_OTAT);
895 
896 	/* Setup 2eSST speeds */
897 	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
898 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
899 	case VME_2eSST160:
900 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
901 		break;
902 	case VME_2eSST267:
903 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
904 		break;
905 	case VME_2eSST320:
906 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
907 		break;
908 	}
909 
910 	/* Setup cycle types */
911 	if (cycle & VME_BLT) {
912 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
913 		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
914 	}
915 	if (cycle & VME_MBLT) {
916 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
917 		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
918 	}
919 	if (cycle & VME_2eVME) {
920 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
921 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
922 	}
923 	if (cycle & VME_2eSST) {
924 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
925 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
926 	}
927 	if (cycle & VME_2eSSTB) {
928 		dev_warn(tsi148_bridge->parent, "Currently not setting Broadcast Select Registers\n");
929 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
930 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
931 	}
932 
933 	/* Setup data width */
934 	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
935 	switch (dwidth) {
936 	case VME_D16:
937 		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
938 		break;
939 	case VME_D32:
940 		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
941 		break;
942 	default:
943 		spin_unlock(&image->lock);
944 		dev_err(tsi148_bridge->parent, "Invalid data width\n");
945 		retval = -EINVAL;
946 		goto err_dwidth;
947 	}
948 
949 	/* Setup address space */
950 	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
951 	switch (aspace) {
952 	case VME_A16:
953 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
954 		break;
955 	case VME_A24:
956 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
957 		break;
958 	case VME_A32:
959 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
960 		break;
961 	case VME_A64:
962 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
963 		break;
964 	case VME_CRCSR:
965 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
966 		break;
967 	case VME_USER1:
968 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
969 		break;
970 	case VME_USER2:
971 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
972 		break;
973 	case VME_USER3:
974 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
975 		break;
976 	case VME_USER4:
977 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
978 		break;
979 	default:
980 		spin_unlock(&image->lock);
981 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
982 		retval = -EINVAL;
983 		goto err_aspace;
984 	}
985 
986 	temp_ctl &= ~(3<<4);
987 	if (cycle & VME_SUPER)
988 		temp_ctl |= TSI148_LCSR_OTAT_SUP;
989 	if (cycle & VME_PROG)
990 		temp_ctl |= TSI148_LCSR_OTAT_PGM;
991 
992 	/* Setup mapping */
993 	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
994 		TSI148_LCSR_OFFSET_OTSAU);
995 	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
996 		TSI148_LCSR_OFFSET_OTSAL);
997 	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
998 		TSI148_LCSR_OFFSET_OTEAU);
999 	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1000 		TSI148_LCSR_OFFSET_OTEAL);
1001 	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1002 		TSI148_LCSR_OFFSET_OTOFU);
1003 	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1004 		TSI148_LCSR_OFFSET_OTOFL);
1005 
1006 	/* Write ctl reg without enable */
1007 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1008 		TSI148_LCSR_OFFSET_OTAT);
1009 
1010 	if (enabled)
1011 		temp_ctl |= TSI148_LCSR_OTAT_EN;
1012 
1013 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1014 		TSI148_LCSR_OFFSET_OTAT);
1015 
1016 	spin_unlock(&image->lock);
1017 	return 0;
1018 
1019 err_aspace:
1020 err_dwidth:
1021 err_gran:
1022 	tsi148_free_resource(image);
1023 err_res:
1024 err_window:
	return retval;
}
1028 
/*
 * Get the attributes of an outbound window.
 *
 * XXX Not parsing prefetch information.
 */
1034 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1035 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1036 	u32 *cycle, u32 *dwidth)
1037 {
1038 	unsigned int i, ctl;
1039 	unsigned int pci_base_low, pci_base_high;
1040 	unsigned int pci_bound_low, pci_bound_high;
1041 	unsigned int vme_offset_low, vme_offset_high;
1042 
1043 	unsigned long long pci_base, pci_bound, vme_offset;
1044 	struct tsi148_driver *bridge;
1045 
1046 	bridge = image->parent->driver_priv;
1047 
1048 	i = image->number;
1049 
1050 	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1051 		TSI148_LCSR_OFFSET_OTAT);
1052 
1053 	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1054 		TSI148_LCSR_OFFSET_OTSAU);
1055 	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1056 		TSI148_LCSR_OFFSET_OTSAL);
1057 	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1058 		TSI148_LCSR_OFFSET_OTEAU);
1059 	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1060 		TSI148_LCSR_OFFSET_OTEAL);
1061 	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1062 		TSI148_LCSR_OFFSET_OTOFU);
1063 	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1064 		TSI148_LCSR_OFFSET_OTOFL);
1065 
	/* Convert 2x 32-bit variables to a 64-bit variable */
1067 	reg_join(pci_base_high, pci_base_low, &pci_base);
1068 	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1069 	reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1070 
1071 	*vme_base = pci_base + vme_offset;
1072 	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1073 
1074 	*enabled = 0;
1075 	*aspace = 0;
1076 	*cycle = 0;
1077 	*dwidth = 0;
1078 
1079 	if (ctl & TSI148_LCSR_OTAT_EN)
1080 		*enabled = 1;
1081 
1082 	/* Setup address space */
1083 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1084 		*aspace |= VME_A16;
1085 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1086 		*aspace |= VME_A24;
1087 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1088 		*aspace |= VME_A32;
1089 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1090 		*aspace |= VME_A64;
1091 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1092 		*aspace |= VME_CRCSR;
1093 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1094 		*aspace |= VME_USER1;
1095 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1096 		*aspace |= VME_USER2;
1097 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1098 		*aspace |= VME_USER3;
1099 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1100 		*aspace |= VME_USER4;
1101 
1102 	/* Setup 2eSST speeds */
1103 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1104 		*cycle |= VME_2eSST160;
1105 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1106 		*cycle |= VME_2eSST267;
1107 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1108 		*cycle |= VME_2eSST320;
1109 
1110 	/* Setup cycle types */
1111 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1112 		*cycle |= VME_SCT;
1113 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1114 		*cycle |= VME_BLT;
1115 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1116 		*cycle |= VME_MBLT;
1117 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1118 		*cycle |= VME_2eVME;
1119 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1120 		*cycle |= VME_2eSST;
1121 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1122 		*cycle |= VME_2eSSTB;
1123 
1124 	if (ctl & TSI148_LCSR_OTAT_SUP)
1125 		*cycle |= VME_SUPER;
1126 	else
1127 		*cycle |= VME_USER;
1128 
1129 	if (ctl & TSI148_LCSR_OTAT_PGM)
1130 		*cycle |= VME_PROG;
1131 	else
1132 		*cycle |= VME_DATA;
1133 
1134 	/* Setup data width */
1135 	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1136 		*dwidth = VME_D16;
1137 	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1138 		*dwidth = VME_D32;
1139 
1140 	return 0;
1141 }
1142 
1143 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1144 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1145 	u32 *cycle, u32 *dwidth)
1146 {
1147 	int retval;
1148 
1149 	spin_lock(&image->lock);
1150 
1151 	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1152 		cycle, dwidth);
1153 
1154 	spin_unlock(&image->lock);
1155 
1156 	return retval;
1157 }
1158 
1159 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1160 	size_t count, loff_t offset)
1161 {
1162 	int retval, enabled;
1163 	unsigned long long vme_base, size;
1164 	u32 aspace, cycle, dwidth;
1165 	struct vme_error_handler *handler = NULL;
1166 	struct vme_bridge *tsi148_bridge;
1167 	void __iomem *addr = image->kern_base + offset;
1168 	unsigned int done = 0;
1169 	unsigned int count32;
1170 
1171 	tsi148_bridge = image->parent;
1172 
1173 	spin_lock(&image->lock);
1174 
1175 	if (err_chk) {
1176 		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1177 				    &cycle, &dwidth);
1178 		handler = vme_register_error_handler(tsi148_bridge, aspace,
1179 						     vme_base + offset, count);
1180 		if (!handler) {
1181 			spin_unlock(&image->lock);
1182 			return -ENOMEM;
1183 		}
1184 	}
1185 
	/* The following code handles VME address alignment. We cannot use
	 * memcpy_xxx here because it may cut data transfers into 8-bit
	 * cycles when D16 or D32 cycles are required on the VME bus.
	 * On the other hand, the bridge itself assures that the maximum data
	 * cycle configured for the transfer is used and splits it
	 * automatically for non-aligned addresses, so we don't want the
	 * overhead of needlessly forcing small transfers for the entire cycle.
	 */
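	/*
	 * For example (illustrative), an 11-byte read from an address ending
	 * in ...1 is broken up by the code below into one 8-bit read (to
	 * reach 16-bit alignment), one 16-bit read (to reach 32-bit
	 * alignment) and two 32-bit reads, rather than eleven 8-bit cycles.
	 */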
1194 	if ((uintptr_t)addr & 0x1) {
1195 		*(u8 *)buf = ioread8(addr);
1196 		done += 1;
1197 		if (done == count)
1198 			goto out;
1199 	}
1200 	if ((uintptr_t)(addr + done) & 0x2) {
1201 		if ((count - done) < 2) {
1202 			*(u8 *)(buf + done) = ioread8(addr + done);
1203 			done += 1;
1204 			goto out;
1205 		} else {
1206 			*(u16 *)(buf + done) = ioread16(addr + done);
1207 			done += 2;
1208 		}
1209 	}
1210 
1211 	count32 = (count - done) & ~0x3;
1212 	while (done < count32) {
1213 		*(u32 *)(buf + done) = ioread32(addr + done);
1214 		done += 4;
1215 	}
1216 
1217 	if ((count - done) & 0x2) {
1218 		*(u16 *)(buf + done) = ioread16(addr + done);
1219 		done += 2;
1220 	}
1221 	if ((count - done) & 0x1) {
1222 		*(u8 *)(buf + done) = ioread8(addr + done);
1223 		done += 1;
1224 	}
1225 
1226 out:
1227 	retval = count;
1228 
1229 	if (err_chk) {
1230 		if (handler->num_errors) {
1231 			dev_err(image->parent->parent,
				"First VME read error detected at address 0x%llx\n",
1233 				handler->first_error);
1234 			retval = handler->first_error - (vme_base + offset);
1235 		}
1236 		vme_unregister_error_handler(handler);
1237 	}
1238 
1239 	spin_unlock(&image->lock);
1240 
1241 	return retval;
1242 }
1243 
1244 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1245 	size_t count, loff_t offset)
1246 {
1247 	int retval = 0, enabled;
1248 	unsigned long long vme_base, size;
1249 	u32 aspace, cycle, dwidth;
1250 	void __iomem *addr = image->kern_base + offset;
1251 	unsigned int done = 0;
1252 	unsigned int count32;
1253 
1254 	struct vme_error_handler *handler = NULL;
1255 	struct vme_bridge *tsi148_bridge;
1256 	struct tsi148_driver *bridge;
1257 
1258 	tsi148_bridge = image->parent;
1259 
1260 	bridge = tsi148_bridge->driver_priv;
1261 
1262 	spin_lock(&image->lock);
1263 
1264 	if (err_chk) {
1265 		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1266 				    &cycle, &dwidth);
1267 		handler = vme_register_error_handler(tsi148_bridge, aspace,
1268 						     vme_base + offset, count);
1269 		if (!handler) {
1270 			spin_unlock(&image->lock);
1271 			return -ENOMEM;
1272 		}
1273 	}
1274 
	/* Here we apply the same strategy as in the master_read
	 * function in order to ensure the correct cycles are used.
	 */
1278 	if ((uintptr_t)addr & 0x1) {
1279 		iowrite8(*(u8 *)buf, addr);
1280 		done += 1;
1281 		if (done == count)
1282 			goto out;
1283 	}
1284 	if ((uintptr_t)(addr + done) & 0x2) {
1285 		if ((count - done) < 2) {
1286 			iowrite8(*(u8 *)(buf + done), addr + done);
1287 			done += 1;
1288 			goto out;
1289 		} else {
1290 			iowrite16(*(u16 *)(buf + done), addr + done);
1291 			done += 2;
1292 		}
1293 	}
1294 
1295 	count32 = (count - done) & ~0x3;
1296 	while (done < count32) {
1297 		iowrite32(*(u32 *)(buf + done), addr + done);
1298 		done += 4;
1299 	}
1300 
1301 	if ((count - done) & 0x2) {
1302 		iowrite16(*(u16 *)(buf + done), addr + done);
1303 		done += 2;
1304 	}
1305 	if ((count - done) & 0x1) {
1306 		iowrite8(*(u8 *)(buf + done), addr + done);
1307 		done += 1;
1308 	}
1309 
1310 out:
1311 	retval = count;
1312 
	/*
	 * Writes are posted. We need to do a read on the VME bus to flush out
	 * all of the writes before we check for errors. We can't guarantee
	 * that reading the data we have just written is safe. It is believed
	 * that there isn't any read/write re-ordering, so we can read any
	 * location in VME space, so let's read the Device ID from the
	 * tsi148's own registers as mapped into CR/CSR space.
	 *
	 * We check for saved errors in the written address range/space.
	 */
1323 
1324 	if (err_chk) {
1325 		ioread16(bridge->flush_image->kern_base + 0x7F000);
1326 
1327 		if (handler->num_errors) {
1328 			dev_warn(tsi148_bridge->parent,
				 "First VME write error detected at address 0x%llx\n",
1330 				 handler->first_error);
1331 			retval = handler->first_error - (vme_base + offset);
1332 		}
1333 		vme_unregister_error_handler(handler);
1334 	}
1335 
1336 	spin_unlock(&image->lock);
1337 
1338 	return retval;
1339 }
1340 
1341 /*
1342  * Perform an RMW cycle on the VME bus.
1343  *
1344  * Requires a previously configured master window, returns final value.
1345  */
1346 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1347 	unsigned int mask, unsigned int compare, unsigned int swap,
1348 	loff_t offset)
1349 {
1350 	unsigned long long pci_addr;
1351 	unsigned int pci_addr_high, pci_addr_low;
1352 	u32 tmp, result;
1353 	int i;
1354 	struct tsi148_driver *bridge;
1355 
1356 	bridge = image->parent->driver_priv;
1357 
1358 	/* Find the PCI address that maps to the desired VME address */
1359 	i = image->number;
1360 
1361 	/* Locking as we can only do one of these at a time */
1362 	mutex_lock(&bridge->vme_rmw);
1363 
1364 	/* Lock image */
1365 	spin_lock(&image->lock);
1366 
1367 	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1368 		TSI148_LCSR_OFFSET_OTSAU);
1369 	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1370 		TSI148_LCSR_OFFSET_OTSAL);
1371 
1372 	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1373 	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1374 
1375 	/* Configure registers */
1376 	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1377 	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1378 	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1379 	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1380 	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1381 
1382 	/* Enable RMW */
1383 	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1384 	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1385 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1386 
1387 	/* Kick process off with a read to the required address. */
1388 	result = ioread32be(image->kern_base + offset);
1389 
1390 	/* Disable RMW */
1391 	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1392 	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1393 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1394 
1395 	spin_unlock(&image->lock);
1396 
1397 	mutex_unlock(&bridge->vme_rmw);
1398 
1399 	return result;
1400 }
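
/*
 * Illustrative use of the RMW cycle implemented above (offset and values
 * are hypothetical; drivers normally reach this through vme_master_rmw()
 * in the VME core).  Roughly speaking, for each bit enabled in 'mask' the
 * bridge is believed to write back the 'swap' bit whenever the bit read
 * from VME matches the 'compare' bit, so a simple test-and-set of bit 0
 * in a VME semaphore word looks like:
 *
 *	// enable bit 0 only, compare it against 0, swap in 1
 *	old = tsi148_master_rmw(image, 0x1, 0x0, 0x1, 0x100);
 *	// (old & 0x1) == 0 means the flag was clear and is now set
 */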
1401 
1402 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1403 	u32 aspace, u32 cycle, u32 dwidth)
1404 {
1405 	u32 val;
1406 
1407 	val = be32_to_cpu(*attr);
1408 
1409 	/* Setup 2eSST speeds */
1410 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1411 	case VME_2eSST160:
1412 		val |= TSI148_LCSR_DSAT_2eSSTM_160;
1413 		break;
1414 	case VME_2eSST267:
1415 		val |= TSI148_LCSR_DSAT_2eSSTM_267;
1416 		break;
1417 	case VME_2eSST320:
1418 		val |= TSI148_LCSR_DSAT_2eSSTM_320;
1419 		break;
1420 	}
1421 
1422 	/* Setup cycle types */
1423 	if (cycle & VME_SCT)
1424 		val |= TSI148_LCSR_DSAT_TM_SCT;
1425 
1426 	if (cycle & VME_BLT)
1427 		val |= TSI148_LCSR_DSAT_TM_BLT;
1428 
1429 	if (cycle & VME_MBLT)
1430 		val |= TSI148_LCSR_DSAT_TM_MBLT;
1431 
1432 	if (cycle & VME_2eVME)
1433 		val |= TSI148_LCSR_DSAT_TM_2eVME;
1434 
1435 	if (cycle & VME_2eSST)
1436 		val |= TSI148_LCSR_DSAT_TM_2eSST;
1437 
1438 	if (cycle & VME_2eSSTB) {
1439 		dev_err(dev, "Currently not setting Broadcast Select Registers\n");
1440 		val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1441 	}
1442 
1443 	/* Setup data width */
1444 	switch (dwidth) {
1445 	case VME_D16:
1446 		val |= TSI148_LCSR_DSAT_DBW_16;
1447 		break;
1448 	case VME_D32:
1449 		val |= TSI148_LCSR_DSAT_DBW_32;
1450 		break;
1451 	default:
1452 		dev_err(dev, "Invalid data width\n");
1453 		return -EINVAL;
1454 	}
1455 
1456 	/* Setup address space */
1457 	switch (aspace) {
1458 	case VME_A16:
1459 		val |= TSI148_LCSR_DSAT_AMODE_A16;
1460 		break;
1461 	case VME_A24:
1462 		val |= TSI148_LCSR_DSAT_AMODE_A24;
1463 		break;
1464 	case VME_A32:
1465 		val |= TSI148_LCSR_DSAT_AMODE_A32;
1466 		break;
1467 	case VME_A64:
1468 		val |= TSI148_LCSR_DSAT_AMODE_A64;
1469 		break;
1470 	case VME_CRCSR:
1471 		val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1472 		break;
1473 	case VME_USER1:
1474 		val |= TSI148_LCSR_DSAT_AMODE_USER1;
1475 		break;
1476 	case VME_USER2:
1477 		val |= TSI148_LCSR_DSAT_AMODE_USER2;
1478 		break;
1479 	case VME_USER3:
1480 		val |= TSI148_LCSR_DSAT_AMODE_USER3;
1481 		break;
1482 	case VME_USER4:
1483 		val |= TSI148_LCSR_DSAT_AMODE_USER4;
1484 		break;
1485 	default:
1486 		dev_err(dev, "Invalid address space\n");
1487 		return -EINVAL;
1488 	}
1489 
1490 	if (cycle & VME_SUPER)
1491 		val |= TSI148_LCSR_DSAT_SUP;
1492 	if (cycle & VME_PROG)
1493 		val |= TSI148_LCSR_DSAT_PGM;
1494 
1495 	*attr = cpu_to_be32(val);
1496 
1497 	return 0;
1498 }
1499 
1500 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1501 	u32 aspace, u32 cycle, u32 dwidth)
1502 {
1503 	u32 val;
1504 
1505 	val = be32_to_cpu(*attr);
1506 
1507 	/* Setup 2eSST speeds */
1508 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1509 	case VME_2eSST160:
1510 		val |= TSI148_LCSR_DDAT_2eSSTM_160;
1511 		break;
1512 	case VME_2eSST267:
1513 		val |= TSI148_LCSR_DDAT_2eSSTM_267;
1514 		break;
1515 	case VME_2eSST320:
1516 		val |= TSI148_LCSR_DDAT_2eSSTM_320;
1517 		break;
1518 	}
1519 
1520 	/* Setup cycle types */
1521 	if (cycle & VME_SCT)
1522 		val |= TSI148_LCSR_DDAT_TM_SCT;
1523 
1524 	if (cycle & VME_BLT)
1525 		val |= TSI148_LCSR_DDAT_TM_BLT;
1526 
1527 	if (cycle & VME_MBLT)
1528 		val |= TSI148_LCSR_DDAT_TM_MBLT;
1529 
1530 	if (cycle & VME_2eVME)
1531 		val |= TSI148_LCSR_DDAT_TM_2eVME;
1532 
1533 	if (cycle & VME_2eSST)
1534 		val |= TSI148_LCSR_DDAT_TM_2eSST;
1535 
1536 	if (cycle & VME_2eSSTB) {
1537 		dev_err(dev, "Currently not setting Broadcast Select Registers\n");
1538 		val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1539 	}
1540 
1541 	/* Setup data width */
1542 	switch (dwidth) {
1543 	case VME_D16:
1544 		val |= TSI148_LCSR_DDAT_DBW_16;
1545 		break;
1546 	case VME_D32:
1547 		val |= TSI148_LCSR_DDAT_DBW_32;
1548 		break;
1549 	default:
1550 		dev_err(dev, "Invalid data width\n");
1551 		return -EINVAL;
1552 	}
1553 
1554 	/* Setup address space */
1555 	switch (aspace) {
1556 	case VME_A16:
1557 		val |= TSI148_LCSR_DDAT_AMODE_A16;
1558 		break;
1559 	case VME_A24:
1560 		val |= TSI148_LCSR_DDAT_AMODE_A24;
1561 		break;
1562 	case VME_A32:
1563 		val |= TSI148_LCSR_DDAT_AMODE_A32;
1564 		break;
1565 	case VME_A64:
1566 		val |= TSI148_LCSR_DDAT_AMODE_A64;
1567 		break;
1568 	case VME_CRCSR:
1569 		val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1570 		break;
1571 	case VME_USER1:
1572 		val |= TSI148_LCSR_DDAT_AMODE_USER1;
1573 		break;
1574 	case VME_USER2:
1575 		val |= TSI148_LCSR_DDAT_AMODE_USER2;
1576 		break;
1577 	case VME_USER3:
1578 		val |= TSI148_LCSR_DDAT_AMODE_USER3;
1579 		break;
1580 	case VME_USER4:
1581 		val |= TSI148_LCSR_DDAT_AMODE_USER4;
1582 		break;
1583 	default:
1584 		dev_err(dev, "Invalid address space\n");
1585 		return -EINVAL;
1586 	}
1587 
1588 	if (cycle & VME_SUPER)
1589 		val |= TSI148_LCSR_DDAT_SUP;
1590 	if (cycle & VME_PROG)
1591 		val |= TSI148_LCSR_DDAT_PGM;
1592 
1593 	*attr = cpu_to_be32(val);
1594 
1595 	return 0;
1596 }
1597 
1598 /*
1599  * Add a link list descriptor to the list
1600  *
1601  * Note: DMA engine expects the DMA descriptor to be big endian.
1602  */
1603 static int tsi148_dma_list_add(struct vme_dma_list *list,
1604 	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1605 {
1606 	struct tsi148_dma_entry *entry, *prev;
1607 	u32 address_high, address_low, val;
1608 	struct vme_dma_pattern *pattern_attr;
1609 	struct vme_dma_pci *pci_attr;
1610 	struct vme_dma_vme *vme_attr;
1611 	int retval = 0;
1612 	struct vme_bridge *tsi148_bridge;
1613 
1614 	tsi148_bridge = list->parent->parent;
1615 
1616 	/* Descriptor must be aligned on 64-bit boundaries */
1617 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1618 	if (!entry) {
1619 		retval = -ENOMEM;
1620 		goto err_mem;
1621 	}
1622 
1623 	/* Test descriptor alignment */
1624 	if ((unsigned long)&entry->descriptor & 0x7) {
1625 		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 byte boundary as required: %p\n",
1626 			&entry->descriptor);
1627 		retval = -EINVAL;
1628 		goto err_align;
1629 	}
1630 
1631 	/* Given we are going to fill out the structure, we probably don't
1632 	 * need to zero it, but better safe than sorry for now.
1633 	 */
1634 	memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1635 
1636 	/* Fill out source part */
1637 	switch (src->type) {
1638 	case VME_DMA_PATTERN:
1639 		pattern_attr = src->private;
1640 
1641 		entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1642 
1643 		val = TSI148_LCSR_DSAT_TYP_PAT;
1644 
1645 		/* Default behaviour is 32 bit pattern */
1646 		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1647 			val |= TSI148_LCSR_DSAT_PSZ;
1648 
1649 		/* It seems that the default behaviour is to increment */
1650 		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1651 			val |= TSI148_LCSR_DSAT_NIN;
1652 		entry->descriptor.dsat = cpu_to_be32(val);
1653 		break;
1654 	case VME_DMA_PCI:
1655 		pci_attr = src->private;
1656 
1657 		reg_split((unsigned long long)pci_attr->address, &address_high,
1658 			&address_low);
1659 		entry->descriptor.dsau = cpu_to_be32(address_high);
1660 		entry->descriptor.dsal = cpu_to_be32(address_low);
1661 		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1662 		break;
1663 	case VME_DMA_VME:
1664 		vme_attr = src->private;
1665 
1666 		reg_split((unsigned long long)vme_attr->address, &address_high,
1667 			&address_low);
1668 		entry->descriptor.dsau = cpu_to_be32(address_high);
1669 		entry->descriptor.dsal = cpu_to_be32(address_low);
1670 		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1671 
1672 		retval = tsi148_dma_set_vme_src_attributes(
1673 			tsi148_bridge->parent, &entry->descriptor.dsat,
1674 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1675 		if (retval < 0)
1676 			goto err_source;
1677 		break;
1678 	default:
1679 		dev_err(tsi148_bridge->parent, "Invalid source type\n");
1680 		retval = -EINVAL;
1681 		goto err_source;
1682 	}
1683 
	/* Assume last link - this will be overwritten by adding another */
1685 	entry->descriptor.dnlau = cpu_to_be32(0);
1686 	entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1687 
1688 	/* Fill out destination part */
1689 	switch (dest->type) {
1690 	case VME_DMA_PCI:
1691 		pci_attr = dest->private;
1692 
1693 		reg_split((unsigned long long)pci_attr->address, &address_high,
1694 			&address_low);
1695 		entry->descriptor.ddau = cpu_to_be32(address_high);
1696 		entry->descriptor.ddal = cpu_to_be32(address_low);
1697 		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1698 		break;
1699 	case VME_DMA_VME:
1700 		vme_attr = dest->private;
1701 
1702 		reg_split((unsigned long long)vme_attr->address, &address_high,
1703 			&address_low);
1704 		entry->descriptor.ddau = cpu_to_be32(address_high);
1705 		entry->descriptor.ddal = cpu_to_be32(address_low);
1706 		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1707 
1708 		retval = tsi148_dma_set_vme_dest_attributes(
1709 			tsi148_bridge->parent, &entry->descriptor.ddat,
1710 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1711 		if (retval < 0)
1712 			goto err_dest;
1713 		break;
1714 	default:
1715 		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1716 		retval = -EINVAL;
1717 		goto err_dest;
1718 	}
1719 
1720 	/* Fill out count */
1721 	entry->descriptor.dcnt = cpu_to_be32((u32)count);
1722 
1723 	/* Add to list */
1724 	list_add_tail(&entry->list, &list->entries);
1725 
1726 	entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1727 					   &entry->descriptor,
1728 					   sizeof(entry->descriptor),
1729 					   DMA_TO_DEVICE);
1730 	if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1731 		dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1732 		retval = -EINVAL;
1733 		goto err_dma;
1734 	}
1735 
	/* Fill out the previous descriptor's "Next Address" */
1737 	if (entry->list.prev != &list->entries) {
1738 		reg_split((unsigned long long)entry->dma_handle, &address_high,
1739 			&address_low);
1740 		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1741 				  list);
1742 		prev->descriptor.dnlau = cpu_to_be32(address_high);
1743 		prev->descriptor.dnlal = cpu_to_be32(address_low);
1744 
1745 	}
1746 
1747 	return 0;
1748 
1749 err_dma:
1750 	list_del(&entry->list);
1751 err_dest:
1752 err_source:
1753 err_align:
	kfree(entry);
1755 err_mem:
1756 	return retval;
1757 }
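
/*
 * Sketch of how a list built by the function above is normally assembled
 * and run from a VME device driver via the core API in drivers/vme/vme.c
 * (addresses and sizes are made up; error handling and attribute cleanup
 * are omitted):
 *
 *	struct vme_dma_list *list = vme_new_dma_list(dma_res);
 *	struct vme_dma_attr *src, *dst;
 *
 *	src = vme_dma_vme_attribute(0x20000000, VME_A32, VME_SCT, VME_D32);
 *	dst = vme_dma_pci_attribute(buf_bus);
 *	vme_dma_list_add(list, src, dst, 0x10000);
 *	vme_dma_list_exec(list);
 *	vme_dma_list_free(list);
 */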
1758 
/*
 * Check the state of the provided DMA channel: returns 0 while the channel
 * is busy and 1 once it is idle.
 */
1762 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1763 {
1764 	u32 tmp;
1765 	struct tsi148_driver *bridge;
1766 
1767 	bridge = tsi148_bridge->driver_priv;
1768 
1769 	tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1770 		TSI148_LCSR_OFFSET_DSTA);
1771 
1772 	if (tmp & TSI148_LCSR_DSTA_BSY)
1773 		return 0;
1774 	else
1775 		return 1;
1776 
1777 }
1778 
1779 /*
1780  * Execute a previously generated link list
1781  *
1782  * XXX Need to provide control register configuration.
1783  */
1784 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1785 {
1786 	struct vme_dma_resource *ctrlr;
1787 	int channel, retval;
1788 	struct tsi148_dma_entry *entry;
1789 	u32 bus_addr_high, bus_addr_low;
1790 	u32 val, dctlreg = 0;
1791 	struct vme_bridge *tsi148_bridge;
1792 	struct tsi148_driver *bridge;
1793 
1794 	ctrlr = list->parent;
1795 
1796 	tsi148_bridge = ctrlr->parent;
1797 
1798 	bridge = tsi148_bridge->driver_priv;
1799 
1800 	mutex_lock(&ctrlr->mtx);
1801 
1802 	channel = ctrlr->number;
1803 
1804 	if (!list_empty(&ctrlr->running)) {
1805 		/*
1806 		 * XXX We have an active DMA transfer and currently haven't
1807 		 *     sorted out the mechanism for "pending" DMA transfers.
1808 		 *     Return busy.
1809 		 */
1810 		/* Need to add to pending here */
1811 		mutex_unlock(&ctrlr->mtx);
1812 		return -EBUSY;
1813 	}
1814 
1815 	list_add(&list->list, &ctrlr->running);
1816 
1817 	/* Get first bus address and write into registers */
1818 	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1819 		list);
1820 
1821 	mutex_unlock(&ctrlr->mtx);
1822 
1823 	reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1824 
1825 	iowrite32be(bus_addr_high, bridge->base +
1826 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1827 	iowrite32be(bus_addr_low, bridge->base +
1828 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1829 
1830 	dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1831 		TSI148_LCSR_OFFSET_DCTL);
1832 
1833 	/* Start the operation */
1834 	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1835 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1836 
1837 	retval = wait_event_interruptible(bridge->dma_queue[channel],
1838 		tsi148_dma_busy(ctrlr->parent, channel));
1839 
1840 	if (retval) {
1841 		iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1842 			TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1843 		/* Wait for the operation to abort */
1844 		wait_event(bridge->dma_queue[channel],
1845 			   tsi148_dma_busy(ctrlr->parent, channel));
1846 		retval = -EINTR;
1847 		goto exit;
1848 	}
1849 
	/*
	 * Read the status register; it remains valid until we kick off a
	 * new transfer.
	 */
1854 	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1855 		TSI148_LCSR_OFFSET_DSTA);
1856 
1857 	if (val & TSI148_LCSR_DSTA_VBE) {
1858 		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1859 		retval = -EIO;
1860 	}
1861 
1862 exit:
1863 	/* Remove list from running list */
1864 	mutex_lock(&ctrlr->mtx);
1865 	list_del(&list->list);
1866 	mutex_unlock(&ctrlr->mtx);
1867 
1868 	return retval;
1869 }
1870 
/*
 * Clean up a previously generated linked list.
 *
 * This is kept as a separate step - don't assume that the chain can't be
 * reused.
 */
1876 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1877 {
1878 	struct list_head *pos, *temp;
1879 	struct tsi148_dma_entry *entry;
1880 
1881 	struct vme_bridge *tsi148_bridge = list->parent->parent;
1882 
1883 	/* detach and free each entry */
1884 	list_for_each_safe(pos, temp, &list->entries) {
1885 		list_del(pos);
1886 		entry = list_entry(pos, struct tsi148_dma_entry, list);
1887 
1888 		dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1889 			sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1890 		kfree(entry);
1891 	}
1892 
1893 	return 0;
1894 }
1895 
/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system-wide configuration.
 *
 * This does not enable the location monitor - that should be done when the
 * first callback is attached and disabled when the last callback is removed.
 */
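/*
 * Typical use (sketch, not from this driver): a VME device driver would go
 * through the VME core, roughly:
 *
 *	lm = vme_lm_request(vdev);
 *	vme_lm_set(lm, lm_base, VME_A16, VME_USER | VME_DATA);
 *	vme_lm_attach(lm, 0, my_callback, my_data);
 *
 * where lm_base, my_callback and my_data are placeholders and the vme_lm_*
 * helper names are assumed from the in-kernel VME API.
 */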
1903 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1904 	u32 aspace, u32 cycle)
1905 {
1906 	u32 lm_base_high, lm_base_low, lm_ctl = 0;
1907 	int i;
1908 	struct vme_bridge *tsi148_bridge;
1909 	struct tsi148_driver *bridge;
1910 
1911 	tsi148_bridge = lm->parent;
1912 
1913 	bridge = tsi148_bridge->driver_priv;
1914 
1915 	mutex_lock(&lm->mtx);
1916 
1917 	/* If we already have a callback attached, we can't move it! */
1918 	for (i = 0; i < lm->monitors; i++) {
1919 		if (bridge->lm_callback[i]) {
1920 			mutex_unlock(&lm->mtx);
1921 			dev_err(tsi148_bridge->parent, "Location monitor callback attached, can't reset\n");
1922 			return -EBUSY;
1923 		}
1924 	}
1925 
1926 	switch (aspace) {
1927 	case VME_A16:
1928 		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1929 		break;
1930 	case VME_A24:
1931 		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1932 		break;
1933 	case VME_A32:
1934 		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1935 		break;
1936 	case VME_A64:
1937 		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1938 		break;
1939 	default:
1940 		mutex_unlock(&lm->mtx);
1941 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
1942 		return -EINVAL;
1943 	}
1944 
1945 	if (cycle & VME_SUPER)
1946 		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1947 	if (cycle & VME_USER)
1948 		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1949 	if (cycle & VME_PROG)
1950 		lm_ctl |= TSI148_LCSR_LMAT_PGM;
1951 	if (cycle & VME_DATA)
1952 		lm_ctl |= TSI148_LCSR_LMAT_DATA;
1953 
1954 	reg_split(lm_base, &lm_base_high, &lm_base_low);
1955 
1956 	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1957 	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1958 	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1959 
1960 	mutex_unlock(&lm->mtx);
1961 
1962 	return 0;
1963 }
1964 
/*
 * Get the configuration of the location monitor and return whether it is
 * enabled or disabled.
 */
1968 static int tsi148_lm_get(struct vme_lm_resource *lm,
1969 	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1970 {
1971 	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1972 	struct tsi148_driver *bridge;
1973 
1974 	bridge = lm->parent->driver_priv;
1975 
1976 	mutex_lock(&lm->mtx);
1977 
1978 	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1979 	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1980 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1981 
1982 	reg_join(lm_base_high, lm_base_low, lm_base);
1983 
1984 	if (lm_ctl & TSI148_LCSR_LMAT_EN)
1985 		enabled = 1;
1986 
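	/* Decoded flags are OR-ed into the caller-supplied *aspace and *cycle. */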
1987 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
1988 		*aspace |= VME_A16;
1989 
1990 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
1991 		*aspace |= VME_A24;
1992 
1993 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
1994 		*aspace |= VME_A32;
1995 
1996 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
1997 		*aspace |= VME_A64;
1998 
1999 	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2000 		*cycle |= VME_SUPER;
2001 	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2002 		*cycle |= VME_USER;
2003 	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2004 		*cycle |= VME_PROG;
2005 	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2006 		*cycle |= VME_DATA;
2007 
2008 	mutex_unlock(&lm->mtx);
2009 
2010 	return enabled;
2011 }
2012 
/*
 * Attach a callback to a specific location monitor.
 *
 * When the monitor triggers, the callback is invoked with the data pointer
 * registered here.
 */
2018 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2019 	void (*callback)(void *), void *data)
2020 {
2021 	u32 lm_ctl, tmp;
2022 	struct vme_bridge *tsi148_bridge;
2023 	struct tsi148_driver *bridge;
2024 
2025 	tsi148_bridge = lm->parent;
2026 
2027 	bridge = tsi148_bridge->driver_priv;
2028 
2029 	mutex_lock(&lm->mtx);
2030 
2031 	/* Ensure that the location monitor is configured - need PGM or DATA */
2032 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2033 	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2034 		mutex_unlock(&lm->mtx);
2035 		dev_err(tsi148_bridge->parent, "Location monitor not properly configured\n");
2036 		return -EINVAL;
2037 	}
2038 
2039 	/* Check that a callback isn't already attached */
2040 	if (bridge->lm_callback[monitor]) {
2041 		mutex_unlock(&lm->mtx);
2042 		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2043 		return -EBUSY;
2044 	}
2045 
2046 	/* Attach callback */
2047 	bridge->lm_callback[monitor] = callback;
2048 	bridge->lm_data[monitor] = data;
2049 
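	/*
	 * Note: the callback must be installed before the interrupt is
	 * unmasked below - once the LM interrupt fires, the handler invokes
	 * lm_callback[monitor] directly.
	 */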
2050 	/* Enable Location Monitor interrupt */
2051 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2052 	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2053 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2054 
2055 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2056 	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2057 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2058 
	/* Ensure that the global Location Monitor Enable bit is set */
2060 	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2061 		lm_ctl |= TSI148_LCSR_LMAT_EN;
2062 		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2063 	}
2064 
2065 	mutex_unlock(&lm->mtx);
2066 
2067 	return 0;
2068 }
2069 
/*
 * Detach a callback function from a specific location monitor.
 */
2073 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2074 {
2075 	u32 lm_en, tmp;
2076 	struct tsi148_driver *bridge;
2077 
2078 	bridge = lm->parent->driver_priv;
2079 
2080 	mutex_lock(&lm->mtx);
2081 
2082 	/* Disable Location Monitor and ensure previous interrupts are clear */
2083 	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2084 	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2085 	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2086 
2087 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2088 	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2089 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2090 
2091 	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2092 		 bridge->base + TSI148_LCSR_INTC);
2093 
2094 	/* Detach callback */
2095 	bridge->lm_callback[monitor] = NULL;
2096 	bridge->lm_data[monitor] = NULL;
2097 
	/*
	 * If all location monitors are now disabled, clear the global
	 * Location Monitor Enable bit.
	 */
2099 	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2100 			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2101 		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2102 		tmp &= ~TSI148_LCSR_LMAT_EN;
2103 		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2104 	}
2105 
2106 	mutex_unlock(&lm->mtx);
2107 
2108 	return 0;
2109 }
2110 
2111 /*
2112  * Determine Geographical Addressing
2113  */
2114 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2115 {
2116 	u32 slot = 0;
2117 	struct tsi148_driver *bridge;
2118 
2119 	bridge = tsi148_bridge->driver_priv;
2120 
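	/*
	 * A zero "geoid" module parameter means "use the geographical address
	 * pins reported in VSTAT"; a non-zero value overrides the backplane.
	 */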
	if (!geoid) {
		slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
		slot = slot & TSI148_LCSR_VSTAT_GA_M;
	} else {
		slot = geoid;
	}
2126 
2127 	return (int)slot;
2128 }
2129 
2130 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2131 	dma_addr_t *dma)
2132 {
2133 	struct pci_dev *pdev;
2134 
2135 	/* Find pci_dev container of dev */
2136 	pdev = to_pci_dev(parent);
2137 
2138 	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
2139 }
2140 
2141 static void tsi148_free_consistent(struct device *parent, size_t size,
2142 	void *vaddr, dma_addr_t dma)
2143 {
2144 	struct pci_dev *pdev;
2145 
2146 	/* Find pci_dev container of dev */
2147 	pdev = to_pci_dev(parent);
2148 
2149 	dma_free_coherent(&pdev->dev, size, vaddr, dma);
2150 }
2151 
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 *
 * Each board has a 512 kB window, with the highest 4 kB used for the board's
 * registers; this leaves a fixed-length 508 kB window which must be mapped
 * onto PCI memory.
 */
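/*
 * For example, a board at geographical address 3 has its CR/CSR window at
 * VME CR/CSR address 3 * 512 kB = 0x180000, which matches the
 * "vstat * 0x80000" base used for the err_chk flush image below.
 */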
2164 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2165 	struct pci_dev *pdev)
2166 {
2167 	u32 cbar, crat, vstat;
2168 	u32 crcsr_bus_high, crcsr_bus_low;
2169 	int retval;
2170 	struct tsi148_driver *bridge;
2171 
2172 	bridge = tsi148_bridge->driver_priv;
2173 
2174 	/* Allocate mem for CR/CSR image */
2175 	bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
2176 						  VME_CRCSR_BUF_SIZE,
2177 						  &bridge->crcsr_bus, GFP_KERNEL);
2178 	if (!bridge->crcsr_kernel) {
2179 		dev_err(tsi148_bridge->parent, "Failed to allocate memory for CR/CSR image\n");
2180 		return -ENOMEM;
2181 	}
2182 
2183 	reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2184 
2185 	iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2186 	iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2187 
2188 	/* Ensure that the CR/CSR is configured at the correct offset */
2189 	cbar = ioread32be(bridge->base + TSI148_CBAR);
	cbar = (cbar & TSI148_CRCSR_CBAR_M) >> 3;
2191 
2192 	vstat = tsi148_slot_get(tsi148_bridge);
2193 
2194 	if (cbar != vstat) {
2195 		cbar = vstat;
2196 		dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
		iowrite32be(cbar << 3, bridge->base + TSI148_CBAR);
2198 	}
2199 	dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2200 
	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
	if (crat & TSI148_LCSR_CRAT_EN) {
		dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
	} else {
		dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
		iowrite32be(crat | TSI148_LCSR_CRAT_EN,
			bridge->base + TSI148_LCSR_CRAT);
	}
2209 
2210 	/* If we want flushed, error-checked writes, set up a window
2211 	 * over the CR/CSR registers. We read from here to safely flush
2212 	 * through VME writes.
2213 	 */
2214 	if (err_chk) {
2215 		retval = tsi148_master_set(bridge->flush_image, 1,
2216 			(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2217 			VME_D16);
2218 		if (retval)
2219 			dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
2220 	}
2221 
	return 0;
}
2225 
2226 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2227 	struct pci_dev *pdev)
2228 {
2229 	u32 crat;
2230 	struct tsi148_driver *bridge;
2231 
2232 	bridge = tsi148_bridge->driver_priv;
2233 
2234 	/* Turn off CR/CSR space */
2235 	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2236 	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2237 		bridge->base + TSI148_LCSR_CRAT);
2238 
2239 	/* Free image */
2240 	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2241 	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2242 
2243 	dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
2244 			  bridge->crcsr_kernel, bridge->crcsr_bus);
2245 }
2246 
2247 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2248 {
2249 	int retval, i, master_num;
2250 	u32 data;
2251 	struct list_head *pos = NULL, *n;
2252 	struct vme_bridge *tsi148_bridge;
2253 	struct tsi148_driver *tsi148_device;
2254 	struct vme_master_resource *master_image;
2255 	struct vme_slave_resource *slave_image;
2256 	struct vme_dma_resource *dma_ctrlr;
2257 	struct vme_lm_resource *lm;
2258 
2259 	/* If we want to support more than one of each bridge, we need to
2260 	 * dynamically generate this so we get one per device
2261 	 */
2262 	tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
2263 	if (!tsi148_bridge) {
2264 		retval = -ENOMEM;
2265 		goto err_struct;
2266 	}
2267 	vme_init_bridge(tsi148_bridge);
2268 
2269 	tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
2270 	if (!tsi148_device) {
2271 		retval = -ENOMEM;
2272 		goto err_driver;
2273 	}
2274 
2275 	tsi148_bridge->driver_priv = tsi148_device;
2276 
2277 	/* Enable the device */
2278 	retval = pci_enable_device(pdev);
2279 	if (retval) {
2280 		dev_err(&pdev->dev, "Unable to enable device\n");
2281 		goto err_enable;
2282 	}
2283 
2284 	/* Map Registers */
2285 	retval = pci_request_regions(pdev, driver_name);
2286 	if (retval) {
2287 		dev_err(&pdev->dev, "Unable to reserve resources\n");
2288 		goto err_resource;
2289 	}
2290 
	/* Map the 4 kB CRG register block in BAR 0 */
2292 	tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
2293 		4096);
2294 	if (!tsi148_device->base) {
2295 		dev_err(&pdev->dev, "Unable to remap CRG region\n");
2296 		retval = -EIO;
2297 		goto err_remap;
2298 	}
2299 
2300 	/* Check to see if the mapping worked out */
2301 	data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2302 	if (data != PCI_VENDOR_ID_TUNDRA) {
2303 		dev_err(&pdev->dev, "CRG region check failed\n");
2304 		retval = -EIO;
2305 		goto err_test;
2306 	}
2307 
2308 	/* Initialize wait queues & mutual exclusion flags */
2309 	init_waitqueue_head(&tsi148_device->dma_queue[0]);
2310 	init_waitqueue_head(&tsi148_device->dma_queue[1]);
2311 	init_waitqueue_head(&tsi148_device->iack_queue);
2312 	mutex_init(&tsi148_device->vme_int);
2313 	mutex_init(&tsi148_device->vme_rmw);
2314 
2315 	tsi148_bridge->parent = &pdev->dev;
2316 	strcpy(tsi148_bridge->name, driver_name);
2317 
2318 	/* Setup IRQ */
2319 	retval = tsi148_irq_init(tsi148_bridge);
2320 	if (retval != 0) {
2321 		dev_err(&pdev->dev, "Chip Initialization failed.\n");
2322 		goto err_irq;
2323 	}
2324 
	/* If we are going to flush writes, we need to read from the VME bus.
	 * We need to do this safely, thus we read the device's own CR/CSR
	 * register. To do this we must set up a window in CR/CSR space and
	 * hence have one less master window resource available.
	 */
2330 	master_num = TSI148_MAX_MASTER;
2331 	if (err_chk) {
2332 		master_num--;
2333 
2334 		tsi148_device->flush_image =
2335 			kmalloc(sizeof(*tsi148_device->flush_image),
2336 				GFP_KERNEL);
2337 		if (!tsi148_device->flush_image) {
2338 			retval = -ENOMEM;
2339 			goto err_master;
2340 		}
2341 		tsi148_device->flush_image->parent = tsi148_bridge;
2342 		spin_lock_init(&tsi148_device->flush_image->lock);
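		/*
		 * Claim the window (locked = 1) so the VME core's resource
		 * allocator should skip it; the regular master windows
		 * created below start with locked = 0.
		 */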
2343 		tsi148_device->flush_image->locked = 1;
2344 		tsi148_device->flush_image->number = master_num;
2345 		memset(&tsi148_device->flush_image->bus_resource, 0,
2346 		       sizeof(tsi148_device->flush_image->bus_resource));
		tsi148_device->flush_image->kern_base = NULL;
2348 	}
2349 
2350 	/* Add master windows to list */
2351 	for (i = 0; i < master_num; i++) {
2352 		master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
2353 		if (!master_image) {
2354 			retval = -ENOMEM;
2355 			goto err_master;
2356 		}
2357 		master_image->parent = tsi148_bridge;
2358 		spin_lock_init(&master_image->lock);
2359 		master_image->locked = 0;
2360 		master_image->number = i;
2361 		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2362 			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2363 			VME_USER3 | VME_USER4;
2364 		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2365 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2366 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2367 			VME_PROG | VME_DATA;
2368 		master_image->width_attr = VME_D16 | VME_D32;
2369 		memset(&master_image->bus_resource, 0,
2370 		       sizeof(master_image->bus_resource));
		master_image->kern_base = NULL;
2372 		list_add_tail(&master_image->list,
2373 			&tsi148_bridge->master_resources);
2374 	}
2375 
2376 	/* Add slave windows to list */
2377 	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2378 		slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
2379 		if (!slave_image) {
2380 			retval = -ENOMEM;
2381 			goto err_slave;
2382 		}
2383 		slave_image->parent = tsi148_bridge;
2384 		mutex_init(&slave_image->mtx);
2385 		slave_image->locked = 0;
2386 		slave_image->number = i;
2387 		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2388 			VME_A64;
2389 		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2390 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2391 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2392 			VME_PROG | VME_DATA;
2393 		list_add_tail(&slave_image->list,
2394 			&tsi148_bridge->slave_resources);
2395 	}
2396 
2397 	/* Add dma engines to list */
2398 	for (i = 0; i < TSI148_MAX_DMA; i++) {
2399 		dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
2400 		if (!dma_ctrlr) {
2401 			retval = -ENOMEM;
2402 			goto err_dma;
2403 		}
2404 		dma_ctrlr->parent = tsi148_bridge;
2405 		mutex_init(&dma_ctrlr->mtx);
2406 		dma_ctrlr->locked = 0;
2407 		dma_ctrlr->number = i;
2408 		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2409 			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2410 			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2411 			VME_DMA_PATTERN_TO_MEM;
2412 		INIT_LIST_HEAD(&dma_ctrlr->pending);
2413 		INIT_LIST_HEAD(&dma_ctrlr->running);
2414 		list_add_tail(&dma_ctrlr->list,
2415 			&tsi148_bridge->dma_resources);
2416 	}
2417 
2418 	/* Add location monitor to list */
2419 	lm = kmalloc(sizeof(*lm), GFP_KERNEL);
2420 	if (!lm) {
2421 		retval = -ENOMEM;
2422 		goto err_lm;
2423 	}
2424 	lm->parent = tsi148_bridge;
2425 	mutex_init(&lm->mtx);
2426 	lm->locked = 0;
2427 	lm->number = 1;
2428 	lm->monitors = 4;
2429 	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2430 
2431 	tsi148_bridge->slave_get = tsi148_slave_get;
2432 	tsi148_bridge->slave_set = tsi148_slave_set;
2433 	tsi148_bridge->master_get = tsi148_master_get;
2434 	tsi148_bridge->master_set = tsi148_master_set;
2435 	tsi148_bridge->master_read = tsi148_master_read;
2436 	tsi148_bridge->master_write = tsi148_master_write;
2437 	tsi148_bridge->master_rmw = tsi148_master_rmw;
2438 	tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2439 	tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2440 	tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2441 	tsi148_bridge->irq_set = tsi148_irq_set;
2442 	tsi148_bridge->irq_generate = tsi148_irq_generate;
2443 	tsi148_bridge->lm_set = tsi148_lm_set;
2444 	tsi148_bridge->lm_get = tsi148_lm_get;
2445 	tsi148_bridge->lm_attach = tsi148_lm_attach;
2446 	tsi148_bridge->lm_detach = tsi148_lm_detach;
2447 	tsi148_bridge->slot_get = tsi148_slot_get;
2448 	tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2449 	tsi148_bridge->free_consistent = tsi148_free_consistent;
2450 
2451 	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2452 	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2453 		(data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2454 	if (!geoid)
2455 		dev_info(&pdev->dev, "VME geographical address is %d\n",
2456 			data & TSI148_LCSR_VSTAT_GA_M);
2457 	else
2458 		dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2459 			geoid);
2460 
	dev_info(&pdev->dev, "VME write flush and error checking is %s\n",
		err_chk ? "enabled" : "disabled");
2463 
2464 	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2465 	if (retval) {
2466 		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2467 		goto err_crcsr;
2468 	}
2469 
2470 	retval = vme_register_bridge(tsi148_bridge);
2471 	if (retval != 0) {
2472 		dev_err(&pdev->dev, "Chip Registration failed.\n");
2473 		goto err_reg;
2474 	}
2475 
2476 	pci_set_drvdata(pdev, tsi148_bridge);
2477 
	/* Clear VME bus "board fail" and "power-up reset" lines */
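	/*
	 * BRDFL is cleared by writing 0, while CPURST appears to be a
	 * write-one-to-clear bit, hence the asymmetric bit manipulation
	 * below.
	 */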
2479 	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2480 	data &= ~TSI148_LCSR_VSTAT_BRDFL;
2481 	data |= TSI148_LCSR_VSTAT_CPURST;
2482 	iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2483 
2484 	return 0;
2485 
2486 err_reg:
2487 	tsi148_crcsr_exit(tsi148_bridge, pdev);
2488 err_crcsr:
2489 err_lm:
2490 	/* resources are stored in link list */
2491 	list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2492 		lm = list_entry(pos, struct vme_lm_resource, list);
2493 		list_del(pos);
2494 		kfree(lm);
2495 	}
2496 err_dma:
2497 	/* resources are stored in link list */
2498 	list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2499 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2500 		list_del(pos);
2501 		kfree(dma_ctrlr);
2502 	}
2503 err_slave:
2504 	/* resources are stored in link list */
2505 	list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2506 		slave_image = list_entry(pos, struct vme_slave_resource, list);
2507 		list_del(pos);
2508 		kfree(slave_image);
2509 	}
2510 err_master:
2511 	/* resources are stored in link list */
2512 	list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2513 		master_image = list_entry(pos, struct vme_master_resource,
2514 			list);
2515 		list_del(pos);
2516 		kfree(master_image);
2517 	}
2518 
2519 	tsi148_irq_exit(tsi148_bridge, pdev);
2520 err_irq:
2521 err_test:
2522 	iounmap(tsi148_device->base);
2523 err_remap:
2524 	pci_release_regions(pdev);
2525 err_resource:
2526 	pci_disable_device(pdev);
2527 err_enable:
2528 	kfree(tsi148_device);
2529 err_driver:
2530 	kfree(tsi148_bridge);
2531 err_struct:
	return retval;
}
2535 
2536 static void tsi148_remove(struct pci_dev *pdev)
2537 {
2538 	struct list_head *pos = NULL;
2539 	struct list_head *tmplist;
2540 	struct vme_master_resource *master_image;
2541 	struct vme_slave_resource *slave_image;
2542 	struct vme_dma_resource *dma_ctrlr;
2543 	int i;
2544 	struct tsi148_driver *bridge;
2545 	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2546 
2547 	bridge = tsi148_bridge->driver_priv;
2548 
2549 	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2550 
2551 	/*
2552 	 *  Shutdown all inbound and outbound windows.
2553 	 */
2554 	for (i = 0; i < 8; i++) {
2555 		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2556 			TSI148_LCSR_OFFSET_ITAT);
2557 		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2558 			TSI148_LCSR_OFFSET_OTAT);
2559 	}
2560 
2561 	/*
2562 	 *  Shutdown Location monitor.
2563 	 */
2564 	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2565 
2566 	/*
2567 	 *  Shutdown CRG map.
2568 	 */
2569 	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2570 
2571 	/*
2572 	 *  Clear error status.
2573 	 */
2574 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2575 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2576 	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2577 
2578 	/*
2579 	 *  Remove VIRQ interrupt (if any)
2580 	 */
2581 	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2582 		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2583 
2584 	/*
2585 	 *  Map all Interrupts to PCI INTA
2586 	 */
2587 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2588 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2589 
2590 	tsi148_irq_exit(tsi148_bridge, pdev);
2591 
2592 	vme_unregister_bridge(tsi148_bridge);
2593 
2594 	tsi148_crcsr_exit(tsi148_bridge, pdev);
2595 
2596 	/* resources are stored in link list */
2597 	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2598 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2599 		list_del(pos);
2600 		kfree(dma_ctrlr);
2601 	}
2602 
2603 	/* resources are stored in link list */
2604 	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2605 		slave_image = list_entry(pos, struct vme_slave_resource, list);
2606 		list_del(pos);
2607 		kfree(slave_image);
2608 	}
2609 
2610 	/* resources are stored in link list */
2611 	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2612 		master_image = list_entry(pos, struct vme_master_resource,
2613 			list);
2614 		list_del(pos);
2615 		kfree(master_image);
2616 	}
2617 
2618 	iounmap(bridge->base);
2619 
2620 	pci_release_regions(pdev);
2621 
2622 	pci_disable_device(pdev);
2623 
2624 	kfree(tsi148_bridge->driver_priv);
2625 
2626 	kfree(tsi148_bridge);
2627 }
2628 
2629 module_pci_driver(tsi148_driver);
2630 
2631 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2632 module_param(err_chk, bool, 0);
2633 
2634 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2635 module_param(geoid, int, 0);
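
/*
 * Example (assuming the module is built as vme_tsi148.ko):
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=3
 */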
2636 
2637 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2638 MODULE_LICENSE("GPL");
2639