1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Support for the Tundra TSI148 VME-PCI Bridge Chip
4 *
5 * Author: Martyn Welch <martyn.welch@ge.com>
6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 *
8 * Based on work by Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
10 */
11
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/mm.h>
15 #include <linux/types.h>
16 #include <linux/errno.h>
17 #include <linux/proc_fs.h>
18 #include <linux/pci.h>
19 #include <linux/poll.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26 #include <linux/io.h>
27 #include <linux/uaccess.h>
28 #include <linux/byteorder/generic.h>
29
30 #include "vme.h"
31 #include "vme_bridge.h"
32 #include "vme_tsi148.h"
33
34 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
35 static void tsi148_remove(struct pci_dev *);
36
37 /* Module parameters */
38 static bool err_chk;
39 static int geoid;
40
41 static const char driver_name[] = "vme_tsi148";
42
43 static const struct pci_device_id tsi148_ids[] = {
44 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
45 { },
46 };
47
48 MODULE_DEVICE_TABLE(pci, tsi148_ids);
49
50 static struct pci_driver tsi148_driver = {
51 .name = driver_name,
52 .id_table = tsi148_ids,
53 .probe = tsi148_probe,
54 .remove = tsi148_remove,
55 };
56
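/*
 * Combine the upper and lower 32-bit halves of a 64-bit register pair
 * (for example VEAU/VEAL) into a single 64-bit value.
 */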
57 static void reg_join(unsigned int high, unsigned int low,
58 unsigned long long *variable)
59 {
60 *variable = (unsigned long long)high << 32;
61 *variable |= (unsigned long long)low;
62 }
63
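/*
 * Split a 64-bit value into the upper and lower 32-bit halves needed to
 * program a pair of TSI148 registers.
 */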
64 static void reg_split(unsigned long long variable, unsigned int *high,
65 unsigned int *low)
66 {
67 *low = (unsigned int)variable & 0xFFFFFFFF;
68 *high = (unsigned int)(variable >> 32);
69 }
70
71 /*
72 * Wakes up DMA queue.
73 */
74 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
75 int channel_mask)
76 {
77 u32 serviced = 0;
78
79 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
80 wake_up(&bridge->dma_queue[0]);
81 serviced |= TSI148_LCSR_INTC_DMA0C;
82 }
83 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
84 wake_up(&bridge->dma_queue[1]);
85 serviced |= TSI148_LCSR_INTC_DMA1C;
86 }
87
88 return serviced;
89 }
90
91 /*
92 * Wake up location monitor queue
93 */
94 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
95 {
96 int i;
97 u32 serviced = 0;
98
99 for (i = 0; i < 4; i++) {
100 if (stat & TSI148_LCSR_INTS_LMS[i]) {
101 /* We only enable interrupts if the callback is set */
102 bridge->lm_callback[i](bridge->lm_data[i]);
103 serviced |= TSI148_LCSR_INTC_LMC[i];
104 }
105 }
106
107 return serviced;
108 }
109
110 /*
111 * Wake up mail box queue.
112 *
113 * XXX This functionality is not exposed up through the API.
114 */
115 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
116 {
117 int i;
118 u32 val;
119 u32 serviced = 0;
120 struct tsi148_driver *bridge;
121
122 bridge = tsi148_bridge->driver_priv;
123
124 for (i = 0; i < 4; i++) {
125 if (stat & TSI148_LCSR_INTS_MBS[i]) {
126 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
127 dev_err(tsi148_bridge->parent, "VME Mailbox %d received: 0x%x\n",
128 i, val);
129 serviced |= TSI148_LCSR_INTC_MBC[i];
130 }
131 }
132
133 return serviced;
134 }
135
136 /*
137 * Display error & status message when PERR (PCI) exception interrupt occurs.
138 */
139 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
140 {
141 struct tsi148_driver *bridge;
142
143 bridge = tsi148_bridge->driver_priv;
144
145 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
146 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
147 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
148 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
149
150 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
151 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
152 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
153
154 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
155
156 return TSI148_LCSR_INTC_PERRC;
157 }
158
159 /*
160 * Save address and status when VME error interrupt occurs.
161 */
162 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
163 {
164 unsigned int error_addr_high, error_addr_low;
165 unsigned long long error_addr;
166 u32 error_attrib;
167 int error_am;
168 struct tsi148_driver *bridge;
169
170 bridge = tsi148_bridge->driver_priv;
171
172 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
173 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
174 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
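/* Extract the VME address modifier (AM) code from the attribute register */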
175 error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
176
177 reg_join(error_addr_high, error_addr_low, &error_addr);
178
179 /* Check for exception register overflow (we have lost error data) */
180 if (error_attrib & TSI148_LCSR_VEAT_VEOF)
181 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow Occurred\n");
182
183 if (err_chk)
184 vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
185 else
186 dev_err(tsi148_bridge->parent,
187 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
188 error_addr, error_attrib);
189
190 /* Clear Status */
191 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
192
193 return TSI148_LCSR_INTC_VERRC;
194 }
195
196 /*
197 * Wake up IACK queue.
198 */
199 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
200 {
201 wake_up(&bridge->iack_queue);
202
203 return TSI148_LCSR_INTC_IACKC;
204 }
205
206 /*
207 * Call the VME bus interrupt callback if provided.
208 */
209 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
210 u32 stat)
211 {
212 int vec, i, serviced = 0;
213 struct tsi148_driver *bridge;
214
215 bridge = tsi148_bridge->driver_priv;
216
217 for (i = 7; i > 0; i--) {
218 if (stat & (1 << i)) {
219 /*
220 * Note: Even though the registers are defined as
221 * 32-bits in the spec, we only want to issue 8-bit
222 * IACK cycles on the bus, read from offset 3.
223 */
224 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
225
226 vme_irq_handler(tsi148_bridge, i, vec);
227
228 serviced |= (1 << i);
229 }
230 }
231
232 return serviced;
233 }
234
235 /*
236 * Top level interrupt handler. Calls the appropriate sub handler(s) and
237 * then clears the serviced interrupt status bits.
238 */
239 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
240 {
241 u32 stat, enable, serviced = 0;
242 struct vme_bridge *tsi148_bridge;
243 struct tsi148_driver *bridge;
244
245 tsi148_bridge = ptr;
246
247 bridge = tsi148_bridge->driver_priv;
248
249 /* Determine which interrupts are unmasked and set */
250 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
251 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
252
253 /* Only look at unmasked interrupts */
254 stat &= enable;
255
256 if (unlikely(!stat))
257 return IRQ_NONE;
258
259 /* Call subhandlers as appropriate */
260 /* DMA irqs */
261 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
262 serviced |= tsi148_DMA_irqhandler(bridge, stat);
263
264 /* Location monitor irqs */
265 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
266 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
267 serviced |= tsi148_LM_irqhandler(bridge, stat);
268
269 /* Mail box irqs */
270 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
271 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
272 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
273
274 /* PCI bus error */
275 if (stat & TSI148_LCSR_INTS_PERRS)
276 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
277
278 /* VME bus error */
279 if (stat & TSI148_LCSR_INTS_VERRS)
280 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
281
282 /* IACK irq */
283 if (stat & TSI148_LCSR_INTS_IACKS)
284 serviced |= tsi148_IACK_irqhandler(bridge);
285
286 /* VME bus irqs */
287 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
288 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
289 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
290 TSI148_LCSR_INTS_IRQ1S))
291 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
292
293 /* Clear serviced interrupts */
294 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
295
296 return IRQ_HANDLED;
297 }
298
299 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
300 {
301 int result;
302 unsigned int tmp;
303 struct pci_dev *pdev;
304 struct tsi148_driver *bridge;
305
306 pdev = to_pci_dev(tsi148_bridge->parent);
307
308 bridge = tsi148_bridge->driver_priv;
309
310 result = request_irq(pdev->irq,
311 tsi148_irqhandler,
312 IRQF_SHARED,
313 driver_name, tsi148_bridge);
314 if (result) {
315 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq vector %02X\n",
316 pdev->irq);
317 return result;
318 }
319
320 /* Enable and unmask interrupts */
321 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
322 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
323 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
324 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
325 TSI148_LCSR_INTEO_IACKEO;
326
327 /* This leaves the following interrupts masked.
328 * TSI148_LCSR_INTEO_VIEEO
329 * TSI148_LCSR_INTEO_SYSFLEO
330 * TSI148_LCSR_INTEO_ACFLEO
331 */
332
333 /* Don't enable Location Monitor interrupts here - they will be
334 * enabled when the location monitors are properly configured and
335 * a callback has been attached.
336 * TSI148_LCSR_INTEO_LM0EO
337 * TSI148_LCSR_INTEO_LM1EO
338 * TSI148_LCSR_INTEO_LM2EO
339 * TSI148_LCSR_INTEO_LM3EO
340 */
341
342 /* Don't enable VME interrupts until a handler has been added, otherwise
343 * the board will respond to interrupts that nothing yet knows how to
344 * handle properly.
345 * TSI148_LCSR_INTEO_IRQ7EO
346 * TSI148_LCSR_INTEO_IRQ6EO
347 * TSI148_LCSR_INTEO_IRQ5EO
348 * TSI148_LCSR_INTEO_IRQ4EO
349 * TSI148_LCSR_INTEO_IRQ3EO
350 * TSI148_LCSR_INTEO_IRQ2EO
351 * TSI148_LCSR_INTEO_IRQ1EO
352 */
353
354 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
355 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
356
357 return 0;
358 }
359
360 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
361 struct pci_dev *pdev)
362 {
363 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
364
365 /* Turn off interrupts */
366 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
367 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
368
369 /* Clear all interrupts */
370 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
371
372 /* Detach interrupt handler */
373 free_irq(pdev->irq, tsi148_bridge);
374 }
375
376 /*
377 * Check to see if an IACK has been received; returns true (1) or false (0).
378 */
379 static int tsi148_iack_received(struct tsi148_driver *bridge)
380 {
381 u32 tmp;
382
383 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
384
385 if (tmp & TSI148_LCSR_VICR_IRQS)
386 return 0;
387 else
388 return 1;
389 }
390
391 /*
392 * Configure VME interrupt
393 */
394 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
395 int state, int sync)
396 {
397 struct pci_dev *pdev;
398 u32 tmp;
399 struct tsi148_driver *bridge;
400
401 bridge = tsi148_bridge->driver_priv;
402
403 /* We need to do the ordering differently for enabling and disabling */
404 if (state == 0) {
405 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
406 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
407 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
408
409 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
410 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
411 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
412
413 if (sync != 0) {
414 pdev = to_pci_dev(tsi148_bridge->parent);
415 synchronize_irq(pdev->irq);
416 }
417 } else {
418 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
419 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
420 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
421
422 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
423 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
424 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
425 }
426 }
427
428 /*
429 * Generate a VME bus interrupt at the requested level & vector. Wait for
430 * interrupt to be acked.
431 */
432 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
433 int statid)
434 {
435 u32 tmp;
436 struct tsi148_driver *bridge;
437
438 bridge = tsi148_bridge->driver_priv;
439
440 mutex_lock(&bridge->vme_int);
441
442 /* Read VICR register */
443 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
444
445 /* Set Status/ID */
446 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
447 (statid & TSI148_LCSR_VICR_STID_M);
448 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
449
450 /* Assert VMEbus IRQ */
451 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
452 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
453
454 /* XXX Consider implementing a timeout? */
455 wait_event_interruptible(bridge->iack_queue,
456 tsi148_iack_received(bridge));
457
458 mutex_unlock(&bridge->vme_int);
459
460 return 0;
461 }
462
463 /*
464 * Initialize a slave window with the requested attributes.
465 */
466 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
467 unsigned long long vme_base, unsigned long long size,
468 dma_addr_t pci_base, u32 aspace, u32 cycle)
469 {
470 unsigned int i, addr = 0, granularity = 0;
471 unsigned int temp_ctl = 0;
472 unsigned int vme_base_low, vme_base_high;
473 unsigned int vme_bound_low, vme_bound_high;
474 unsigned int pci_offset_low, pci_offset_high;
475 unsigned long long vme_bound, pci_offset;
476 struct vme_bridge *tsi148_bridge;
477 struct tsi148_driver *bridge;
478
479 tsi148_bridge = image->parent;
480 bridge = tsi148_bridge->driver_priv;
481
482 i = image->number;
483
484 switch (aspace) {
485 case VME_A16:
486 granularity = 0x10;
487 addr |= TSI148_LCSR_ITAT_AS_A16;
488 break;
489 case VME_A24:
490 granularity = 0x1000;
491 addr |= TSI148_LCSR_ITAT_AS_A24;
492 break;
493 case VME_A32:
494 granularity = 0x10000;
495 addr |= TSI148_LCSR_ITAT_AS_A32;
496 break;
497 case VME_A64:
498 granularity = 0x10000;
499 addr |= TSI148_LCSR_ITAT_AS_A64;
500 break;
501 default:
502 dev_err(tsi148_bridge->parent, "Invalid address space\n");
503 return -EINVAL;
504 }
505
506 /* Convert 64-bit variables to 2x 32-bit variables */
507 reg_split(vme_base, &vme_base_high, &vme_base_low);
508
509 /*
510 * Bound address is a valid address for the window, adjust
511 * accordingly
512 */
513 vme_bound = vme_base + size - granularity;
514 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
515 pci_offset = (unsigned long long)pci_base - vme_base;
516 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
517
518 if (vme_base_low & (granularity - 1)) {
519 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
520 return -EINVAL;
521 }
522 if (vme_bound_low & (granularity - 1)) {
523 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
524 return -EINVAL;
525 }
526 if (pci_offset_low & (granularity - 1)) {
527 dev_err(tsi148_bridge->parent, "Invalid PCI Offset alignment\n");
528 return -EINVAL;
529 }
530
531 /* Disable while we are mucking around */
532 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
533 TSI148_LCSR_OFFSET_ITAT);
534 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
535 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
536 TSI148_LCSR_OFFSET_ITAT);
537
538 /* Setup mapping */
539 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
540 TSI148_LCSR_OFFSET_ITSAU);
541 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
542 TSI148_LCSR_OFFSET_ITSAL);
543 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
544 TSI148_LCSR_OFFSET_ITEAU);
545 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
546 TSI148_LCSR_OFFSET_ITEAL);
547 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
548 TSI148_LCSR_OFFSET_ITOFU);
549 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
550 TSI148_LCSR_OFFSET_ITOFL);
551
552 /* Setup 2eSST speeds */
553 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
554 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
555 case VME_2eSST160:
556 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
557 break;
558 case VME_2eSST267:
559 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
560 break;
561 case VME_2eSST320:
562 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
563 break;
564 }
565
566 /* Setup cycle types */
567 temp_ctl &= ~(0x1F << 7);
568 if (cycle & VME_BLT)
569 temp_ctl |= TSI148_LCSR_ITAT_BLT;
570 if (cycle & VME_MBLT)
571 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
572 if (cycle & VME_2eVME)
573 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
574 if (cycle & VME_2eSST)
575 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
576 if (cycle & VME_2eSSTB)
577 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
578
579 /* Setup address space */
580 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
581 temp_ctl |= addr;
582
583 temp_ctl &= ~0xF;
584 if (cycle & VME_SUPER)
585 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
586 if (cycle & VME_USER)
587 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
588 if (cycle & VME_PROG)
589 temp_ctl |= TSI148_LCSR_ITAT_PGM;
590 if (cycle & VME_DATA)
591 temp_ctl |= TSI148_LCSR_ITAT_DATA;
592
593 /* Write ctl reg without enable */
594 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
595 TSI148_LCSR_OFFSET_ITAT);
596
597 if (enabled)
598 temp_ctl |= TSI148_LCSR_ITAT_EN;
599
600 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
601 TSI148_LCSR_OFFSET_ITAT);
602
603 return 0;
604 }
605
606 /*
607 * Get slave window configuration.
608 */
609 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
610 unsigned long long *vme_base, unsigned long long *size,
611 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
612 {
613 unsigned int i, granularity = 0, ctl = 0;
614 unsigned int vme_base_low, vme_base_high;
615 unsigned int vme_bound_low, vme_bound_high;
616 unsigned int pci_offset_low, pci_offset_high;
617 unsigned long long vme_bound, pci_offset;
618 struct tsi148_driver *bridge;
619
620 bridge = image->parent->driver_priv;
621
622 i = image->number;
623
624 /* Read registers */
625 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
626 TSI148_LCSR_OFFSET_ITAT);
627
628 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
629 TSI148_LCSR_OFFSET_ITSAU);
630 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
631 TSI148_LCSR_OFFSET_ITSAL);
632 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
633 TSI148_LCSR_OFFSET_ITEAU);
634 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
635 TSI148_LCSR_OFFSET_ITEAL);
636 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
637 TSI148_LCSR_OFFSET_ITOFU);
638 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
639 TSI148_LCSR_OFFSET_ITOFL);
640
641 /* Combine 2x 32-bit register values into 64-bit variables */
642 reg_join(vme_base_high, vme_base_low, vme_base);
643 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
644 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
645
646 *pci_base = (dma_addr_t)(*vme_base + pci_offset);
647
648 *enabled = 0;
649 *aspace = 0;
650 *cycle = 0;
651
652 if (ctl & TSI148_LCSR_ITAT_EN)
653 *enabled = 1;
654
655 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
656 granularity = 0x10;
657 *aspace |= VME_A16;
658 }
659 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
660 granularity = 0x1000;
661 *aspace |= VME_A24;
662 }
663 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
664 granularity = 0x10000;
665 *aspace |= VME_A32;
666 }
667 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
668 granularity = 0x10000;
669 *aspace |= VME_A64;
670 }
671
672 /* Need granularity before we set the size */
673 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
674
675 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
676 *cycle |= VME_2eSST160;
677 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
678 *cycle |= VME_2eSST267;
679 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
680 *cycle |= VME_2eSST320;
681
682 if (ctl & TSI148_LCSR_ITAT_BLT)
683 *cycle |= VME_BLT;
684 if (ctl & TSI148_LCSR_ITAT_MBLT)
685 *cycle |= VME_MBLT;
686 if (ctl & TSI148_LCSR_ITAT_2eVME)
687 *cycle |= VME_2eVME;
688 if (ctl & TSI148_LCSR_ITAT_2eSST)
689 *cycle |= VME_2eSST;
690 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
691 *cycle |= VME_2eSSTB;
692
693 if (ctl & TSI148_LCSR_ITAT_SUPR)
694 *cycle |= VME_SUPER;
695 if (ctl & TSI148_LCSR_ITAT_NPRIV)
696 *cycle |= VME_USER;
697 if (ctl & TSI148_LCSR_ITAT_PGM)
698 *cycle |= VME_PROG;
699 if (ctl & TSI148_LCSR_ITAT_DATA)
700 *cycle |= VME_DATA;
701
702 return 0;
703 }
704
705 /*
706 * Allocate and map PCI Resource
707 */
708 static int tsi148_alloc_resource(struct vme_master_resource *image,
709 unsigned long long size)
710 {
711 unsigned long long existing_size;
712 int retval = 0;
713 struct pci_dev *pdev;
714 struct vme_bridge *tsi148_bridge;
715
716 tsi148_bridge = image->parent;
717
718 pdev = to_pci_dev(tsi148_bridge->parent);
719
720 existing_size = (unsigned long long)(image->bus_resource.end -
721 image->bus_resource.start);
722
723 /* If the existing size is OK, return */
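/* Note: struct resource 'end' is inclusive, hence the size - 1 comparison */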
724 if ((size != 0) && (existing_size == (size - 1)))
725 return 0;
726
727 if (existing_size != 0) {
728 iounmap(image->kern_base);
729 image->kern_base = NULL;
730 kfree(image->bus_resource.name);
731 release_resource(&image->bus_resource);
732 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
733 }
734
735 /* Exit here if size is zero */
736 if (size == 0)
737 return 0;
738
739 if (!image->bus_resource.name) {
740 image->bus_resource.name = kmalloc(VMENAMSIZ + 3, GFP_ATOMIC);
741 if (!image->bus_resource.name) {
742 retval = -ENOMEM;
743 goto err_name;
744 }
745 }
746
747 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
748 image->number);
749
750 image->bus_resource.start = 0;
751 image->bus_resource.end = (unsigned long)size;
752 image->bus_resource.flags = IORESOURCE_MEM;
753
754 retval = pci_bus_alloc_resource(pdev->bus,
755 &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
756 0, NULL, NULL);
757 if (retval) {
758 dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
759 image->number, (unsigned long)size,
760 (unsigned long)image->bus_resource.start);
761 goto err_resource;
762 }
763
764 image->kern_base = ioremap(
765 image->bus_resource.start, size);
766 if (!image->kern_base) {
767 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
768 retval = -ENOMEM;
769 goto err_remap;
770 }
771
772 return 0;
773
774 err_remap:
775 release_resource(&image->bus_resource);
776 err_resource:
777 kfree(image->bus_resource.name);
778 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
779 err_name:
780 return retval;
781 }
782
783 /*
784 * Free and unmap PCI Resource
785 */
786 static void tsi148_free_resource(struct vme_master_resource *image)
787 {
788 iounmap(image->kern_base);
789 image->kern_base = NULL;
790 release_resource(&image->bus_resource);
791 kfree(image->bus_resource.name);
792 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
793 }
794
795 /*
796 * Set the attributes of an outbound window.
797 */
798 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
799 unsigned long long vme_base, unsigned long long size, u32 aspace,
800 u32 cycle, u32 dwidth)
801 {
802 int retval = 0;
803 unsigned int i;
804 unsigned int temp_ctl = 0;
805 unsigned int pci_base_low, pci_base_high;
806 unsigned int pci_bound_low, pci_bound_high;
807 unsigned int vme_offset_low, vme_offset_high;
808 unsigned long long pci_bound, vme_offset, pci_base;
809 struct vme_bridge *tsi148_bridge;
810 struct tsi148_driver *bridge;
811 struct pci_bus_region region;
812 struct pci_dev *pdev;
813
814 tsi148_bridge = image->parent;
815
816 bridge = tsi148_bridge->driver_priv;
817
818 pdev = to_pci_dev(tsi148_bridge->parent);
819
820 /* Verify input data */
821 if (vme_base & 0xFFFF) {
822 dev_err(tsi148_bridge->parent, "Invalid VME Window alignment\n");
823 retval = -EINVAL;
824 goto err_window;
825 }
826
827 if ((size == 0) && (enabled != 0)) {
828 dev_err(tsi148_bridge->parent, "Size must be non-zero for enabled windows\n");
829 retval = -EINVAL;
830 goto err_window;
831 }
832
833 spin_lock(&image->lock);
834
835 /* Let's allocate the resource here rather than further up the stack as
836 * it avoids pushing loads of bus dependent stuff up the stack. If size
837 * is zero, any existing resource will be freed.
838 */
839 retval = tsi148_alloc_resource(image, size);
840 if (retval) {
841 spin_unlock(&image->lock);
842 dev_err(tsi148_bridge->parent, "Unable to allocate memory for resource\n");
843 goto err_res;
844 }
845
846 if (size == 0) {
847 pci_base = 0;
848 pci_bound = 0;
849 vme_offset = 0;
850 } else {
851 pcibios_resource_to_bus(pdev->bus, &region,
852 &image->bus_resource);
853 pci_base = region.start;
854
855 /*
856 * Bound address is a valid address for the window, adjust
857 * according to window granularity.
858 */
859 pci_bound = pci_base + (size - 0x10000);
860 vme_offset = vme_base - pci_base;
861 }
862
863 /* Convert 64-bit variables to 2x 32-bit variables */
864 reg_split(pci_base, &pci_base_high, &pci_base_low);
865 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
866 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
867
868 if (pci_base_low & 0xFFFF) {
869 spin_unlock(&image->lock);
870 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
871 retval = -EINVAL;
872 goto err_gran;
873 }
874 if (pci_bound_low & 0xFFFF) {
875 spin_unlock(&image->lock);
876 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
877 retval = -EINVAL;
878 goto err_gran;
879 }
880 if (vme_offset_low & 0xFFFF) {
881 spin_unlock(&image->lock);
882 dev_err(tsi148_bridge->parent, "Invalid VME Offset alignment\n");
883 retval = -EINVAL;
884 goto err_gran;
885 }
886
887 i = image->number;
888
889 /* Disable while we are mucking around */
890 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
891 TSI148_LCSR_OFFSET_OTAT);
892 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
893 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
894 TSI148_LCSR_OFFSET_OTAT);
895
896 /* Setup 2eSST speeds */
897 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
898 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
899 case VME_2eSST160:
900 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
901 break;
902 case VME_2eSST267:
903 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
904 break;
905 case VME_2eSST320:
906 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
907 break;
908 }
909
910 /* Setup cycle types */
911 if (cycle & VME_BLT) {
912 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
913 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
914 }
915 if (cycle & VME_MBLT) {
916 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
917 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
918 }
919 if (cycle & VME_2eVME) {
920 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
921 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
922 }
923 if (cycle & VME_2eSST) {
924 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
925 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
926 }
927 if (cycle & VME_2eSSTB) {
928 dev_warn(tsi148_bridge->parent, "Currently not setting Broadcast Select Registers\n");
929 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
930 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
931 }
932
933 /* Setup data width */
934 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
935 switch (dwidth) {
936 case VME_D16:
937 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
938 break;
939 case VME_D32:
940 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
941 break;
942 default:
943 spin_unlock(&image->lock);
944 dev_err(tsi148_bridge->parent, "Invalid data width\n");
945 retval = -EINVAL;
946 goto err_dwidth;
947 }
948
949 /* Setup address space */
950 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
951 switch (aspace) {
952 case VME_A16:
953 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
954 break;
955 case VME_A24:
956 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
957 break;
958 case VME_A32:
959 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
960 break;
961 case VME_A64:
962 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
963 break;
964 case VME_CRCSR:
965 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
966 break;
967 case VME_USER1:
968 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
969 break;
970 case VME_USER2:
971 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
972 break;
973 case VME_USER3:
974 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
975 break;
976 case VME_USER4:
977 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
978 break;
979 default:
980 spin_unlock(&image->lock);
981 dev_err(tsi148_bridge->parent, "Invalid address space\n");
982 retval = -EINVAL;
983 goto err_aspace;
984 }
985
986 temp_ctl &= ~(3 << 4);
987 if (cycle & VME_SUPER)
988 temp_ctl |= TSI148_LCSR_OTAT_SUP;
989 if (cycle & VME_PROG)
990 temp_ctl |= TSI148_LCSR_OTAT_PGM;
991
992 /* Setup mapping */
993 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
994 TSI148_LCSR_OFFSET_OTSAU);
995 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
996 TSI148_LCSR_OFFSET_OTSAL);
997 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
998 TSI148_LCSR_OFFSET_OTEAU);
999 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1000 TSI148_LCSR_OFFSET_OTEAL);
1001 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1002 TSI148_LCSR_OFFSET_OTOFU);
1003 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1004 TSI148_LCSR_OFFSET_OTOFL);
1005
1006 /* Write ctl reg without enable */
1007 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1008 TSI148_LCSR_OFFSET_OTAT);
1009
1010 if (enabled)
1011 temp_ctl |= TSI148_LCSR_OTAT_EN;
1012
1013 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1014 TSI148_LCSR_OFFSET_OTAT);
1015
1016 spin_unlock(&image->lock);
1017 return 0;
1018
1019 err_aspace:
1020 err_dwidth:
1021 err_gran:
1022 tsi148_free_resource(image);
1023 err_res:
1024 err_window:
1025 return retval;
1026 }
1027
1028 /*
1029 * Get the attributes of an outbound window.
1030 *
1031 * XXX Not parsing prefetch information.
1032 */
1033 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1034 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1035 u32 *cycle, u32 *dwidth)
1036 {
1037 unsigned int i, ctl;
1038 unsigned int pci_base_low, pci_base_high;
1039 unsigned int pci_bound_low, pci_bound_high;
1040 unsigned int vme_offset_low, vme_offset_high;
1041
1042 unsigned long long pci_base, pci_bound, vme_offset;
1043 struct tsi148_driver *bridge;
1044
1045 bridge = image->parent->driver_priv;
1046
1047 i = image->number;
1048
1049 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1050 TSI148_LCSR_OFFSET_OTAT);
1051
1052 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1053 TSI148_LCSR_OFFSET_OTSAU);
1054 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1055 TSI148_LCSR_OFFSET_OTSAL);
1056 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1057 TSI148_LCSR_OFFSET_OTEAU);
1058 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1059 TSI148_LCSR_OFFSET_OTEAL);
1060 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1061 TSI148_LCSR_OFFSET_OTOFU);
1062 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1063 TSI148_LCSR_OFFSET_OTOFL);
1064
1065 /* Combine 2x 32-bit register values into 64-bit variables */
1066 reg_join(pci_base_high, pci_base_low, &pci_base);
1067 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1068 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1069
1070 *vme_base = pci_base + vme_offset;
1071 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1072
1073 *enabled = 0;
1074 *aspace = 0;
1075 *cycle = 0;
1076 *dwidth = 0;
1077
1078 if (ctl & TSI148_LCSR_OTAT_EN)
1079 *enabled = 1;
1080
1081 /* Setup address space */
1082 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1083 *aspace |= VME_A16;
1084 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1085 *aspace |= VME_A24;
1086 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1087 *aspace |= VME_A32;
1088 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1089 *aspace |= VME_A64;
1090 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1091 *aspace |= VME_CRCSR;
1092 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1093 *aspace |= VME_USER1;
1094 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1095 *aspace |= VME_USER2;
1096 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1097 *aspace |= VME_USER3;
1098 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1099 *aspace |= VME_USER4;
1100
1101 /* Setup 2eSST speeds */
1102 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1103 *cycle |= VME_2eSST160;
1104 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1105 *cycle |= VME_2eSST267;
1106 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1107 *cycle |= VME_2eSST320;
1108
1109 /* Setup cycle types */
1110 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1111 *cycle |= VME_SCT;
1112 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1113 *cycle |= VME_BLT;
1114 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1115 *cycle |= VME_MBLT;
1116 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1117 *cycle |= VME_2eVME;
1118 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1119 *cycle |= VME_2eSST;
1120 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1121 *cycle |= VME_2eSSTB;
1122
1123 if (ctl & TSI148_LCSR_OTAT_SUP)
1124 *cycle |= VME_SUPER;
1125 else
1126 *cycle |= VME_USER;
1127
1128 if (ctl & TSI148_LCSR_OTAT_PGM)
1129 *cycle |= VME_PROG;
1130 else
1131 *cycle |= VME_DATA;
1132
1133 /* Setup data width */
1134 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1135 *dwidth = VME_D16;
1136 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1137 *dwidth = VME_D32;
1138
1139 return 0;
1140 }
1141
1142 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1143 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1144 u32 *cycle, u32 *dwidth)
1145 {
1146 int retval;
1147
1148 spin_lock(&image->lock);
1149
1150 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1151 cycle, dwidth);
1152
1153 spin_unlock(&image->lock);
1154
1155 return retval;
1156 }
1157
1158 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1159 size_t count, loff_t offset)
1160 {
1161 int retval, enabled;
1162 unsigned long long vme_base, size;
1163 u32 aspace, cycle, dwidth;
1164 struct vme_error_handler *handler = NULL;
1165 struct vme_bridge *tsi148_bridge;
1166 void __iomem *addr = image->kern_base + offset;
1167 unsigned int done = 0;
1168 unsigned int count32;
1169
1170 tsi148_bridge = image->parent;
1171
1172 spin_lock(&image->lock);
1173
1174 if (err_chk) {
1175 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1176 &cycle, &dwidth);
1177 handler = vme_register_error_handler(tsi148_bridge, aspace,
1178 vme_base + offset, count);
1179 if (!handler) {
1180 spin_unlock(&image->lock);
1181 return -ENOMEM;
1182 }
1183 }
1184
1185 /* The following code handles VME address alignment. We cannot use
1186 * memcpy_xxx here because it may cut data transfers into 8-bit
1187 * cycles when D16 or D32 cycles are required on the VME bus.
1188 * On the other hand, the bridge itself assures that the maximum data
1189 * cycle configured for the transfer is used and splits it
1190 * automatically for non-aligned addresses, so we don't want the
1191 * overhead of needlessly forcing small transfers for the entire cycle.
1192 */
1193 if ((uintptr_t)addr & 0x1) {
1194 *(u8 *)buf = ioread8(addr);
1195 done += 1;
1196 if (done == count)
1197 goto out;
1198 }
1199 if ((uintptr_t)(addr + done) & 0x2) {
1200 if ((count - done) < 2) {
1201 *(u8 *)(buf + done) = ioread8(addr + done);
1202 done += 1;
1203 goto out;
1204 } else {
1205 *(u16 *)(buf + done) = ioread16(addr + done);
1206 done += 2;
1207 }
1208 }
1209
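/* Transfer the 32-bit aligned bulk of the buffer as 32-bit reads */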
1210 count32 = (count - done) & ~0x3;
1211 while (done < count32) {
1212 *(u32 *)(buf + done) = ioread32(addr + done);
1213 done += 4;
1214 }
1215
1216 if ((count - done) & 0x2) {
1217 *(u16 *)(buf + done) = ioread16(addr + done);
1218 done += 2;
1219 }
1220 if ((count - done) & 0x1) {
1221 *(u8 *)(buf + done) = ioread8(addr + done);
1222 done += 1;
1223 }
1224
1225 out:
1226 retval = count;
1227
1228 if (err_chk) {
1229 if (handler->num_errors) {
1230 dev_err(image->parent->parent,
1231 "First VME read error detected an at address 0x%llx\n",
1232 handler->first_error);
1233 retval = handler->first_error - (vme_base + offset);
1234 }
1235 vme_unregister_error_handler(handler);
1236 }
1237
1238 spin_unlock(&image->lock);
1239
1240 return retval;
1241 }
1242
1243 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1244 size_t count, loff_t offset)
1245 {
1246 int retval = 0, enabled;
1247 unsigned long long vme_base, size;
1248 u32 aspace, cycle, dwidth;
1249 void __iomem *addr = image->kern_base + offset;
1250 unsigned int done = 0;
1251 unsigned int count32;
1252
1253 struct vme_error_handler *handler = NULL;
1254 struct vme_bridge *tsi148_bridge;
1255 struct tsi148_driver *bridge;
1256
1257 tsi148_bridge = image->parent;
1258
1259 bridge = tsi148_bridge->driver_priv;
1260
1261 spin_lock(&image->lock);
1262
1263 if (err_chk) {
1264 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1265 &cycle, &dwidth);
1266 handler = vme_register_error_handler(tsi148_bridge, aspace,
1267 vme_base + offset, count);
1268 if (!handler) {
1269 spin_unlock(&image->lock);
1270 return -ENOMEM;
1271 }
1272 }
1273
1274 /* Here we apply the same strategy as in the master_read
1275 * function in order to ensure the correct cycles are used.
1276 */
1277 if ((uintptr_t)addr & 0x1) {
1278 iowrite8(*(u8 *)buf, addr);
1279 done += 1;
1280 if (done == count)
1281 goto out;
1282 }
1283 if ((uintptr_t)(addr + done) & 0x2) {
1284 if ((count - done) < 2) {
1285 iowrite8(*(u8 *)(buf + done), addr + done);
1286 done += 1;
1287 goto out;
1288 } else {
1289 iowrite16(*(u16 *)(buf + done), addr + done);
1290 done += 2;
1291 }
1292 }
1293
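/* Transfer the 32-bit aligned bulk of the buffer as 32-bit writes */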
1294 count32 = (count - done) & ~0x3;
1295 while (done < count32) {
1296 iowrite32(*(u32 *)(buf + done), addr + done);
1297 done += 4;
1298 }
1299
1300 if ((count - done) & 0x2) {
1301 iowrite16(*(u16 *)(buf + done), addr + done);
1302 done += 2;
1303 }
1304 if ((count - done) & 0x1) {
1305 iowrite8(*(u8 *)(buf + done), addr + done);
1306 done += 1;
1307 }
1308
1309 out:
1310 retval = count;
1311
1312 /*
1313 * Writes are posted. We need to do a read on the VME bus to flush out
1314 * all of the writes before we check for errors. We can't guarantee
1315 * that reading the data we have just written is safe. It is believed
1316 * that there isn't any read/write re-ordering, so we can read any
1317 * location in VME space; let's read the Device ID from the tsi148's
1318 * own registers as mapped into CR/CSR space.
1319 *
1320 * We check for saved errors in the written address range/space.
1321 */
1322
1323 if (err_chk) {
1324 ioread16(bridge->flush_image->kern_base + 0x7F000);
1325
1326 if (handler->num_errors) {
1327 dev_warn(tsi148_bridge->parent,
1328 "First VME write error detected an at address 0x%llx\n",
1329 handler->first_error);
1330 retval = handler->first_error - (vme_base + offset);
1331 }
1332 vme_unregister_error_handler(handler);
1333 }
1334
1335 spin_unlock(&image->lock);
1336
1337 return retval;
1338 }
1339
1340 /*
1341 * Perform an RMW cycle on the VME bus.
1342 *
1343 * Requires a previously configured master window, returns final value.
1344 */
1345 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1346 unsigned int mask, unsigned int compare, unsigned int swap,
1347 loff_t offset)
1348 {
1349 unsigned long long pci_addr;
1350 unsigned int pci_addr_high, pci_addr_low;
1351 u32 tmp, result;
1352 int i;
1353 struct tsi148_driver *bridge;
1354
1355 bridge = image->parent->driver_priv;
1356
1357 /* Find the PCI address that maps to the desired VME address */
1358 i = image->number;
1359
1360 /* Locking as we can only do one of these at a time */
1361 mutex_lock(&bridge->vme_rmw);
1362
1363 /* Lock image */
1364 spin_lock(&image->lock);
1365
1366 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1367 TSI148_LCSR_OFFSET_OTSAU);
1368 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1369 TSI148_LCSR_OFFSET_OTSAL);
1370
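/* Add the requested offset to the outbound window's PCI base address */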
1371 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1372 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1373
1374 /* Configure registers */
1375 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1376 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1377 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1378 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1379 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1380
1381 /* Enable RMW */
1382 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1383 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1384 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1385
1386 /* Kick process off with a read to the required address. */
1387 result = ioread32be(image->kern_base + offset);
1388
1389 /* Disable RMW */
1390 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1391 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1392 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1393
1394 spin_unlock(&image->lock);
1395
1396 mutex_unlock(&bridge->vme_rmw);
1397
1398 return result;
1399 }
1400
1401 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1402 u32 aspace, u32 cycle, u32 dwidth)
1403 {
1404 u32 val;
1405
1406 val = be32_to_cpu(*attr);
1407
1408 /* Setup 2eSST speeds */
1409 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1410 case VME_2eSST160:
1411 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1412 break;
1413 case VME_2eSST267:
1414 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1415 break;
1416 case VME_2eSST320:
1417 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1418 break;
1419 }
1420
1421 /* Setup cycle types */
1422 if (cycle & VME_SCT)
1423 val |= TSI148_LCSR_DSAT_TM_SCT;
1424
1425 if (cycle & VME_BLT)
1426 val |= TSI148_LCSR_DSAT_TM_BLT;
1427
1428 if (cycle & VME_MBLT)
1429 val |= TSI148_LCSR_DSAT_TM_MBLT;
1430
1431 if (cycle & VME_2eVME)
1432 val |= TSI148_LCSR_DSAT_TM_2eVME;
1433
1434 if (cycle & VME_2eSST)
1435 val |= TSI148_LCSR_DSAT_TM_2eSST;
1436
1437 if (cycle & VME_2eSSTB) {
1438 dev_err(dev, "Currently not setting Broadcast Select Registers\n");
1439 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1440 }
1441
1442 /* Setup data width */
1443 switch (dwidth) {
1444 case VME_D16:
1445 val |= TSI148_LCSR_DSAT_DBW_16;
1446 break;
1447 case VME_D32:
1448 val |= TSI148_LCSR_DSAT_DBW_32;
1449 break;
1450 default:
1451 dev_err(dev, "Invalid data width\n");
1452 return -EINVAL;
1453 }
1454
1455 /* Setup address space */
1456 switch (aspace) {
1457 case VME_A16:
1458 val |= TSI148_LCSR_DSAT_AMODE_A16;
1459 break;
1460 case VME_A24:
1461 val |= TSI148_LCSR_DSAT_AMODE_A24;
1462 break;
1463 case VME_A32:
1464 val |= TSI148_LCSR_DSAT_AMODE_A32;
1465 break;
1466 case VME_A64:
1467 val |= TSI148_LCSR_DSAT_AMODE_A64;
1468 break;
1469 case VME_CRCSR:
1470 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1471 break;
1472 case VME_USER1:
1473 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1474 break;
1475 case VME_USER2:
1476 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1477 break;
1478 case VME_USER3:
1479 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1480 break;
1481 case VME_USER4:
1482 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1483 break;
1484 default:
1485 dev_err(dev, "Invalid address space\n");
1486 return -EINVAL;
1487 }
1488
1489 if (cycle & VME_SUPER)
1490 val |= TSI148_LCSR_DSAT_SUP;
1491 if (cycle & VME_PROG)
1492 val |= TSI148_LCSR_DSAT_PGM;
1493
1494 *attr = cpu_to_be32(val);
1495
1496 return 0;
1497 }
1498
1499 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1500 u32 aspace, u32 cycle, u32 dwidth)
1501 {
1502 u32 val;
1503
1504 val = be32_to_cpu(*attr);
1505
1506 /* Setup 2eSST speeds */
1507 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1508 case VME_2eSST160:
1509 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1510 break;
1511 case VME_2eSST267:
1512 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1513 break;
1514 case VME_2eSST320:
1515 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1516 break;
1517 }
1518
1519 /* Setup cycle types */
1520 if (cycle & VME_SCT)
1521 val |= TSI148_LCSR_DDAT_TM_SCT;
1522
1523 if (cycle & VME_BLT)
1524 val |= TSI148_LCSR_DDAT_TM_BLT;
1525
1526 if (cycle & VME_MBLT)
1527 val |= TSI148_LCSR_DDAT_TM_MBLT;
1528
1529 if (cycle & VME_2eVME)
1530 val |= TSI148_LCSR_DDAT_TM_2eVME;
1531
1532 if (cycle & VME_2eSST)
1533 val |= TSI148_LCSR_DDAT_TM_2eSST;
1534
1535 if (cycle & VME_2eSSTB) {
1536 dev_err(dev, "Currently not setting Broadcast Select Registers\n");
1537 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1538 }
1539
1540 /* Setup data width */
1541 switch (dwidth) {
1542 case VME_D16:
1543 val |= TSI148_LCSR_DDAT_DBW_16;
1544 break;
1545 case VME_D32:
1546 val |= TSI148_LCSR_DDAT_DBW_32;
1547 break;
1548 default:
1549 dev_err(dev, "Invalid data width\n");
1550 return -EINVAL;
1551 }
1552
1553 /* Setup address space */
1554 switch (aspace) {
1555 case VME_A16:
1556 val |= TSI148_LCSR_DDAT_AMODE_A16;
1557 break;
1558 case VME_A24:
1559 val |= TSI148_LCSR_DDAT_AMODE_A24;
1560 break;
1561 case VME_A32:
1562 val |= TSI148_LCSR_DDAT_AMODE_A32;
1563 break;
1564 case VME_A64:
1565 val |= TSI148_LCSR_DDAT_AMODE_A64;
1566 break;
1567 case VME_CRCSR:
1568 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1569 break;
1570 case VME_USER1:
1571 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1572 break;
1573 case VME_USER2:
1574 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1575 break;
1576 case VME_USER3:
1577 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1578 break;
1579 case VME_USER4:
1580 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1581 break;
1582 default:
1583 dev_err(dev, "Invalid address space\n");
1584 return -EINVAL;
1585 }
1586
1587 if (cycle & VME_SUPER)
1588 val |= TSI148_LCSR_DDAT_SUP;
1589 if (cycle & VME_PROG)
1590 val |= TSI148_LCSR_DDAT_PGM;
1591
1592 *attr = cpu_to_be32(val);
1593
1594 return 0;
1595 }
1596
1597 /*
1598 * Add a link list descriptor to the list
1599 *
1600 * Note: DMA engine expects the DMA descriptor to be big endian.
1601 */
1602 static int tsi148_dma_list_add(struct vme_dma_list *list,
1603 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1604 {
1605 struct tsi148_dma_entry *entry, *prev;
1606 u32 address_high, address_low, val;
1607 struct vme_dma_pattern *pattern_attr;
1608 struct vme_dma_pci *pci_attr;
1609 struct vme_dma_vme *vme_attr;
1610 int retval = 0;
1611 struct vme_bridge *tsi148_bridge;
1612
1613 tsi148_bridge = list->parent->parent;
1614
1615 /* Descriptor must be aligned on 64-bit boundaries */
1616 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1617 if (!entry) {
1618 retval = -ENOMEM;
1619 goto err_mem;
1620 }
1621
1622 /* Test descriptor alignment */
1623 if ((unsigned long)&entry->descriptor & 0x7) {
1624 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 byte boundary as required: %p\n",
1625 &entry->descriptor);
1626 retval = -EINVAL;
1627 goto err_align;
1628 }
1629
1630 /* Given we are going to fill out the structure, we probably don't
1631 * need to zero it, but better safe than sorry for now.
1632 */
1633 memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1634
1635 /* Fill out source part */
1636 switch (src->type) {
1637 case VME_DMA_PATTERN:
1638 pattern_attr = src->private;
1639
1640 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1641
1642 val = TSI148_LCSR_DSAT_TYP_PAT;
1643
1644 /* Default behaviour is 32 bit pattern */
1645 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1646 val |= TSI148_LCSR_DSAT_PSZ;
1647
1648 /* It seems that the default behaviour is to increment */
1649 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1650 val |= TSI148_LCSR_DSAT_NIN;
1651 entry->descriptor.dsat = cpu_to_be32(val);
1652 break;
1653 case VME_DMA_PCI:
1654 pci_attr = src->private;
1655
1656 reg_split((unsigned long long)pci_attr->address, &address_high,
1657 &address_low);
1658 entry->descriptor.dsau = cpu_to_be32(address_high);
1659 entry->descriptor.dsal = cpu_to_be32(address_low);
1660 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1661 break;
1662 case VME_DMA_VME:
1663 vme_attr = src->private;
1664
1665 reg_split((unsigned long long)vme_attr->address, &address_high,
1666 &address_low);
1667 entry->descriptor.dsau = cpu_to_be32(address_high);
1668 entry->descriptor.dsal = cpu_to_be32(address_low);
1669 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1670
1671 retval = tsi148_dma_set_vme_src_attributes(
1672 tsi148_bridge->parent, &entry->descriptor.dsat,
1673 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1674 if (retval < 0)
1675 goto err_source;
1676 break;
1677 default:
1678 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1679 retval = -EINVAL;
1680 goto err_source;
1681 }
1682
1683 /* Assume last link - this will be over-written by adding another */
1684 entry->descriptor.dnlau = cpu_to_be32(0);
1685 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1686
1687 /* Fill out destination part */
1688 switch (dest->type) {
1689 case VME_DMA_PCI:
1690 pci_attr = dest->private;
1691
1692 reg_split((unsigned long long)pci_attr->address, &address_high,
1693 &address_low);
1694 entry->descriptor.ddau = cpu_to_be32(address_high);
1695 entry->descriptor.ddal = cpu_to_be32(address_low);
1696 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1697 break;
1698 case VME_DMA_VME:
1699 vme_attr = dest->private;
1700
1701 reg_split((unsigned long long)vme_attr->address, &address_high,
1702 &address_low);
1703 entry->descriptor.ddau = cpu_to_be32(address_high);
1704 entry->descriptor.ddal = cpu_to_be32(address_low);
1705 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1706
1707 retval = tsi148_dma_set_vme_dest_attributes(
1708 tsi148_bridge->parent, &entry->descriptor.ddat,
1709 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1710 if (retval < 0)
1711 goto err_dest;
1712 break;
1713 default:
1714 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1715 retval = -EINVAL;
1716 goto err_dest;
1717 }
1718
1719 /* Fill out count */
1720 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1721
1722 /* Add to list */
1723 list_add_tail(&entry->list, &list->entries);
1724
1725 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1726 &entry->descriptor,
1727 sizeof(entry->descriptor),
1728 DMA_TO_DEVICE);
1729 if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1730 dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1731 retval = -EINVAL;
1732 goto err_dma;
1733 }
1734
1735 /* Fill out the previous descriptor's "Next Address" */
1736 if (entry->list.prev != &list->entries) {
1737 reg_split((unsigned long long)entry->dma_handle, &address_high,
1738 &address_low);
1739 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1740 list);
1741 prev->descriptor.dnlau = cpu_to_be32(address_high);
1742 prev->descriptor.dnlal = cpu_to_be32(address_low);
1743 }
1744
1745 return 0;
1746
1747 err_dma:
1748 list_del(&entry->list);
1749 err_dest:
1750 err_source:
1751 err_align:
1752 kfree(entry);
1753 err_mem:
1754 return retval;
1755 }
1756
1757 /*
1758 * Check whether the provided DMA channel is idle: returns 1 when idle, 0 while busy.
1759 */
1760 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1761 {
1762 u32 tmp;
1763 struct tsi148_driver *bridge;
1764
1765 bridge = tsi148_bridge->driver_priv;
1766
1767 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1768 TSI148_LCSR_OFFSET_DSTA);
1769
1770 if (tmp & TSI148_LCSR_DSTA_BSY)
1771 return 0;
1772 else
1773 return 1;
1774 }
1775
1776 /*
1777 * Execute a previously generated link list
1778 *
1779 * XXX Need to provide control register configuration.
1780 */
1781 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1782 {
1783 struct vme_dma_resource *ctrlr;
1784 int channel, retval;
1785 struct tsi148_dma_entry *entry;
1786 u32 bus_addr_high, bus_addr_low;
1787 u32 val, dctlreg = 0;
1788 struct vme_bridge *tsi148_bridge;
1789 struct tsi148_driver *bridge;
1790
1791 ctrlr = list->parent;
1792
1793 tsi148_bridge = ctrlr->parent;
1794
1795 bridge = tsi148_bridge->driver_priv;
1796
1797 mutex_lock(&ctrlr->mtx);
1798
1799 channel = ctrlr->number;
1800
1801 if (!list_empty(&ctrlr->running)) {
1802 /*
1803 * XXX We have an active DMA transfer and currently haven't
1804 * sorted out the mechanism for "pending" DMA transfers.
1805 * Return busy.
1806 */
1807 /* Need to add to pending here */
1808 mutex_unlock(&ctrlr->mtx);
1809 return -EBUSY;
1810 }
1811
1812 list_add(&list->list, &ctrlr->running);
1813
1814 /* Get first bus address and write into registers */
1815 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1816 list);
1817
1818 mutex_unlock(&ctrlr->mtx);
1819
1820 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1821
1822 iowrite32be(bus_addr_high, bridge->base +
1823 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1824 iowrite32be(bus_addr_low, bridge->base +
1825 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1826
1827 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1828 TSI148_LCSR_OFFSET_DCTL);
1829
1830 /* Start the operation */
1831 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1832 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1833
1834 retval = wait_event_interruptible(bridge->dma_queue[channel],
1835 tsi148_dma_busy(ctrlr->parent, channel));
1836
1837 if (retval) {
1838 iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1839 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1840 /* Wait for the operation to abort */
1841 wait_event(bridge->dma_queue[channel],
1842 tsi148_dma_busy(ctrlr->parent, channel));
1843 retval = -EINTR;
1844 goto exit;
1845 }
1846
1847 /*
1848 * Read the status register; it remains valid until we kick off a
1849 * new transfer.
1850 */
1851 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1852 TSI148_LCSR_OFFSET_DSTA);
1853
1854 if (val & TSI148_LCSR_DSTA_VBE) {
1855 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1856 retval = -EIO;
1857 }
1858
1859 exit:
1860 /* Remove list from running list */
1861 mutex_lock(&ctrlr->mtx);
1862 list_del(&list->list);
1863 mutex_unlock(&ctrlr->mtx);
1864
1865 return retval;
1866 }
1867
1868 /*
1869 * Clean up a previously generated link list
1870 *
1871 * This is kept as a separate function - don't assume that the chain can't be reused.
1872 */
1873 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1874 {
1875 struct list_head *pos, *temp;
1876 struct tsi148_dma_entry *entry;
1877
1878 struct vme_bridge *tsi148_bridge = list->parent->parent;
1879
1880 /* detach and free each entry */
1881 list_for_each_safe(pos, temp, &list->entries) {
1882 list_del(pos);
1883 entry = list_entry(pos, struct tsi148_dma_entry, list);
1884
1885 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1886 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1887 kfree(entry);
1888 }
1889
1890 return 0;
1891 }
1892
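/*
 * Usage sketch (illustrative only, not part of this driver): a VME device
 * driver reaches the three DMA list hooks above through the VME core API.
 * The call names below reflect that API as the author understands it;
 * "vdev", "pci_buf" and the VME address are hypothetical placeholders and
 * error handling is omitted.
 *
 *	struct vme_resource *res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
 *	struct vme_dma_list *dlist = vme_new_dma_list(res);
 *	struct vme_dma_attr *src = vme_dma_pci_attribute(pci_buf);
 *	struct vme_dma_attr *dest = vme_dma_vme_attribute(0x10000, VME_A32,
 *							  VME_SCT, VME_D32);
 *
 *	vme_dma_list_add(dlist, src, dest, 4096);  calls tsi148_dma_list_add()
 *	vme_dma_list_exec(dlist);                  calls tsi148_dma_list_exec()
 *	vme_dma_list_free(dlist);                  calls tsi148_dma_list_empty()
 *
 *	vme_dma_free_attribute(src);
 *	vme_dma_free_attribute(dest);
 *	vme_dma_free(res);
 */
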
1893 /*
1894 * All 4 location monitors reside at the same base - this is therefore a
1895 * system-wide configuration.
1896 *
1897 * This does not enable the location monitor - that should be done when the
1898 * first callback is attached and disabled when the last callback is removed.
1899 */
1900 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1901 u32 aspace, u32 cycle)
1902 {
1903 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1904 int i;
1905 struct vme_bridge *tsi148_bridge;
1906 struct tsi148_driver *bridge;
1907
1908 tsi148_bridge = lm->parent;
1909
1910 bridge = tsi148_bridge->driver_priv;
1911
1912 mutex_lock(&lm->mtx);
1913
1914 /* If we already have a callback attached, we can't move it! */
1915 for (i = 0; i < lm->monitors; i++) {
1916 if (bridge->lm_callback[i]) {
1917 mutex_unlock(&lm->mtx);
1918 dev_err(tsi148_bridge->parent, "Location monitor callback attached, can't reset\n");
1919 return -EBUSY;
1920 }
1921 }
1922
1923 switch (aspace) {
1924 case VME_A16:
1925 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1926 break;
1927 case VME_A24:
1928 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1929 break;
1930 case VME_A32:
1931 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1932 break;
1933 case VME_A64:
1934 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1935 break;
1936 default:
1937 mutex_unlock(&lm->mtx);
1938 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1939 return -EINVAL;
1940 }
1941
1942 if (cycle & VME_SUPER)
1943 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1944 if (cycle & VME_USER)
1945 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1946 if (cycle & VME_PROG)
1947 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1948 if (cycle & VME_DATA)
1949 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1950
1951 reg_split(lm_base, &lm_base_high, &lm_base_low);
1952
1953 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1954 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1955 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1956
1957 mutex_unlock(&lm->mtx);
1958
1959 return 0;
1960 }
1961
1962 /* Get the configuration of the location monitor and return whether it is
1963 * enabled or disabled.
1964 */
1965 static int tsi148_lm_get(struct vme_lm_resource *lm,
1966 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1967 {
1968 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1969 struct tsi148_driver *bridge;
1970
1971 bridge = lm->parent->driver_priv;
1972
1973 mutex_lock(&lm->mtx);
1974
1975 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1976 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1977 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1978
1979 reg_join(lm_base_high, lm_base_low, lm_base);
1980
1981 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1982 enabled = 1;
1983
1984 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
1985 *aspace |= VME_A16;
1986
1987 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
1988 *aspace |= VME_A24;
1989
1990 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
1991 *aspace |= VME_A32;
1992
1993 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
1994 *aspace |= VME_A64;
1995
1996 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
1997 *cycle |= VME_SUPER;
1998 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
1999 *cycle |= VME_USER;
2000 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2001 *cycle |= VME_PROG;
2002 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2003 *cycle |= VME_DATA;
2004
2005 mutex_unlock(&lm->mtx);
2006
2007 return enabled;
2008 }
2009
2010 /*
2011 * Attach a callback to a specific location monitor.
2012 *
2013 * The callback will be passed the data pointer registered for the monitor
 * that triggered.
2014 */
2015 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2016 void (*callback)(void *), void *data)
2017 {
2018 u32 lm_ctl, tmp;
2019 struct vme_bridge *tsi148_bridge;
2020 struct tsi148_driver *bridge;
2021
2022 tsi148_bridge = lm->parent;
2023
2024 bridge = tsi148_bridge->driver_priv;
2025
2026 mutex_lock(&lm->mtx);
2027
2028 /* Ensure that the location monitor is configured - need PGM or DATA */
2029 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2030 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2031 mutex_unlock(&lm->mtx);
2032 dev_err(tsi148_bridge->parent, "Location monitor not properly configured\n");
2033 return -EINVAL;
2034 }
2035
2036 /* Check that a callback isn't already attached */
2037 if (bridge->lm_callback[monitor]) {
2038 mutex_unlock(&lm->mtx);
2039 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2040 return -EBUSY;
2041 }
2042
2043 /* Attach callback */
2044 bridge->lm_callback[monitor] = callback;
2045 bridge->lm_data[monitor] = data;
2046
2047 /* Enable Location Monitor interrupt */
2048 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2049 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2050 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2051
2052 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2053 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2054 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2055
2056 /* Ensure that the global Location Monitor Enable is set */
2057 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2058 lm_ctl |= TSI148_LCSR_LMAT_EN;
2059 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2060 }
2061
2062 mutex_unlock(&lm->mtx);
2063
2064 return 0;
2065 }
2066
2067 /*
2068 * Detach a callback function from a specific location monitor.
2069 */
2070 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2071 {
2072 u32 lm_en, tmp;
2073 struct tsi148_driver *bridge;
2074
2075 bridge = lm->parent->driver_priv;
2076
2077 mutex_lock(&lm->mtx);
2078
2079 /* Disable Location Monitor and ensure previous interrupts are clear */
2080 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2081 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2082 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2083
2084 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2085 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2086 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2087
2088 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2089 bridge->base + TSI148_LCSR_INTC);
2090
2091 /* Detach callback */
2092 bridge->lm_callback[monitor] = NULL;
2093 bridge->lm_data[monitor] = NULL;
2094
2095 /* If all location monitors are disabled, clear the global Location Monitor enable */
2096 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2097 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2098 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2099 tmp &= ~TSI148_LCSR_LMAT_EN;
2100 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2101 }
2102
2103 mutex_unlock(&lm->mtx);
2104
2105 return 0;
2106 }
2107
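/*
 * Usage sketch (illustrative only, not part of this driver): the location
 * monitor hooks above are reached through the VME core API. The call names
 * reflect that API as the author understands it; "vdev" and "my_lm_handler"
 * are hypothetical placeholders. my_lm_handler() would be invoked from
 * tsi148_LM_irqhandler() when monitored offset 0 is accessed.
 *
 *	struct vme_resource *lm = vme_lm_request(vdev);
 *
 *	vme_lm_set(lm, 0x6f000000, VME_A32, VME_USER | VME_DATA);
 *	vme_lm_attach(lm, 0, my_lm_handler, NULL);
 *	...
 *	vme_lm_detach(lm, 0);
 *	vme_lm_free(lm);
 */
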
2108 /*
2109 * Determine Geographical Addressing
2110 */
2111 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2112 {
2113 u32 slot = 0;
2114 struct tsi148_driver *bridge;
2115
2116 bridge = tsi148_bridge->driver_priv;
2117
2118 if (!geoid) {
2119 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2120 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2121 } else {
2122 slot = geoid;
 }
2123
2124 return (int)slot;
2125 }
2126
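/*
 * Note (sketch, based on the author's reading of the VME core API): device
 * drivers reach this hook through vme_slot_get(). With the geoid module
 * parameter unset, the slot comes straight from the VSTAT geographic address
 * pins; otherwise the override value is returned.
 */
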
2127 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2128 dma_addr_t *dma)
2129 {
2130 struct pci_dev *pdev;
2131
2132 /* Find pci_dev container of dev */
2133 pdev = to_pci_dev(parent);
2134
2135 return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
2136 }
2137
2138 static void tsi148_free_consistent(struct device *parent, size_t size,
2139 void *vaddr, dma_addr_t dma)
2140 {
2141 struct pci_dev *pdev;
2142
2143 /* Find pci_dev container of dev */
2144 pdev = to_pci_dev(parent);
2145
2146 dma_free_coherent(&pdev->dev, size, vaddr, dma);
2147 }
2148
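/*
 * Note (illustrative sketch only): these two thin wrappers back the VME
 * core's vme_alloc_consistent()/vme_free_consistent() helpers (names per the
 * author's reading of the core API), which device drivers use to obtain
 * coherent buffers for slave windows, e.g.
 *
 *	dma_addr_t bus;
 *	void *buf = vme_alloc_consistent(resource, 0x10000, &bus);
 *
 * where "resource" is a hypothetical previously requested slave window
 * resource.
 */
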
2149 /*
2150 * Configure CR/CSR space
2151 *
2152 * Access to the CR/CSR can be configured at power-up. The location of the
2153 * CR/CSR registers in the CR/CSR address space is determined by the board's
2154 * Auto-ID or Geographic address. This function ensures that the window is
2155 * enabled at an offset consistent with the board's geographic address.
2156 *
2157 * Each board has a 512kB window, with the highest 4kB being used for the
2158 * board's registers; this means there is a fixed-length 508kB window which
2159 * must be mapped onto PCI memory.
2160 */
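/*
 * Worked example (slot number assumed for illustration): a board with
 * geographic address 3 responds at CR/CSR offset 3 * 512kB = 0x180000, so
 * CBAR below ends up programmed with 3 << 3 and, when err_chk is set, the
 * flush window is placed at VME address 3 * 0x80000 with a size of 0x80000.
 */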
2161 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2162 struct pci_dev *pdev)
2163 {
2164 u32 cbar, crat, vstat;
2165 u32 crcsr_bus_high, crcsr_bus_low;
2166 int retval;
2167 struct tsi148_driver *bridge;
2168
2169 bridge = tsi148_bridge->driver_priv;
2170
2171 /* Allocate mem for CR/CSR image */
2172 bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
2173 VME_CRCSR_BUF_SIZE,
2174 &bridge->crcsr_bus, GFP_KERNEL);
2175 if (!bridge->crcsr_kernel) {
2176 dev_err(tsi148_bridge->parent, "Failed to allocate memory for CR/CSR image\n");
2177 return -ENOMEM;
2178 }
2179
2180 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2181
2182 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2183 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2184
2185 /* Ensure that the CR/CSR is configured at the correct offset */
2186 cbar = ioread32be(bridge->base + TSI148_CBAR);
2187 cbar = (cbar & TSI148_CRCSR_CBAR_M) >> 3;
2188
2189 vstat = tsi148_slot_get(tsi148_bridge);
2190
2191 if (cbar != vstat) {
2192 cbar = vstat;
2193 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2194 iowrite32be(cbar << 3, bridge->base + TSI148_CBAR);
2195 }
2196 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2197
2198 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2199 if (crat & TSI148_LCSR_CRAT_EN) {
2200 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2201 } else {
2202 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2203 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2204 bridge->base + TSI148_LCSR_CRAT);
2205 }
2206
2207 /* If we want flushed, error-checked writes, set up a window
2208 * over the CR/CSR registers. We read from here to safely flush
2209 * through VME writes.
2210 */
2211 if (err_chk) {
2212 retval = tsi148_master_set(bridge->flush_image, 1,
2213 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2214 VME_D16);
2215 if (retval)
2216 dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
2217 }
2218
2219 return 0;
2220 }
2221
2222 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2223 struct pci_dev *pdev)
2224 {
2225 u32 crat;
2226 struct tsi148_driver *bridge;
2227
2228 bridge = tsi148_bridge->driver_priv;
2229
2230 /* Turn off CR/CSR space */
2231 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2232 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2233 bridge->base + TSI148_LCSR_CRAT);
2234
2235 /* Free image */
2236 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2237 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2238
2239 dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
2240 bridge->crcsr_kernel, bridge->crcsr_bus);
2241 }
2242
2243 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2244 {
2245 int retval, i, master_num;
2246 u32 data;
2247 struct list_head *pos = NULL, *n;
2248 struct vme_bridge *tsi148_bridge;
2249 struct tsi148_driver *tsi148_device;
2250 struct vme_master_resource *master_image;
2251 struct vme_slave_resource *slave_image;
2252 struct vme_dma_resource *dma_ctrlr;
2253 struct vme_lm_resource *lm;
2254
2255 if (geoid < 0 || geoid >= VME_MAX_SLOTS) {
2256 dev_err(&pdev->dev, "VME geographical address must be between 0 and %d (exclusive), but got %d\n",
2257 VME_MAX_SLOTS, geoid);
2258 return -EINVAL;
2259 }
2260
2261 /* If we want to support more than one of each bridge, we need to
2262 * dynamically generate this so we get one per device
2263 */
2264 tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
2265 if (!tsi148_bridge) {
2266 retval = -ENOMEM;
2267 goto err_struct;
2268 }
2269 vme_init_bridge(tsi148_bridge);
2270
2271 tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
2272 if (!tsi148_device) {
2273 retval = -ENOMEM;
2274 goto err_driver;
2275 }
2276
2277 tsi148_bridge->driver_priv = tsi148_device;
2278
2279 /* Enable the device */
2280 retval = pci_enable_device(pdev);
2281 if (retval) {
2282 dev_err(&pdev->dev, "Unable to enable device\n");
2283 goto err_enable;
2284 }
2285
2286 /* Map Registers */
2287 retval = pci_request_regions(pdev, driver_name);
2288 if (retval) {
2289 dev_err(&pdev->dev, "Unable to reserve resources\n");
2290 goto err_resource;
2291 }
2292
2293 /* map registers in BAR 0 */
2294 tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
2295 4096);
2296 if (!tsi148_device->base) {
2297 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2298 retval = -EIO;
2299 goto err_remap;
2300 }
2301
2302 /* Check to see if the mapping worked out */
2303 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2304 if (data != PCI_VENDOR_ID_TUNDRA) {
2305 dev_err(&pdev->dev, "CRG region check failed\n");
2306 retval = -EIO;
2307 goto err_test;
2308 }
2309
2310 /* Initialize wait queues & mutual exclusion flags */
2311 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2312 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2313 init_waitqueue_head(&tsi148_device->iack_queue);
2314 mutex_init(&tsi148_device->vme_int);
2315 mutex_init(&tsi148_device->vme_rmw);
2316
2317 tsi148_bridge->parent = &pdev->dev;
2318 strcpy(tsi148_bridge->name, driver_name);
2319
2320 /* Setup IRQ */
2321 retval = tsi148_irq_init(tsi148_bridge);
2322 if (retval != 0) {
2323 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2324 goto err_irq;
2325 }
2326
2327 /* If we are going to flush writes, we need to read from the VME bus.
2328 * We need to do this safely, thus we read the device's own CR/CSR
2329 * register. To do this we must set up a window in CR/CSR space and
2330 * hence have one less master window resource available.
2331 */
2332 master_num = TSI148_MAX_MASTER;
2333 if (err_chk) {
2334 master_num--;
2335
2336 tsi148_device->flush_image =
2337 kmalloc(sizeof(*tsi148_device->flush_image),
2338 GFP_KERNEL);
2339 if (!tsi148_device->flush_image) {
2340 retval = -ENOMEM;
2341 goto err_master;
2342 }
2343 tsi148_device->flush_image->parent = tsi148_bridge;
2344 spin_lock_init(&tsi148_device->flush_image->lock);
2345 tsi148_device->flush_image->locked = 1;
2346 tsi148_device->flush_image->number = master_num;
2347 memset(&tsi148_device->flush_image->bus_resource, 0,
2348 sizeof(tsi148_device->flush_image->bus_resource));
2349 tsi148_device->flush_image->kern_base = NULL;
2350 }
2351
2352 /* Add master windows to list */
2353 for (i = 0; i < master_num; i++) {
2354 master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
2355 if (!master_image) {
2356 retval = -ENOMEM;
2357 goto err_master;
2358 }
2359 master_image->parent = tsi148_bridge;
2360 spin_lock_init(&master_image->lock);
2361 master_image->locked = 0;
2362 master_image->number = i;
2363 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2364 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2365 VME_USER3 | VME_USER4;
2366 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2367 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2368 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2369 VME_PROG | VME_DATA;
2370 master_image->width_attr = VME_D16 | VME_D32;
2371 memset(&master_image->bus_resource, 0,
2372 sizeof(master_image->bus_resource));
2373 master_image->kern_base = NULL;
2374 list_add_tail(&master_image->list,
2375 &tsi148_bridge->master_resources);
2376 }
2377
2378 /* Add slave windows to list */
2379 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2380 slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
2381 if (!slave_image) {
2382 retval = -ENOMEM;
2383 goto err_slave;
2384 }
2385 slave_image->parent = tsi148_bridge;
2386 mutex_init(&slave_image->mtx);
2387 slave_image->locked = 0;
2388 slave_image->number = i;
2389 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2390 VME_A64;
2391 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2392 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2393 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2394 VME_PROG | VME_DATA;
2395 list_add_tail(&slave_image->list,
2396 &tsi148_bridge->slave_resources);
2397 }
2398
2399 /* Add dma engines to list */
2400 for (i = 0; i < TSI148_MAX_DMA; i++) {
2401 dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
2402 if (!dma_ctrlr) {
2403 retval = -ENOMEM;
2404 goto err_dma;
2405 }
2406 dma_ctrlr->parent = tsi148_bridge;
2407 mutex_init(&dma_ctrlr->mtx);
2408 dma_ctrlr->locked = 0;
2409 dma_ctrlr->number = i;
2410 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2411 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2412 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2413 VME_DMA_PATTERN_TO_MEM;
2414 INIT_LIST_HEAD(&dma_ctrlr->pending);
2415 INIT_LIST_HEAD(&dma_ctrlr->running);
2416 list_add_tail(&dma_ctrlr->list,
2417 &tsi148_bridge->dma_resources);
2418 }
2419
2420 /* Add location monitor to list */
2421 lm = kmalloc(sizeof(*lm), GFP_KERNEL);
2422 if (!lm) {
2423 retval = -ENOMEM;
2424 goto err_lm;
2425 }
2426 lm->parent = tsi148_bridge;
2427 mutex_init(&lm->mtx);
2428 lm->locked = 0;
2429 lm->number = 1;
2430 lm->monitors = 4;
2431 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2432
2433 tsi148_bridge->slave_get = tsi148_slave_get;
2434 tsi148_bridge->slave_set = tsi148_slave_set;
2435 tsi148_bridge->master_get = tsi148_master_get;
2436 tsi148_bridge->master_set = tsi148_master_set;
2437 tsi148_bridge->master_read = tsi148_master_read;
2438 tsi148_bridge->master_write = tsi148_master_write;
2439 tsi148_bridge->master_rmw = tsi148_master_rmw;
2440 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2441 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2442 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2443 tsi148_bridge->irq_set = tsi148_irq_set;
2444 tsi148_bridge->irq_generate = tsi148_irq_generate;
2445 tsi148_bridge->lm_set = tsi148_lm_set;
2446 tsi148_bridge->lm_get = tsi148_lm_get;
2447 tsi148_bridge->lm_attach = tsi148_lm_attach;
2448 tsi148_bridge->lm_detach = tsi148_lm_detach;
2449 tsi148_bridge->slot_get = tsi148_slot_get;
2450 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2451 tsi148_bridge->free_consistent = tsi148_free_consistent;
2452
2453 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2454 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2455 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2456 if (!geoid)
2457 dev_info(&pdev->dev, "VME geographical address is %d\n",
2458 data & TSI148_LCSR_VSTAT_GA_M);
2459 else
2460 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2461 geoid);
2462
2463 dev_info(&pdev->dev, "VME write flush and error checking is %s\n",
2464 err_chk ? "enabled" : "disabled");
2465
2466 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2467 if (retval) {
2468 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2469 goto err_crcsr;
2470 }
2471
2472 retval = vme_register_bridge(tsi148_bridge);
2473 if (retval != 0) {
2474 dev_err(&pdev->dev, "Chip Registration failed.\n");
2475 goto err_reg;
2476 }
2477
2478 pci_set_drvdata(pdev, tsi148_bridge);
2479
2480 /* Clear VME bus "board fail" and "power-up reset" lines */
2481 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2482 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2483 data |= TSI148_LCSR_VSTAT_CPURST;
2484 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2485
2486 return 0;
2487
2488 err_reg:
2489 tsi148_crcsr_exit(tsi148_bridge, pdev);
2490 err_crcsr:
2491 err_lm:
2492 /* resources are stored in a linked list */
2493 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2494 lm = list_entry(pos, struct vme_lm_resource, list);
2495 list_del(pos);
2496 kfree(lm);
2497 }
2498 err_dma:
2499 /* resources are stored in a linked list */
2500 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2501 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2502 list_del(pos);
2503 kfree(dma_ctrlr);
2504 }
2505 err_slave:
2506 /* resources are stored in a linked list */
2507 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2508 slave_image = list_entry(pos, struct vme_slave_resource, list);
2509 list_del(pos);
2510 kfree(slave_image);
2511 }
2512 err_master:
2513 /* resources are stored in a linked list */
2514 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2515 master_image = list_entry(pos, struct vme_master_resource,
2516 list);
2517 list_del(pos);
2518 kfree(master_image);
2519 }
2520
2521 tsi148_irq_exit(tsi148_bridge, pdev);
2522 err_irq:
2523 err_test:
2524 iounmap(tsi148_device->base);
2525 err_remap:
2526 pci_release_regions(pdev);
2527 err_resource:
2528 pci_disable_device(pdev);
2529 err_enable:
2530 kfree(tsi148_device);
2531 err_driver:
2532 kfree(tsi148_bridge);
2533 err_struct:
2534 return retval;
2535 }
2536
2537 static void tsi148_remove(struct pci_dev *pdev)
2538 {
2539 struct list_head *pos = NULL;
2540 struct list_head *tmplist;
2541 struct vme_master_resource *master_image;
2542 struct vme_slave_resource *slave_image;
2543 struct vme_dma_resource *dma_ctrlr;
2544 int i;
2545 struct tsi148_driver *bridge;
2546 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2547
2548 bridge = tsi148_bridge->driver_priv;
2549
2550 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2551
2552 /*
2553 * Shutdown all inbound and outbound windows.
2554 */
2555 for (i = 0; i < 8; i++) {
2556 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2557 TSI148_LCSR_OFFSET_ITAT);
2558 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2559 TSI148_LCSR_OFFSET_OTAT);
2560 }
2561
2562 /*
2563 * Shutdown Location monitor.
2564 */
2565 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2566
2567 /*
2568 * Shutdown CRG map.
2569 */
2570 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2571
2572 /*
2573 * Clear error status.
2574 */
2575 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2576 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2577 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2578
2579 /*
2580 * Remove VIRQ interrupt (if any)
2581 */
2582 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2583 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2584
2585 /*
2586 * Map all Interrupts to PCI INTA
2587 */
2588 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2589 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2590
2591 tsi148_irq_exit(tsi148_bridge, pdev);
2592
2593 vme_unregister_bridge(tsi148_bridge);
2594
2595 tsi148_crcsr_exit(tsi148_bridge, pdev);
2596
2597 /* resources are stored in a linked list */
2598 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2599 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2600 list_del(pos);
2601 kfree(dma_ctrlr);
2602 }
2603
2604 /* resources are stored in a linked list */
2605 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2606 slave_image = list_entry(pos, struct vme_slave_resource, list);
2607 list_del(pos);
2608 kfree(slave_image);
2609 }
2610
2611 /* resources are stored in a linked list */
2612 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2613 master_image = list_entry(pos, struct vme_master_resource,
2614 list);
2615 list_del(pos);
2616 kfree(master_image);
2617 }
2618
2619 iounmap(bridge->base);
2620
2621 pci_release_regions(pdev);
2622
2623 pci_disable_device(pdev);
2624
2625 kfree(tsi148_bridge->driver_priv);
2626
2627 kfree(tsi148_bridge);
2628 }
2629
2630 module_pci_driver(tsi148_driver);
2631
2632 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2633 module_param(err_chk, bool, 0);
2634
2635 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2636 module_param(geoid, int, 0);
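
/*
 * Usage note (sketch, assuming the module is built as vme_tsi148.ko): both
 * parameters default to off/zero. Loading the module as, for example,
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=4
 *
 * would enable read-back error checking of VME writes and force the reported
 * slot number to 4 instead of reading the geographic address pins.
 */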
2637
2638 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2639 MODULE_LICENSE("GPL");
2640