xref: /openbmc/linux/drivers/net/wan/wanxl.c (revision f0328a1922906be3540611e344914b9682fff350)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * wanXL serial card driver for Linux
4  * host part
5  *
6  * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
7  *
8  * Status:
9  *   - Only DTE (external clock) support with NRZ and NRZI encodings
10  *   - wanXL100 will require minor driver modifications, no access to hw
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/types.h>
20 #include <linux/fcntl.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/netdevice.h>
27 #include <linux/hdlc.h>
28 #include <linux/pci.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/delay.h>
31 #include <asm/io.h>
32 
33 #include "wanxl.h"
34 
35 static const char *version = "wanXL serial card driver version: 0.48";
36 
37 #define PLX_CTL_RESET   0x40000000 /* adapter reset */
38 
39 #undef DEBUG_PKT
40 #undef DEBUG_PCI
41 
42 /* MAILBOX #1 - PUTS COMMANDS */
43 #define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
44 #ifdef __LITTLE_ENDIAN
45 #define MBX1_CMD_BSWAP  0x8C000001 /* little-endian Byte Swap Mode */
46 #else
47 #define MBX1_CMD_BSWAP  0x8C000000 /* big-endian Byte Swap Mode */
48 #endif
49 
50 /* MAILBOX #2 - DRAM SIZE */
51 #define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
52 
53 struct port {
54 	struct net_device *dev;
55 	struct card *card;
56 	spinlock_t lock;	/* for wanxl_xmit */
57         int node;		/* physical port #0 - 3 */
58 	unsigned int clock_type;
59 	int tx_in, tx_out;
60 	struct sk_buff *tx_skbs[TX_BUFFERS];
61 };
62 
/* Structure shared between the host and the card firmware: allocated with
 * dma_alloc_coherent() in wanxl_pci_init_one(), which passes its DMA
 * address (and each port_status offset) down to the firmware.
 */
struct card_status {
	desc_t rx_descs[RX_QUEUE_LENGTH];	/* single RX ring for all ports */
	port_status_t port_status[4];		/* per-port status blocks */
};
67 
/* Per-adapter state; allocated in wanxl_pci_init_one() with a trailing
 * flexible array of 'ports' entries. */
struct card {
	int n_ports;		/* 1, 2 or 4 ports */
	u8 irq;			/* assigned IRQ; 0 = not requested yet */

	u8 __iomem *plx;	/* PLX PCI9060 virtual base address */
	struct pci_dev *pdev;	/* for pci_name(pdev) */
	int rx_in;		/* next RX ring slot to examine */
	struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
	struct card_status *status;	/* shared between host and card */
	dma_addr_t status_address;	/* DMA address of 'status' */
	struct port ports[];	/* 1 - 4 port structures follow */
};
80 
81 static inline struct port *dev_to_port(struct net_device *dev)
82 {
83 	return (struct port *)dev_to_hdlc(dev)->priv;
84 }
85 
86 static inline port_status_t *get_status(struct port *port)
87 {
88 	return &port->card->status->port_status[port->node];
89 }
90 
#ifdef DEBUG_PCI
/* Debug wrapper: warn when a streaming mapping lands above 4 GB, which
 * the card's DMA engine cannot address.
 * Fixed: this used to hook pci_map_single(), which this file no longer
 * calls (all call sites use dma_map_single()), so the check never fired.
 */
static inline dma_addr_t dma_map_single_debug(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction direction)
{
	dma_addr_t addr = dma_map_single(dev, ptr, size, direction);

	if (addr + size > 0x100000000LL)
		pr_crit("%s: dma_map_single() returned memory at 0x%llx!\n",
			dev_name(dev), (unsigned long long)addr);
	return addr;
}

#undef dma_map_single
#define dma_map_single dma_map_single_debug
#endif
106 
/* Cable and/or personality module change interrupt service */
static inline void wanxl_cable_intr(struct port *port)
{
	u32 value = get_status(port)->cable;
	int valid = 1;
	const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";

	/* low 3 bits: detected cable type */
	switch(value & 0x7) {
	case STATUS_CABLE_V35: cable = "V.35"; break;
	case STATUS_CABLE_X21: cable = "X.21"; break;
	case STATUS_CABLE_V24: cable = "V.24"; break;
	case STATUS_CABLE_EIA530: cable = "EIA530"; break;
	case STATUS_CABLE_NONE: cable = "no"; break;
	default: cable = "invalid";
	}

	/* 3 bits at STATUS_CABLE_PM_SHIFT: plugged personality module */
	switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
	case STATUS_CABLE_V35: pm = "V.35"; break;
	case STATUS_CABLE_X21: pm = "X.21"; break;
	case STATUS_CABLE_V24: pm = "V.24"; break;
	case STATUS_CABLE_EIA530: pm = "EIA530"; break;
	case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
	default: pm = "invalid personality"; valid = 0;
	}

	if (valid) {
		/* DSR/DCD are only reported when the cable type matches
		   the personality module type */
		if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
			dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
				", DSR off";
			dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
				", carrier off";
		}
		dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
	}
	netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
		    pm, dte, cable, dsr, dcd);

	/* mirror the carrier (DCD) state into the net device */
	if (value & STATUS_CABLE_DCD)
		netif_carrier_on(port->dev);
	else
		netif_carrier_off(port->dev);
}
149 
/* Transmit complete interrupt service */
static inline void wanxl_tx_intr(struct port *port)
{
	struct net_device *dev = port->dev;

	/* Reap completed TX descriptors from tx_in forward, stopping at
	   the first slot that is still queued (FULL) or already free
	   (EMPTY). */
	while (1) {
                desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
		struct sk_buff *skb = port->tx_skbs[port->tx_in];

		switch (desc->stat) {
		case PACKET_FULL:
		case PACKET_EMPTY:
			/* nothing more to reap - the ring has room again */
			netif_wake_queue(dev);
			return;

		case PACKET_UNDERRUN:
			dev->stats.tx_errors++;
			dev->stats.tx_fifo_errors++;
			break;

		default:
			/* any other status: the frame went out */
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
		}
                desc->stat = PACKET_EMPTY; /* Free descriptor */
		dma_unmap_single(&port->card->pdev->dev, desc->address,
				 skb->len, DMA_TO_DEVICE);
		dev_consume_skb_irq(skb);
                port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
        }
}
181 
182 /* Receive complete interrupt service */
183 static inline void wanxl_rx_intr(struct card *card)
184 {
185 	desc_t *desc;
186 
187 	while (desc = &card->status->rx_descs[card->rx_in],
188 	       desc->stat != PACKET_EMPTY) {
189 		if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
190 			pr_crit("%s: received packet for nonexistent port\n",
191 				pci_name(card->pdev));
192 		else {
193 			struct sk_buff *skb = card->rx_skbs[card->rx_in];
194 			struct port *port = &card->ports[desc->stat &
195 						    PACKET_PORT_MASK];
196 			struct net_device *dev = port->dev;
197 
198 			if (!skb)
199 				dev->stats.rx_dropped++;
200 			else {
201 				dma_unmap_single(&card->pdev->dev,
202 						 desc->address, BUFFER_LENGTH,
203 						 DMA_FROM_DEVICE);
204 				skb_put(skb, desc->length);
205 
206 #ifdef DEBUG_PKT
207 				printk(KERN_DEBUG "%s RX(%i):", dev->name,
208 				       skb->len);
209 				debug_frame(skb);
210 #endif
211 				dev->stats.rx_packets++;
212 				dev->stats.rx_bytes += skb->len;
213 				skb->protocol = hdlc_type_trans(skb, dev);
214 				netif_rx(skb);
215 				skb = NULL;
216 			}
217 
218 			if (!skb) {
219 				skb = dev_alloc_skb(BUFFER_LENGTH);
220 				desc->address = skb ?
221 					dma_map_single(&card->pdev->dev,
222 						       skb->data,
223 						       BUFFER_LENGTH,
224 						       DMA_FROM_DEVICE) : 0;
225 				card->rx_skbs[card->rx_in] = skb;
226 			}
227 		}
228 		desc->stat = PACKET_EMPTY; /* Free descriptor */
229 		card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
230 	}
231 }
232 
233 static irqreturn_t wanxl_intr(int irq, void *dev_id)
234 {
235 	struct card *card = dev_id;
236         int i;
237         u32 stat;
238         int handled = 0;
239 
240         while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
241                 handled = 1;
242 		writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
243 
244                 for (i = 0; i < card->n_ports; i++) {
245 			if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
246 				wanxl_tx_intr(&card->ports[i]);
247 			if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
248 				wanxl_cable_intr(&card->ports[i]);
249 		}
250 		if (stat & (1 << DOORBELL_FROM_CARD_RX))
251 			wanxl_rx_intr(card);
252         }
253 
254         return IRQ_RETVAL(handled);
255 }
256 
/* Queue one frame for transmission: fill the next TX descriptor, mark it
 * FULL and ring the card's doorbell.  Serialized by port->lock. */
static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	desc_t *desc;

        spin_lock(&port->lock);

	desc = &get_status(port)->tx_descs[port->tx_out];
        if (desc->stat != PACKET_EMPTY) {
                /* should never happen - previous xmit should stop queue */
#ifdef DEBUG_PKT
                printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
		netif_stop_queue(dev);
		spin_unlock(&port->lock);
		return NETDEV_TX_BUSY;       /* request packet to be queued */
	}

#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif

	/* NOTE(review): the dma_map_single() result is not checked with
	   dma_mapping_error(); a failed mapping would hand a bad address
	   to the card - confirm whether this can occur on supported
	   platforms. */
	port->tx_skbs[port->tx_out] = skb;
	desc->address = dma_map_single(&port->card->pdev->dev, skb->data,
				       skb->len, DMA_TO_DEVICE);
	desc->length = skb->len;
	desc->stat = PACKET_FULL;
	/* tell the firmware this port has work */
	writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
	       port->card->plx + PLX_DOORBELL_TO_CARD);

	port->tx_out = (port->tx_out + 1) % TX_BUFFERS;

	/* stop the queue now if the next slot is still in flight;
	   wanxl_tx_intr() wakes it when a descriptor frees up */
	if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
		netif_stop_queue(dev);
#ifdef DEBUG_PKT
		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
	}

	spin_unlock(&port->lock);
	return NETDEV_TX_OK;
}
300 
301 static int wanxl_attach(struct net_device *dev, unsigned short encoding,
302 			unsigned short parity)
303 {
304 	struct port *port = dev_to_port(dev);
305 
306 	if (encoding != ENCODING_NRZ &&
307 	    encoding != ENCODING_NRZI)
308 		return -EINVAL;
309 
310 	if (parity != PARITY_NONE &&
311 	    parity != PARITY_CRC32_PR1_CCITT &&
312 	    parity != PARITY_CRC16_PR1_CCITT &&
313 	    parity != PARITY_CRC32_PR0_CCITT &&
314 	    parity != PARITY_CRC16_PR0_CCITT)
315 		return -EINVAL;
316 
317 	get_status(port)->encoding = encoding;
318 	get_status(port)->parity = parity;
319 	return 0;
320 }
321 
/* SIOCWANDEV handler: get/set sync-serial interface settings; anything
 * else is delegated to the generic HDLC layer. */
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings line;
	struct port *port = dev_to_port(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		/* report current clocking; rate/loopback are not supported */
		memset(&line, 0, sizeof(line));
		line.clock_type = get_status(port)->clocking;
		line.clock_rate = 0;
		line.loopback = 0;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (dev->flags & IFF_UP)
			return -EBUSY; /* only change settings while down */

		if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
				   size))
			return -EFAULT;

		/* only external and TX-from-RX clocks are supported (see
		   the "Status" note in the file header) */
		if (line.clock_type != CLOCK_EXT &&
		    line.clock_type != CLOCK_TXFROMRX)
			return -EINVAL; /* No such clock setting */

		if (line.loopback != 0)
			return -EINVAL;

		get_status(port)->clocking = line.clock_type;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
        }
}
371 
372 static int wanxl_open(struct net_device *dev)
373 {
374 	struct port *port = dev_to_port(dev);
375 	u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
376 	unsigned long timeout;
377 	int i;
378 
379 	if (get_status(port)->open) {
380 		netdev_err(dev, "port already open\n");
381 		return -EIO;
382 	}
383 	if ((i = hdlc_open(dev)) != 0)
384 		return i;
385 
386 	port->tx_in = port->tx_out = 0;
387 	for (i = 0; i < TX_BUFFERS; i++)
388 		get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
389 	/* signal the card */
390 	writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
391 
392 	timeout = jiffies + HZ;
393 	do {
394 		if (get_status(port)->open) {
395 			netif_start_queue(dev);
396 			return 0;
397 		}
398 	} while (time_after(timeout, jiffies));
399 
400 	netdev_err(dev, "unable to open port\n");
401 	/* ask the card to close the port, should it be still alive */
402 	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
403 	return -EFAULT;
404 }
405 
406 static int wanxl_close(struct net_device *dev)
407 {
408 	struct port *port = dev_to_port(dev);
409 	unsigned long timeout;
410 	int i;
411 
412 	hdlc_close(dev);
413 	/* signal the card */
414 	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
415 	       port->card->plx + PLX_DOORBELL_TO_CARD);
416 
417 	timeout = jiffies + HZ;
418 	do {
419 		if (!get_status(port)->open)
420 			break;
421 	} while (time_after(timeout, jiffies));
422 
423 	if (get_status(port)->open)
424 		netdev_err(dev, "unable to close port\n");
425 
426 	netif_stop_queue(dev);
427 
428 	for (i = 0; i < TX_BUFFERS; i++) {
429 		desc_t *desc = &get_status(port)->tx_descs[i];
430 
431 		if (desc->stat != PACKET_EMPTY) {
432 			desc->stat = PACKET_EMPTY;
433 			dma_unmap_single(&port->card->pdev->dev,
434 					 desc->address, port->tx_skbs[i]->len,
435 					 DMA_TO_DEVICE);
436 			dev_kfree_skb(port->tx_skbs[i]);
437 		}
438 	}
439 	return 0;
440 }
441 
442 static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
443 {
444 	struct port *port = dev_to_port(dev);
445 
446 	dev->stats.rx_over_errors = get_status(port)->rx_overruns;
447 	dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
448 	dev->stats.rx_errors = dev->stats.rx_over_errors +
449 		dev->stats.rx_frame_errors;
450 	return &dev->stats;
451 }
452 
453 static int wanxl_puts_command(struct card *card, u32 cmd)
454 {
455 	unsigned long timeout = jiffies + 5 * HZ;
456 
457 	writel(cmd, card->plx + PLX_MAILBOX_1);
458 	do {
459 		if (readl(card->plx + PLX_MAILBOX_1) == 0)
460 			return 0;
461 
462 		schedule();
463 	}while (time_after(timeout, jiffies));
464 
465 	return -1;
466 }
467 
/* Reset the card by pulsing PLX_CTL_RESET in the PLX9060 control
 * register while preserving the other control bits.  Mailbox #0 is
 * preset to 0x80, the "PUTS still testing" code the probe loop polls
 * for.  The readl() after each writel() flushes the posted PCI write
 * before the delay. */
static void wanxl_reset(struct card *card)
{
	u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;

	writel(0x80, card->plx + PLX_MAILBOX_0);
	writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL); /* wait for posted write */
	udelay(1);
	writel(old_value, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL); /* wait for posted write */
}
479 
/* Undo everything wanxl_pci_init_one() did.  Also serves as the probe
 * error path, so every step must tolerate resources that were never set
 * up - hence the NULL/0 checks below. */
static void wanxl_pci_remove_one(struct pci_dev *pdev)
{
	struct card *card = pci_get_drvdata(pdev);
	int i;

	/* n_ports counts only successfully registered devices */
	for (i = 0; i < card->n_ports; i++) {
		unregister_hdlc_device(card->ports[i].dev);
		free_netdev(card->ports[i].dev);
	}

	/* unregister and free all host resources */
	if (card->irq)
		free_irq(card->irq, card);

	/* stop the card before tearing down buffers it may still DMA to */
	wanxl_reset(card);

	for (i = 0; i < RX_QUEUE_LENGTH; i++)
		if (card->rx_skbs[i]) {
			dma_unmap_single(&card->pdev->dev,
					 card->status->rx_descs[i].address,
					 BUFFER_LENGTH, DMA_FROM_DEVICE);
			dev_kfree_skb(card->rx_skbs[i]);
		}

	if (card->plx)
		iounmap(card->plx);

	if (card->status)
		dma_free_coherent(&pdev->dev, sizeof(struct card_status),
				  card->status, card->status_address);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(card);
}
515 
516 #include "wanxlfw.inc"
517 
/* Net device callbacks: transmit goes through the generic HDLC layer,
 * which dispatches to wanxl_xmit via hdlc->xmit (set in probe). */
static const struct net_device_ops wanxl_ops = {
	.ndo_open       = wanxl_open,
	.ndo_stop       = wanxl_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = wanxl_ioctl,
	.ndo_get_stats  = wanxl_get_stats,
};
525 
526 static int wanxl_pci_init_one(struct pci_dev *pdev,
527 			      const struct pci_device_id *ent)
528 {
529 	struct card *card;
530 	u32 ramsize, stat;
531 	unsigned long timeout;
532 	u32 plx_phy;		/* PLX PCI base address */
533 	u32 mem_phy;		/* memory PCI base addr */
534 	u8 __iomem *mem;	/* memory virtual base addr */
535 	int i, ports;
536 
537 #ifndef MODULE
538 	pr_info_once("%s\n", version);
539 #endif
540 
541 	i = pci_enable_device(pdev);
542 	if (i)
543 		return i;
544 
545 	/* QUICC can only access first 256 MB of host RAM directly,
546 	   but PLX9060 DMA does 32-bits for actual packet data transfers */
547 
548 	/* FIXME when PCI/DMA subsystems are fixed.
549 	   We set both dma_mask and consistent_dma_mask to 28 bits
550 	   and pray pci_alloc_consistent() will use this info. It should
551 	   work on most platforms */
552 	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
553 	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
554 		pr_err("No usable DMA configuration\n");
555 		pci_disable_device(pdev);
556 		return -EIO;
557 	}
558 
559 	i = pci_request_regions(pdev, "wanXL");
560 	if (i) {
561 		pci_disable_device(pdev);
562 		return i;
563 	}
564 
565 	switch (pdev->device) {
566 	case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
567 	case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
568 	default: ports = 4;
569 	}
570 
571 	card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
572 	if (card == NULL) {
573 		pci_release_regions(pdev);
574 		pci_disable_device(pdev);
575 		return -ENOBUFS;
576 	}
577 
578 	pci_set_drvdata(pdev, card);
579 	card->pdev = pdev;
580 
581 	card->status = dma_alloc_coherent(&pdev->dev,
582 					  sizeof(struct card_status),
583 					  &card->status_address, GFP_KERNEL);
584 	if (card->status == NULL) {
585 		wanxl_pci_remove_one(pdev);
586 		return -ENOBUFS;
587 	}
588 
589 #ifdef DEBUG_PCI
590 	printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
591 	       " at 0x%LX\n", pci_name(pdev),
592 	       (unsigned long long)card->status_address);
593 #endif
594 
595 	/* FIXME when PCI/DMA subsystems are fixed.
596 	   We set both dma_mask and consistent_dma_mask back to 32 bits
597 	   to indicate the card can do 32-bit DMA addressing */
598 	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
599 	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
600 		pr_err("No usable DMA configuration\n");
601 		wanxl_pci_remove_one(pdev);
602 		return -EIO;
603 	}
604 
605 	/* set up PLX mapping */
606 	plx_phy = pci_resource_start(pdev, 0);
607 
608 	card->plx = ioremap(plx_phy, 0x70);
609 	if (!card->plx) {
610 		pr_err("ioremap() failed\n");
611 		wanxl_pci_remove_one(pdev);
612 		return -EFAULT;
613 	}
614 
615 #if RESET_WHILE_LOADING
616 	wanxl_reset(card);
617 #endif
618 
619 	timeout = jiffies + 20 * HZ;
620 	while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
621 		if (time_before(timeout, jiffies)) {
622 			pr_warn("%s: timeout waiting for PUTS to complete\n",
623 				pci_name(pdev));
624 			wanxl_pci_remove_one(pdev);
625 			return -ENODEV;
626 		}
627 
628 		switch(stat & 0xC0) {
629 		case 0x00:	/* hmm - PUTS completed with non-zero code? */
630 		case 0x80:	/* PUTS still testing the hardware */
631 			break;
632 
633 		default:
634 			pr_warn("%s: PUTS test 0x%X failed\n",
635 				pci_name(pdev), stat & 0x30);
636 			wanxl_pci_remove_one(pdev);
637 			return -ENODEV;
638 		}
639 
640 		schedule();
641 	}
642 
643 	/* get on-board memory size (PUTS detects no more than 4 MB) */
644 	ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;
645 
646 	/* set up on-board RAM mapping */
647 	mem_phy = pci_resource_start(pdev, 2);
648 
649 	/* sanity check the board's reported memory size */
650 	if (ramsize < BUFFERS_ADDR +
651 	    (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
652 		pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n",
653 			pci_name(pdev), ramsize,
654 			BUFFERS_ADDR +
655 			(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
656 		wanxl_pci_remove_one(pdev);
657 		return -ENODEV;
658 	}
659 
660 	if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
661 		pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
662 		wanxl_pci_remove_one(pdev);
663 		return -ENODEV;
664 	}
665 
666 	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
667 		struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
668 
669 		card->rx_skbs[i] = skb;
670 		if (skb)
671 			card->status->rx_descs[i].address =
672 				dma_map_single(&card->pdev->dev, skb->data,
673 					       BUFFER_LENGTH, DMA_FROM_DEVICE);
674 	}
675 
676 	mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
677 	if (!mem) {
678 		pr_err("ioremap() failed\n");
679 		wanxl_pci_remove_one(pdev);
680 		return -EFAULT;
681 	}
682 
683 	for (i = 0; i < sizeof(firmware); i += 4)
684 		writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i);
685 
686 	for (i = 0; i < ports; i++)
687 		writel(card->status_address +
688 		       (void *)&card->status->port_status[i] -
689 		       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
690 	writel(card->status_address, mem + PDM_OFFSET + 20);
691 	writel(PDM_OFFSET, mem);
692 	iounmap(mem);
693 
694 	writel(0, card->plx + PLX_MAILBOX_5);
695 
696 	if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
697 		pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
698 		wanxl_pci_remove_one(pdev);
699 		return -ENODEV;
700 	}
701 
702 	timeout = jiffies + 5 * HZ;
703 	do {
704 		if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
705 			break;
706 		schedule();
707 	}while (time_after(timeout, jiffies));
708 
709 	if (!stat) {
710 		pr_warn("%s: timeout while initializing card firmware\n",
711 			pci_name(pdev));
712 		wanxl_pci_remove_one(pdev);
713 		return -ENODEV;
714 	}
715 
716 #if DETECT_RAM
717 	ramsize = stat;
718 #endif
719 
720 	pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
721 		pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
722 
723 	/* Allocate IRQ */
724 	if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
725 		pr_warn("%s: could not allocate IRQ%i\n",
726 			pci_name(pdev), pdev->irq);
727 		wanxl_pci_remove_one(pdev);
728 		return -EBUSY;
729 	}
730 	card->irq = pdev->irq;
731 
732 	for (i = 0; i < ports; i++) {
733 		hdlc_device *hdlc;
734 		struct port *port = &card->ports[i];
735 		struct net_device *dev = alloc_hdlcdev(port);
736 
737 		if (!dev) {
738 			pr_err("%s: unable to allocate memory\n",
739 			       pci_name(pdev));
740 			wanxl_pci_remove_one(pdev);
741 			return -ENOMEM;
742 		}
743 
744 		port->dev = dev;
745 		hdlc = dev_to_hdlc(dev);
746 		spin_lock_init(&port->lock);
747 		dev->tx_queue_len = 50;
748 		dev->netdev_ops = &wanxl_ops;
749 		hdlc->attach = wanxl_attach;
750 		hdlc->xmit = wanxl_xmit;
751 		port->card = card;
752 		port->node = i;
753 		get_status(port)->clocking = CLOCK_EXT;
754 		if (register_hdlc_device(dev)) {
755 			pr_err("%s: unable to register hdlc device\n",
756 			       pci_name(pdev));
757 			free_netdev(dev);
758 			wanxl_pci_remove_one(pdev);
759 			return -ENOBUFS;
760 		}
761 		card->n_ports++;
762 	}
763 
764 	pr_info("%s: port", pci_name(pdev));
765 	for (i = 0; i < ports; i++)
766 		pr_cont("%s #%i: %s",
767 			i ? "," : "", i, card->ports[i].dev->name);
768 	pr_cont("\n");
769 
770 	for (i = 0; i < ports; i++)
771 		wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/
772 
773 	return 0;
774 }
775 
/* All three board variants (1/2/4-port, see the probe's device-ID
 * switch) are handled by this driver. */
static const struct pci_device_id wanxl_pci_tbl[] = {
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ 0, }
};
785 
/* PCI driver glue: probe/remove for all wanXL board variants */
static struct pci_driver wanxl_pci_driver = {
	.name		= "wanXL",
	.id_table	= wanxl_pci_tbl,
	.probe		= wanxl_pci_init_one,
	.remove		= wanxl_pci_remove_one,
};
792 
/* Module init: announce the driver version (module builds print here;
 * built-in kernels print once in the probe) and register with the PCI
 * core. */
static int __init wanxl_init_module(void)
{
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	return pci_register_driver(&wanxl_pci_driver);
}
800 
/* Module exit: unregister the PCI driver; wanxl_pci_remove_one() runs
 * for each still-bound device. */
static void __exit wanxl_cleanup_module(void)
{
	pci_unregister_driver(&wanxl_pci_driver);
}
805 
806 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
807 MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
808 MODULE_LICENSE("GPL v2");
809 MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);
810 
811 module_init(wanxl_init_module);
812 module_exit(wanxl_cleanup_module);
813