/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
/* PLIP: A parallel port "network" driver for Linux. */
/* This driver is for a parallel port with a 5-bit cable (LapLink (R) cable). */
/*
 * Authors:	Donald Becker <becker@scyld.com>
 *		Tommy Thorn <thorn@daimi.aau.dk>
 *		Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		Peter Bauer <100136.3530@compuserve.com>
 *		Niibe Yutaka <gniibe@mri.co.jp>
 *		Nimrod Zimerman <zimerman@mailandnews.com>
 *
 * Enhancements:
 *		Modularization and ifreq/ifmap support by Alan Cox.
 *		Rewritten by Niibe Yutaka.
 *		parport-sharing awareness code by Philip Blundell.
 *		SMP locking by Niibe Yutaka.
 *		Support for parallel ports with no IRQ (poll mode),
 *		Modifications to use the parallel port API
 *		by Nimrod Zimerman.
 *
 * Fixes:
 *		Niibe Yutaka
 *		  - Module initialization.
 *		  - MTU fix.
 *		  - Make sure other end is OK, before sending a packet.
 *		  - Fix immediate timer problem.
 *
 *		Al Viro
 *		  - Changed {enable,disable}_irq handling to make it work
 *		    with new ("stack") semantics.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

/*
 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
 * inspired by Russ Nelson's parallel port packet driver.
 *
 * NOTE:
 *     Tanabe Hiroyasu changed the protocol, and that version shipped in
 *     Linux v1.0.  Because of the need to talk to DOS machines running the
 *     Crynwr packet driver, Peter Bauer changed the protocol back to the
 *     original one.
 *
 *     This version follows the original PLIP protocol, so it cannot
 *     communicate with the PLIP driver of Linux v1.0.
 */

/*
 *     To use this with a DOS box, turn on the ARP switch:
 *	# ifconfig plip[0-2] arp
 */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";

/*
  Sources:
	Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
	"parallel.asm" parallel port packet driver.

  The "Crynwr" parallel port standard specifies the following protocol:
    Trigger by sending nibble '0x8' (this causes interrupt on other end)
    count-low octet
    count-high octet
    ... data octets
    checksum octet
  Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
			<wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>

  The packet is encapsulated as if it were ethernet.

  The cable used is a de facto standard parallel null cable -- sold as
  a "LapLink" cable by various places.  You'll need a 12-conductor cable to
  make one yourself.  The wiring is:
    SLCTIN	17 - 17
    GROUND	25 - 25
    D0->ERROR	2 - 15		15 - 2
    D1->SLCT	3 - 13		13 - 3
    D2->PAPOUT	4 - 12		12 - 4
    D3->ACK	5 - 10		10 - 5
    D4->BUSY	6 - 11		11 - 6
  Do not connect the other pins.  They are
    D5,D6,D7 are 7,8,9
    STROBE is 1, FEED is 14, INIT is 16
    extra grounds are 18,19,20,21,22,23,24
*/
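
/*
  Reading aid (illustrative only, derived from the wiring table above and
  from plip_send()/plip_receive() below): with this cable the sender's data
  bits D0-D3 show up on the receiver's ERROR/SLCT/PAPOUT/ACK status lines
  (status bits 3-6), and D4 shows up on the receiver's BUSY line, which the
  port hardware reports inverted in status bit 7.  Sending the octet 0x5A
  then goes roughly like this:

    sender writes 0x0A      low nibble on D0-D3, D4 still low
    sender writes 0x1A      D4 high -> receiver sees status bit7 == 0,
                            takes (status >> 3) & 0x0f == 0x0A,
                            acknowledges with write_data(0x10)
    sender sees bit7 == 0
    sender writes 0x15      high nibble on D0-D3, D4 kept high
    sender writes 0x05      D4 low  -> receiver sees status bit7 == 1,
                            ORs in (status << 1) & 0xf0 == 0x50,
                            acknowledges with write_data(0x00)
    sender sees bit7 == 1   octet 0x5A transferred, nibble state machine is
                            back at PLIP_NB_BEGIN
*/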

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/if_plip.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/parport.h>
#include <linux/bitops.h>

#include <net/neighbour.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

/* Maximum number of devices to support. */
#define PLIP_MAX  8

/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;

#define ENABLE(irq)  if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)
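
/* Note: an irq value of -1 (tested by ENABLE/DISABLE above and throughout)
   means the parallel port has no interrupt line; the driver then runs in
   poll mode and plip_timer_bh() stands in for the interrupt handler. */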

/* In microseconds */
#define PLIP_DELAY_UNIT		   1

/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT	 500

/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT        3000
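
/* With the defaults above this amounts to a 500 usec handshake (trigger)
   timeout and a 3 msec per-nibble timeout.  Both values are copied into the
   per-device net_local at init time and can be changed at run time via the
   SIOCDEVPLIP ioctl (PLIP_GET_TIMEOUT/PLIP_SET_TIMEOUT, see plip_ioctl()). */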

/* Bottom halves */
static void plip_kick_bh(struct work_struct *work);
static void plip_bh(struct work_struct *work);
static void plip_timer_bh(struct work_struct *work);

/* Interrupt handler */
static void plip_interrupt(void *dev_id);

/* Functions for DEV methods */
static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                            unsigned short type, const void *daddr,
			    const void *saddr, unsigned len);
static int plip_hard_header_cache(const struct neighbour *neigh,
                                  struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);

enum plip_connection_state {
	PLIP_CN_NONE=0,
	PLIP_CN_RECEIVE,
	PLIP_CN_SEND,
	PLIP_CN_CLOSING,
	PLIP_CN_ERROR
};

enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};

enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};

struct plip_local {
	enum plip_packet_state state;
	enum plip_nibble_state nibble;
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;
	unsigned short byte;
	unsigned char  checksum;
	unsigned char  data;
	struct sk_buff *skb;
};
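
/* One plip_local is kept per direction (snd_data and rcv_data in net_local
   below).  It records how far the packet state machine got, so that after a
   TIMEOUT return the bottom half can resume a partly transferred packet
   exactly where it left off.  The length union lets the 16-bit packet length
   be sent and received as two separate octets (b.lsb/b.msb) while the rest
   of the code uses the host-order value (h). */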

struct net_local {
	struct net_device *dev;
	struct work_struct immediate;
	struct delayed_work deferred;
	struct delayed_work timer;
	struct plip_local snd_data;
	struct plip_local rcv_data;
	struct pardevice *pardev;
	unsigned long  trigger;
	unsigned long  nibble;
	enum plip_connection_state connection;
	unsigned short timeout_count;
	int is_deferred;
	int port_owner;
	int should_relinquish;
	spinlock_t lock;
	atomic_t kill_timer;
	struct completion killed_timer_cmp;
};

static inline void enable_parport_interrupts (struct net_device *dev)
{
	if (dev->irq != -1)
	{
		struct parport *port =
		   ((struct net_local *)netdev_priv(dev))->pardev->port;
		port->ops->enable_irq (port);
	}
}

static inline void disable_parport_interrupts (struct net_device *dev)
{
	if (dev->irq != -1)
	{
		struct parport *port =
		   ((struct net_local *)netdev_priv(dev))->pardev->port;
		port->ops->disable_irq (port);
	}
}

static inline void write_data (struct net_device *dev, unsigned char data)
{
	struct parport *port =
	   ((struct net_local *)netdev_priv(dev))->pardev->port;

	port->ops->write_data (port, data);
}

static inline unsigned char read_status (struct net_device *dev)
{
	struct parport *port =
	   ((struct net_local *)netdev_priv(dev))->pardev->port;

	return port->ops->read_status (port);
}

static const struct header_ops plip_header_ops = {
	.create	= plip_hard_header,
	.cache  = plip_hard_header_cache,
};

static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_do_ioctl		 = plip_ioctl,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};

/* Entry point of PLIP driver.
   Probe the hardware, and register/initialize the driver.

   PLIP is rather weird, because of the way it interacts with the parport
   system.  It is _not_ initialised from Space.c.  Instead, plip_init()
   is called, and that function makes up a "struct net_device" for each port, and
   then calls us here.

   */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops          = &plip_header_ops;


	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}

/* Bottom half handler for the delayed request.
   This routine is kicked by do_timer().
   Request `plip_bh' to be invoked. */
static void
plip_kick_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, deferred.work);

	if (nl->is_deferred)
		schedule_work(&nl->immediate);
}

/* Forward declarations of internal routines */
static int plip_none(struct net_device *, struct net_local *,
		     struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
			       struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
			    struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
				 struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
		      struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
				 struct plip_local *snd,
				 struct plip_local *rcv,
				 int error);

#define OK        0
#define TIMEOUT   1
#define ERROR     2
#define HS_TIMEOUT	3

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

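/* Indexed by enum plip_connection_state; the order must match that enum. */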
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};

/* Bottom half handler of PLIP. */
static void
plip_bh(struct work_struct *work)
{
	struct net_local *nl = container_of(work, struct net_local, immediate);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
	    (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}

static void
plip_timer_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, timer.work);

	if (!(atomic_read (&nl->kill_timer))) {
		plip_interrupt (nl->dev);

		schedule_delayed_work(&nl->timer, 1);
	}
	else {
		complete(&nl->killed_timer_cmp);
	}
}

static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}

static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}

/* PLIP_RECEIVE --- receive a byte (two nibbles)
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
	case PLIP_NB_2:
		break;
	}
	return OK;
}

/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *	PLIP is ethernet-ish but the daddr might not be valid if unicast.
 *	PLIP fortunately has no bus architecture (it's point-to-point).
 *
 *	We can't fix the daddr thing as that quirk (more a bug) is embedded
 *	in far too many old systems, not all of them even running Linux.
 */

static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}

/* PLIP_RECEIVE_PACKET --- receive a packet */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;

	case PLIP_PK_DONE:
		/* Inform the upper layer of the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}

/* PLIP_SEND --- send a byte (two nibbles)
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;

	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;

	case PLIP_NB_2:
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}

/* PLIP_SEND_PACKET --- send a packet */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled.    */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}

static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}

/* PLIP_ERROR --- wait till other end settled */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}

/* Handle the parallel port interrupts. */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is a race condition
		   around the test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}

static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}

static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
	const struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* Any address will do - we take the first */
		const struct in_ifaddr *ifa = in_dev->ifa_list;
		if (ifa) {
			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
			memset(eth->h_dest, 0xfc, 2);
			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
		}
	}
	rcu_read_unlock();
}

static int
plip_hard_header(struct sk_buff *skb, struct net_device *dev,
		 unsigned short type, const void *daddr,
		 const void *saddr, unsigned len)
{
	int ret;

	ret = eth_header(skb, dev, type, daddr, saddr, len);
	if (ret >= 0)
		plip_rewrite_address (dev, (struct ethhdr *)skb->data);

	return ret;
}

static int plip_hard_header_cache(const struct neighbour *neigh,
				  struct hh_cache *hh, __be16 type)
{
	int ret;

	ret = eth_header_cache(neigh, hh, type);
	if (ret == 0) {
		struct ethhdr *eth;

		eth = (struct ethhdr*)(((u8*)hh->hh_data) +
				       HH_DATA_OFF(sizeof(*eth)));
		plip_rewrite_address (neigh->dev, eth);
	}

	return ret;
}

/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it).             */
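	/* For example, if the interface's first IPv4 address is 192.168.3.1,
	   the code below makes the device address fc:fc:c0:a8:03:01 -- the
	   0xfc 0xfc prefix comes from plip_init_netdev(), the last four
	   bytes are the local address. */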

	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_netdev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}

/* The inverse routine to plip_open (). */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}

static int
plip_preempt(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	/* Stand our ground if a datagram is on the wire */
	if (nl->connection != PLIP_CN_NONE) {
		nl->should_relinquish = 1;
		return 1;
	}

	nl->port_owner = 0;	/* Remember that we released the bus */
	return 0;
}

static void
plip_wakeup(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	if (nl->port_owner) {
		/* Why are we being woken up? */
		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
		if (!parport_claim(nl->pardev))
			/* bus_owner is already set (but why?) */
			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
		else
			return;
	}

	if (!(dev->flags & IFF_UP))
		/* Don't need the port when the interface is down */
		return;

	if (!parport_claim(nl->pardev)) {
		nl->port_owner = 1;
		/* Clear the data port. */
		write_data (dev, 0x00);
	}
}

static int
plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct net_local *nl = netdev_priv(dev);
	struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;

	if (cmd != SIOCDEVPLIP)
		return -EOPNOTSUPP;

	switch(pc->pcmd) {
	case PLIP_GET_TIMEOUT:
		pc->trigger = nl->trigger;
		pc->nibble  = nl->nibble;
		break;
	case PLIP_SET_TIMEOUT:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		nl->trigger = pc->trigger;
		nl->nibble  = pc->nibble;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
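
/*
 * Rough user-space sketch of this ioctl (illustrative only; it assumes the
 * usual struct plipconf and SIOCDEVPLIP definitions from <linux/if_plip.h>
 * and is essentially what the plipconfig(8) utility does):
 *
 *	struct ifreq ifr;
 *	struct plipconf *pc = (struct plipconf *)&ifr.ifr_ifru;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "plip0");
 *	pc->pcmd = PLIP_GET_TIMEOUT;
 *	ioctl(fd, SIOCDEVPLIP, &ifr);	read the current trigger/nibble values
 *	pc->pcmd = PLIP_SET_TIMEOUT;
 *	pc->trigger = 500;
 *	pc->nibble  = 3000;
 *	ioctl(fd, SIOCDEVPLIP, &ifr);	set new values, needs CAP_NET_ADMIN
 */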

static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;

module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
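
/* Example (illustrative): "modprobe plip parport=0,2" attaches PLIP only to
   parport0 and parport2.  When the driver is built in, the same selection is
   made with the "plip=parport0" boot option parsed by plip_setup() below;
   "plip=timid" (or the module parameter timid=1) makes the driver skip ports
   that already have other devices registered on them. */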

static struct net_device *dev_plip[PLIP_MAX] = { NULL, };

static inline int
plip_searchfor(int list[], int a)
{
	int i;
	for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
		if (list[i] == a) return 1;
	}
	return 0;
}

/* plip_attach() is called (by the parport code) when a port is
 * available to use. */
static void plip_attach (struct parport *port)
{
	static int unit;
	struct net_device *dev;
	struct net_local *nl;
	char name[IFNAMSIZ];
	struct pardev_cb plip_cb;

	if ((parport[0] == -1 && (!timid || !port->devices)) ||
	    plip_searchfor(parport, port->number)) {
		if (unit == PLIP_MAX) {
			printk(KERN_ERR "plip: too many devices\n");
			return;
		}

		sprintf(name, "plip%d", unit);
		dev = alloc_etherdev(sizeof(struct net_local));
		if (!dev)
			return;

		strcpy(dev->name, name);

		dev->irq = port->irq;
		dev->base_addr = port->base;
		if (port->irq == -1) {
			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
		                 "which is fairly inefficient!\n", port->name);
		}

		nl = netdev_priv(dev);
		nl->dev = dev;

		memset(&plip_cb, 0, sizeof(plip_cb));
		plip_cb.private = dev;
		plip_cb.preempt = plip_preempt;
		plip_cb.wakeup = plip_wakeup;
		plip_cb.irq_func = plip_interrupt;

		nl->pardev = parport_register_dev_model(port, dev->name,
							&plip_cb, unit);

		if (!nl->pardev) {
			printk(KERN_ERR "%s: parport_register failed\n", name);
			goto err_free_dev;
		}

		plip_init_netdev(dev);

		if (register_netdev(dev)) {
			printk(KERN_ERR "%s: network register failed\n", name);
			goto err_parport_unregister;
		}

		printk(KERN_INFO "%s", version);
		if (dev->irq != -1)
			printk(KERN_INFO "%s: Parallel port at %#3lx, "
					 "using IRQ %d.\n",
				         dev->name, dev->base_addr, dev->irq);
		else
			printk(KERN_INFO "%s: Parallel port at %#3lx, "
					 "not using IRQ.\n",
					 dev->name, dev->base_addr);
		dev_plip[unit++] = dev;
	}
	return;

err_parport_unregister:
	parport_unregister_device(nl->pardev);
err_free_dev:
	free_netdev(dev);
}

/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do */
}

static int plip_probe(struct pardevice *par_dev)
{
	struct device_driver *drv = par_dev->dev.driver;
	int len = strlen(drv->name);

	if (strncmp(par_dev->name, drv->name, len))
		return -ENODEV;

	return 0;
}

static struct parport_driver plip_driver = {
	.name		= "plip",
	.probe		= plip_probe,
	.match_port	= plip_attach,
	.detach		= plip_detach,
	.devmodel	= true,
};

static void __exit plip_cleanup_module (void)
{
	struct net_device *dev;
	int i;

	for (i=0; i < PLIP_MAX; i++) {
		if ((dev = dev_plip[i])) {
			struct net_local *nl = netdev_priv(dev);
			unregister_netdev(dev);
			if (nl->port_owner)
				parport_release(nl->pardev);
			parport_unregister_device(nl->pardev);
			free_netdev(dev);
			dev_plip[i] = NULL;
		}
	}

	parport_unregister_driver(&plip_driver);
}

#ifndef MODULE

static int parport_ptr;

static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);

#endif /* !MODULE */

static int __init plip_init (void)
{
	if (parport[0] == -2)
		return 0;

	if (parport[0] != -1 && timid) {
		printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
		timid = 0;
	}

	if (parport_register_driver (&plip_driver)) {
		printk (KERN_WARNING "plip: couldn't register driver\n");
		return 1;
	}

	return 0;
}

module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");