xref: /openbmc/linux/drivers/s390/net/lcs.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
1 /*
2  *  Linux for S/390 Lan Channel Station Network Driver
3  *
4  *  Copyright IBM Corp. 1999, 2009
5  *  Author(s): Original Code written by
6  *			DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
7  *	       Rewritten by
8  *			Frank Pavlic <fpavlic@de.ibm.com> and
9  *			Martin Schwidefsky <schwidefsky@de.ibm.com>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25 
26 #define KMSG_COMPONENT		"lcs"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 
29 #include <linux/module.h>
30 #include <linux/if.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/trdevice.h>
34 #include <linux/fddidevice.h>
35 #include <linux/inetdevice.h>
36 #include <linux/in.h>
37 #include <linux/igmp.h>
38 #include <linux/delay.h>
39 #include <linux/kthread.h>
40 #include <linux/slab.h>
41 #include <net/arp.h>
42 #include <net/ip.h>
43 
44 #include <asm/debug.h>
45 #include <asm/idals.h>
46 #include <asm/timex.h>
47 #include <linux/device.h>
48 #include <asm/ccwgroup.h>
49 
50 #include "lcs.h"
51 
52 
53 #if !defined(CONFIG_NET_ETHERNET) && \
54     !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
55 #error Cannot compile lcs.c without some net devices switched on.
56 #endif
57 
58 /**
59  * initialization string for output
60  */
61 
62 static char version[] __initdata = "LCS driver";
63 
64 /**
65  * the root device for lcs group devices
66  */
67 static struct device *lcs_root_dev;
68 
69 /**
70  * Some prototypes.
71  */
72 static void lcs_tasklet(unsigned long);
73 static void lcs_start_kernel_thread(struct work_struct *);
74 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
75 #ifdef CONFIG_IP_MULTICAST
76 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
77 #endif /* CONFIG_IP_MULTICAST */
78 static int lcs_recovery(void *ptr);
79 
80 /**
81  * Debug Facility Stuff
82  */
83 static char debug_buffer[255];
84 static debug_info_t *lcs_dbf_setup;
85 static debug_info_t *lcs_dbf_trace;
86 
87 /**
88  *  LCS Debug Facility functions
89  */
90 static void
91 lcs_unregister_debug_facility(void)
92 {
93 	if (lcs_dbf_setup)
94 		debug_unregister(lcs_dbf_setup);
95 	if (lcs_dbf_trace)
96 		debug_unregister(lcs_dbf_trace);
97 }
98 
99 static int
100 lcs_register_debug_facility(void)
101 {
102 	lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
103 	lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
104 	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
105 		pr_err("Not enough memory for debug facility.\n");
106 		lcs_unregister_debug_facility();
107 		return -ENOMEM;
108 	}
109 	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
110 	debug_set_level(lcs_dbf_setup, 2);
111 	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
112 	debug_set_level(lcs_dbf_trace, 2);
113 	return 0;
114 }
115 
116 /**
117  * Allocate io buffers.
118  */
119 static int
120 lcs_alloc_channel(struct lcs_channel *channel)
121 {
122 	int cnt;
123 
124 	LCS_DBF_TEXT(2, setup, "ichalloc");
125 	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
126 		/* Allocate memory for the iobuffer. */
127 		channel->iob[cnt].data =
128 			kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
129 		if (channel->iob[cnt].data == NULL)
130 			break;
131 		channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
132 	}
133 	if (cnt < LCS_NUM_BUFFS) {
134 		/* Not all io buffers could be allocated. */
135 		LCS_DBF_TEXT(2, setup, "echalloc");
136 		while (cnt-- > 0)
137 			kfree(channel->iob[cnt].data);
138 		return -ENOMEM;
139 	}
140 	return 0;
141 }
142 
143 /**
144  * Free io buffers.
145  */
146 static void
147 lcs_free_channel(struct lcs_channel *channel)
148 {
149 	int cnt;
150 
151 	LCS_DBF_TEXT(2, setup, "ichfree");
152 	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
153 		kfree(channel->iob[cnt].data);
154 		channel->iob[cnt].data = NULL;
155 	}
156 }
157 
158 /*
159  * Cleanup channel.
160  */
161 static void
162 lcs_cleanup_channel(struct lcs_channel *channel)
163 {
164 	LCS_DBF_TEXT(3, setup, "cleanch");
165 	/* Kill write channel tasklets. */
166 	/* Kill the channel tasklet. */
167 	/* Free channel buffers. */
168 	lcs_free_channel(channel);
169 }
170 
171 /**
172  * LCS free memory for card and channels.
173  */
174 static void
175 lcs_free_card(struct lcs_card *card)
176 {
177 	LCS_DBF_TEXT(2, setup, "remcard");
178 	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
179 	kfree(card);
180 }
181 
182 /**
183  * LCS alloc memory for card and channels
184  */
185 static struct lcs_card *
186 lcs_alloc_card(void)
187 {
188 	struct lcs_card *card;
189 	int rc;
190 
191 	LCS_DBF_TEXT(2, setup, "alloclcs");
192 
193 	card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
194 	if (card == NULL)
195 		return NULL;
196 	card->lan_type = LCS_FRAME_TYPE_AUTO;
197 	card->pkt_seq = 0;
198 	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
199 	/* Allocate io buffers for the read channel. */
200 	rc = lcs_alloc_channel(&card->read);
201 	if (rc) {
202 		LCS_DBF_TEXT(2, setup, "iccwerr");
203 		lcs_free_card(card);
204 		return NULL;
205 	}
206 	/* Allocate io buffers for the write channel. */
207 	rc = lcs_alloc_channel(&card->write);
208 	if (rc) {
209 		LCS_DBF_TEXT(2, setup, "iccwerr");
210 		lcs_cleanup_channel(&card->read);
211 		lcs_free_card(card);
212 		return NULL;
213 	}
214 
215 #ifdef CONFIG_IP_MULTICAST
216 	INIT_LIST_HEAD(&card->ipm_list);
217 #endif
218 	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
219 	return card;
220 }
221 
222 /*
223  * Setup read channel.
224  */
225 static void
226 lcs_setup_read_ccws(struct lcs_card *card)
227 {
228 	int cnt;
229 
230 	LCS_DBF_TEXT(2, setup, "ireadccw");
231 	/* Setup read ccws. */
232 	memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
233 	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
234 		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
235 		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
236 		card->read.ccws[cnt].flags =
237 			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
238 		/*
239 		 * Note: we have allocated the buffer with GFP_DMA, so
240 		 * we do not need to do set_normalized_cda.
241 		 */
242 		card->read.ccws[cnt].cda =
243 			(__u32) __pa(card->read.iob[cnt].data);
244 		((struct lcs_header *)
245 		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
246 		card->read.iob[cnt].callback = lcs_get_frames_cb;
247 		card->read.iob[cnt].state = LCS_BUF_STATE_READY;
248 		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
249 	}
250 	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
251 	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
252 	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
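	/*
	 * The suspend flag makes the channel stop before executing this CCW
	 * until the corresponding buffer has been made ready again.  As
	 * buffers are processed and re-readied, __lcs_processed_buffer() and
	 * __lcs_ready_buffer_bits() move this suspend point around the ring
	 * so the channel never overruns unprocessed buffers.
	 */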
253 	/* Last ccw is a tic (transfer in channel). */
254 	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
255 	card->read.ccws[LCS_NUM_BUFFS].cda =
256 		(__u32) __pa(card->read.ccws);
257 	/* Set initial state of the read channel. */
258 	card->read.state = LCS_CH_STATE_INIT;
259 
260 	card->read.io_idx = 0;
261 	card->read.buf_idx = 0;
262 }
263 
264 static void
265 lcs_setup_read(struct lcs_card *card)
266 {
267 	LCS_DBF_TEXT(3, setup, "initread");
268 
269 	lcs_setup_read_ccws(card);
270 	/* Initialize read channel tasklet. */
271 	card->read.irq_tasklet.data = (unsigned long) &card->read;
272 	card->read.irq_tasklet.func = lcs_tasklet;
273 	/* Initialize waitqueue. */
274 	init_waitqueue_head(&card->read.wait_q);
275 }
276 
277 /*
278  * Setup write channel.
279  */
280 static void
281 lcs_setup_write_ccws(struct lcs_card *card)
282 {
283 	int cnt;
284 
285 	LCS_DBF_TEXT(3, setup, "iwritccw");
286 	/* Setup write ccws. */
287 	memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
288 	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
289 		card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
290 		card->write.ccws[cnt].count = 0;
291 		card->write.ccws[cnt].flags =
292 			CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
293 		/*
294 		 * Note: we have allocated the buffer with GFP_DMA, so
295 		 * we do not need to do set_normalized_cda.
296 		 */
297 		card->write.ccws[cnt].cda =
298 			(__u32) __pa(card->write.iob[cnt].data);
299 	}
300 	/* Last ccw is a tic (transfer in channel). */
301 	card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
302 	card->write.ccws[LCS_NUM_BUFFS].cda =
303 		(__u32) __pa(card->write.ccws);
304 	/* Set initial state of the write channel. */
305 	card->write.state = LCS_CH_STATE_INIT;
306 
307 	card->write.io_idx = 0;
308 	card->write.buf_idx = 0;
309 }
310 
311 static void
312 lcs_setup_write(struct lcs_card *card)
313 {
314 	LCS_DBF_TEXT(3, setup, "initwrit");
315 
316 	lcs_setup_write_ccws(card);
317 	/* Initialize write channel tasklet. */
318 	card->write.irq_tasklet.data = (unsigned long) &card->write;
319 	card->write.irq_tasklet.func = lcs_tasklet;
320 	/* Initialize waitqueue. */
321 	init_waitqueue_head(&card->write.wait_q);
322 }
323 
324 static void
325 lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
326 {
327 	unsigned long flags;
328 
329 	spin_lock_irqsave(&card->mask_lock, flags);
330 	card->thread_allowed_mask = threads;
331 	spin_unlock_irqrestore(&card->mask_lock, flags);
332 	wake_up(&card->wait_q);
333 }
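
/*
 * Helpers for the card's asynchronous worker threads (recovery and
 * multicast registration).  Three bit masks, all protected by mask_lock,
 * control them: thread_allowed_mask gates which threads may be started,
 * thread_start_mask records requested starts and thread_running_mask
 * records the threads currently running.  wait_q is woken whenever a
 * mask changes so waiters can re-check their condition.
 */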
334 static inline int
335 lcs_threads_running(struct lcs_card *card, unsigned long threads)
336 {
337 	unsigned long flags;
338 	int rc = 0;
339 
340 	spin_lock_irqsave(&card->mask_lock, flags);
341 	rc = (card->thread_running_mask & threads);
342 	spin_unlock_irqrestore(&card->mask_lock, flags);
343 	return rc;
344 }
345 
346 static int
347 lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
348 {
349 	return wait_event_interruptible(card->wait_q,
350 			lcs_threads_running(card, threads) == 0);
351 }
352 
353 static inline int
354 lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
355 {
356 	unsigned long flags;
357 
358 	spin_lock_irqsave(&card->mask_lock, flags);
359 	if (!(card->thread_allowed_mask & thread) ||
360 	    (card->thread_start_mask & thread)) {
361 		spin_unlock_irqrestore(&card->mask_lock, flags);
362 		return -EPERM;
363 	}
364 	card->thread_start_mask |= thread;
365 	spin_unlock_irqrestore(&card->mask_lock, flags);
366 	return 0;
367 }
368 
369 static void
370 lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
371 {
372 	unsigned long flags;
373 
374 	spin_lock_irqsave(&card->mask_lock, flags);
375 	card->thread_running_mask &= ~thread;
376 	spin_unlock_irqrestore(&card->mask_lock, flags);
377 	wake_up(&card->wait_q);
378 }
379 
380 static inline int
381 __lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
382 {
383 	unsigned long flags;
384 	int rc = 0;
385 
386 	spin_lock_irqsave(&card->mask_lock, flags);
387 	if (card->thread_start_mask & thread) {
388 		if ((card->thread_allowed_mask & thread) &&
389 		    !(card->thread_running_mask & thread)) {
390 			rc = 1;
391 			card->thread_start_mask &= ~thread;
392 			card->thread_running_mask |= thread;
393 		} else
394 			rc = -EPERM;
395 	}
396 	spin_unlock_irqrestore(&card->mask_lock, flags);
397 	return rc;
398 }
399 
400 static int
401 lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
402 {
403 	int rc = 0;
404 	wait_event(card->wait_q,
405 		   (rc = __lcs_do_run_thread(card, thread)) >= 0);
406 	return rc;
407 }
408 
409 static int
410 lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
411 {
412 	unsigned long flags;
413 	int rc = 0;
414 
415 	spin_lock_irqsave(&card->mask_lock, flags);
416 	LCS_DBF_TEXT_(4, trace, "  %02x%02x%02x",
417 		      (u8) card->thread_start_mask,
418 		      (u8) card->thread_allowed_mask,
419 		      (u8) card->thread_running_mask);
420 	rc = (card->thread_start_mask & thread);
421 	spin_unlock_irqrestore(&card->mask_lock, flags);
422 	return rc;
423 }
424 
425 /**
426  * Initialize channels, card and state machines.
427  */
428 static void
429 lcs_setup_card(struct lcs_card *card)
430 {
431 	LCS_DBF_TEXT(2, setup, "initcard");
432 	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
433 
434 	lcs_setup_read(card);
435 	lcs_setup_write(card);
436 	/* Set the card's initial state. */
437 	card->state = DEV_STATE_DOWN;
438 	card->tx_buffer = NULL;
439 	card->tx_emitted = 0;
440 
441 	init_waitqueue_head(&card->wait_q);
442 	spin_lock_init(&card->lock);
443 	spin_lock_init(&card->ipm_lock);
444 	spin_lock_init(&card->mask_lock);
445 #ifdef CONFIG_IP_MULTICAST
446 	INIT_LIST_HEAD(&card->ipm_list);
447 #endif
448 	INIT_LIST_HEAD(&card->lancmd_waiters);
449 }
450 
451 static inline void
452 lcs_clear_multicast_list(struct lcs_card *card)
453 {
454 #ifdef	CONFIG_IP_MULTICAST
455 	struct lcs_ipm_list *ipm;
456 	unsigned long flags;
457 
458 	/* Free multicast list. */
459 	LCS_DBF_TEXT(3, setup, "clmclist");
460 	spin_lock_irqsave(&card->ipm_lock, flags);
461 	while (!list_empty(&card->ipm_list)){
462 		ipm = list_entry(card->ipm_list.next,
463 				 struct lcs_ipm_list, list);
464 		list_del(&ipm->list);
465 		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
466 			spin_unlock_irqrestore(&card->ipm_lock, flags);
467 			lcs_send_delipm(card, ipm);
468 			spin_lock_irqsave(&card->ipm_lock, flags);
469 		}
470 		kfree(ipm);
471 	}
472 	spin_unlock_irqrestore(&card->ipm_lock, flags);
473 #endif
474 }
475 /**
476  * Cleanup channels, card and state machines.
477  */
478 static void
479 lcs_cleanup_card(struct lcs_card *card)
480 {
481 
482 	LCS_DBF_TEXT(3, setup, "cleancrd");
483 	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
484 
485 	if (card->dev != NULL)
486 		free_netdev(card->dev);
487 	/* Cleanup channels. */
488 	lcs_cleanup_channel(&card->write);
489 	lcs_cleanup_channel(&card->read);
490 }
491 
492 /**
493  * Start channel.
494  */
495 static int
496 lcs_start_channel(struct lcs_channel *channel)
497 {
498 	unsigned long flags;
499 	int rc;
500 
501 	LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
502 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
503 	rc = ccw_device_start(channel->ccwdev,
504 			      channel->ccws + channel->io_idx, 0, 0,
505 			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
506 	if (rc == 0)
507 		channel->state = LCS_CH_STATE_RUNNING;
508 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
509 	if (rc) {
510 		LCS_DBF_TEXT_(4,trace,"essh%s",
511 			      dev_name(&channel->ccwdev->dev));
512 		dev_err(&channel->ccwdev->dev,
513 			"Starting an LCS device resulted in an error,"
514 			" rc=%d!\n", rc);
515 	}
516 	return rc;
517 }
518 
519 static int
520 lcs_clear_channel(struct lcs_channel *channel)
521 {
522 	unsigned long flags;
523 	int rc;
524 
525 	LCS_DBF_TEXT(4,trace,"clearch");
526 	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
527 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
528 	rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
529 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
530 	if (rc) {
531 		LCS_DBF_TEXT_(4, trace, "ecsc%s",
532 			      dev_name(&channel->ccwdev->dev));
533 		return rc;
534 	}
535 	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
536 	channel->state = LCS_CH_STATE_STOPPED;
537 	return rc;
538 }
539 
540 
541 /**
542  * Stop channel.
543  */
544 static int
545 lcs_stop_channel(struct lcs_channel *channel)
546 {
547 	unsigned long flags;
548 	int rc;
549 
550 	if (channel->state == LCS_CH_STATE_STOPPED)
551 		return 0;
552 	LCS_DBF_TEXT(4,trace,"haltsch");
553 	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
554 	channel->state = LCS_CH_STATE_INIT;
555 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
556 	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
557 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
558 	if (rc) {
559 		LCS_DBF_TEXT_(4, trace, "ehsc%s",
560 			      dev_name(&channel->ccwdev->dev));
561 		return rc;
562 	}
563 	/* Asynchronous halt initiated. Wait for its completion. */
564 	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
565 	lcs_clear_channel(channel);
566 	return 0;
567 }
568 
569 /**
570  * start read and write channel
571  */
572 static int
573 lcs_start_channels(struct lcs_card *card)
574 {
575 	int rc;
576 
577 	LCS_DBF_TEXT(2, trace, "chstart");
578 	/* start read channel */
579 	rc = lcs_start_channel(&card->read);
580 	if (rc)
581 		return rc;
582 	/* start write channel */
583 	rc = lcs_start_channel(&card->write);
584 	if (rc)
585 		lcs_stop_channel(&card->read);
586 	return rc;
587 }
588 
589 /**
590  * stop read and write channel
591  */
592 static int
593 lcs_stop_channels(struct lcs_card *card)
594 {
595 	LCS_DBF_TEXT(2, trace, "chhalt");
596 	lcs_stop_channel(&card->read);
597 	lcs_stop_channel(&card->write);
598 	return 0;
599 }
600 
601 /**
602  * Get empty buffer.
603  */
604 static struct lcs_buffer *
605 __lcs_get_buffer(struct lcs_channel *channel)
606 {
607 	int index;
608 
609 	LCS_DBF_TEXT(5, trace, "_getbuff");
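	/*
	 * Scan the buffer ring starting at the current io_idx.  The
	 * "& (LCS_NUM_BUFFS - 1)" wrap-around below relies on LCS_NUM_BUFFS
	 * being a power of two.
	 */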
610 	index = channel->io_idx;
611 	do {
612 		if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
613 			channel->iob[index].state = LCS_BUF_STATE_LOCKED;
614 			return channel->iob + index;
615 		}
616 		index = (index + 1) & (LCS_NUM_BUFFS - 1);
617 	} while (index != channel->io_idx);
618 	return NULL;
619 }
620 
621 static struct lcs_buffer *
622 lcs_get_buffer(struct lcs_channel *channel)
623 {
624 	struct lcs_buffer *buffer;
625 	unsigned long flags;
626 
627 	LCS_DBF_TEXT(5, trace, "getbuff");
628 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
629 	buffer = __lcs_get_buffer(channel);
630 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
631 	return buffer;
632 }
633 
634 /**
635  * Resume channel program if the channel is suspended.
636  */
637 static int
638 __lcs_resume_channel(struct lcs_channel *channel)
639 {
640 	int rc;
641 
642 	if (channel->state != LCS_CH_STATE_SUSPENDED)
643 		return 0;
644 	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
645 		return 0;
646 	LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
647 	rc = ccw_device_resume(channel->ccwdev);
648 	if (rc) {
649 		LCS_DBF_TEXT_(4, trace, "ersc%s",
650 			      dev_name(&channel->ccwdev->dev));
651 		dev_err(&channel->ccwdev->dev,
652 			"Sending data from the LCS device to the LAN failed"
653 			" with rc=%d\n",rc);
654 	} else
655 		channel->state = LCS_CH_STATE_RUNNING;
656 	return rc;
657 
658 }
659 
660 /**
661  * Make a buffer ready for processing.
662  */
663 static inline void
664 __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
665 {
666 	int prev, next;
667 
668 	LCS_DBF_TEXT(5, trace, "rdybits");
669 	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
670 	next = (index + 1) & (LCS_NUM_BUFFS - 1);
671 	/* Check if we may clear the suspend bit of this buffer. */
672 	if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
673 		/* Check if we have to set the PCI bit. */
674 		if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
675 			/* Suspend bit of the previous buffer is not set. */
676 			channel->ccws[index].flags |= CCW_FLAG_PCI;
677 		/* Suspend bit of the next buffer is set. */
678 		channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
679 	}
680 }
681 
682 static int
683 lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
684 {
685 	unsigned long flags;
686 	int index, rc;
687 
688 	LCS_DBF_TEXT(5, trace, "rdybuff");
689 	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
690 	       buffer->state != LCS_BUF_STATE_PROCESSED);
691 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
692 	buffer->state = LCS_BUF_STATE_READY;
693 	index = buffer - channel->iob;
694 	/* Set length. */
695 	channel->ccws[index].count = buffer->count;
696 	/* Check relevant PCI/suspend bits. */
697 	__lcs_ready_buffer_bits(channel, index);
698 	rc = __lcs_resume_channel(channel);
699 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
700 	return rc;
701 }
702 
703 /**
704  * Mark the buffer as processed. Take care of the suspend bit
705  * of the previous buffer. This function is called from
706  * interrupt context, so the lock must not be taken.
707  */
708 static int
709 __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
710 {
711 	int index, prev, next;
712 
713 	LCS_DBF_TEXT(5, trace, "prcsbuff");
714 	BUG_ON(buffer->state != LCS_BUF_STATE_READY);
715 	buffer->state = LCS_BUF_STATE_PROCESSED;
716 	index = buffer - channel->iob;
717 	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
718 	next = (index + 1) & (LCS_NUM_BUFFS - 1);
719 	/* Set the suspend bit and clear the PCI bit of this buffer. */
720 	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
721 	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
722 	/* Check the suspend bit of the previous buffer. */
723 	if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
724 		/*
725 		 * Previous buffer is in state ready. It might have
726 		 * happened in lcs_ready_buffer that the suspend bit
727 		 * has not been cleared to avoid an endless loop.
728 		 * Do it now.
729 		 */
730 		__lcs_ready_buffer_bits(channel, prev);
731 	}
732 	/* Clear PCI bit of next buffer. */
733 	channel->ccws[next].flags &= ~CCW_FLAG_PCI;
734 	return __lcs_resume_channel(channel);
735 }
736 
737 /**
738  * Put a processed buffer back to state empty.
739  */
740 static void
741 lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
742 {
743 	unsigned long flags;
744 
745 	LCS_DBF_TEXT(5, trace, "relbuff");
746 	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
747 	       buffer->state != LCS_BUF_STATE_PROCESSED);
748 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
749 	buffer->state = LCS_BUF_STATE_EMPTY;
750 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
751 }
752 
753 /**
754  * Get buffer for a lan command.
755  */
756 static struct lcs_buffer *
757 lcs_get_lancmd(struct lcs_card *card, int count)
758 {
759 	struct lcs_buffer *buffer;
760 	struct lcs_cmd *cmd;
761 
762 	LCS_DBF_TEXT(4, trace, "getlncmd");
763 	/* Get buffer and wait if none is available. */
764 	wait_event(card->write.wait_q,
765 		   ((buffer = lcs_get_buffer(&card->write)) != NULL));
766 	count += sizeof(struct lcs_header);
767 	*(__u16 *)(buffer->data + count) = 0;
768 	buffer->count = count + sizeof(__u16);
769 	buffer->callback = lcs_release_buffer;
770 	cmd = (struct lcs_cmd *) buffer->data;
771 	cmd->offset = count;
772 	cmd->type = LCS_FRAME_TYPE_CONTROL;
773 	cmd->slot = 0;
774 	return buffer;
775 }
776 
777 
778 static void
779 lcs_get_reply(struct lcs_reply *reply)
780 {
781 	WARN_ON(atomic_read(&reply->refcnt) <= 0);
782 	atomic_inc(&reply->refcnt);
783 }
784 
785 static void
786 lcs_put_reply(struct lcs_reply *reply)
787 {
788 	WARN_ON(atomic_read(&reply->refcnt) <= 0);
789 	if (atomic_dec_and_test(&reply->refcnt)) {
790 		kfree(reply);
791 	}
792 
793 }
794 
795 static struct lcs_reply *
796 lcs_alloc_reply(struct lcs_cmd *cmd)
797 {
798 	struct lcs_reply *reply;
799 
800 	LCS_DBF_TEXT(4, trace, "getreply");
801 
802 	reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
803 	if (!reply)
804 		return NULL;
805 	atomic_set(&reply->refcnt,1);
806 	reply->sequence_no = cmd->sequence_no;
807 	reply->received = 0;
808 	reply->rc = 0;
809 	init_waitqueue_head(&reply->wait_q);
810 
811 	return reply;
812 }
813 
814 /**
815  * Notifier function for lancmd replies. Called from read irq.
816  */
817 static void
818 lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
819 {
820 	struct list_head *l, *n;
821 	struct lcs_reply *reply;
822 
823 	LCS_DBF_TEXT(4, trace, "notiwait");
824 	spin_lock(&card->lock);
825 	list_for_each_safe(l, n, &card->lancmd_waiters) {
826 		reply = list_entry(l, struct lcs_reply, list);
827 		if (reply->sequence_no == cmd->sequence_no) {
828 			lcs_get_reply(reply);
829 			list_del_init(&reply->list);
830 			if (reply->callback != NULL)
831 				reply->callback(card, cmd);
832 			reply->received = 1;
833 			reply->rc = cmd->return_code;
834 			wake_up(&reply->wait_q);
835 			lcs_put_reply(reply);
836 			break;
837 		}
838 	}
839 	spin_unlock(&card->lock);
840 }
841 
842 /**
843  * Emit buffer of a lan command.
844  */
845 static void
846 lcs_lancmd_timeout(unsigned long data)
847 {
848 	struct lcs_reply *reply, *list_reply, *r;
849 	unsigned long flags;
850 
851 	LCS_DBF_TEXT(4, trace, "timeout");
852 	reply = (struct lcs_reply *) data;
853 	spin_lock_irqsave(&reply->card->lock, flags);
854 	list_for_each_entry_safe(list_reply, r,
855 				 &reply->card->lancmd_waiters,list) {
856 		if (reply == list_reply) {
857 			lcs_get_reply(reply);
858 			list_del_init(&reply->list);
859 			spin_unlock_irqrestore(&reply->card->lock, flags);
860 			reply->received = 1;
861 			reply->rc = -ETIME;
862 			wake_up(&reply->wait_q);
863 			lcs_put_reply(reply);
864 			return;
865 		}
866 	}
867 	spin_unlock_irqrestore(&reply->card->lock, flags);
868 }
869 
870 static int
871 lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
872 		void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
873 {
874 	struct lcs_reply *reply;
875 	struct lcs_cmd *cmd;
876 	struct timer_list timer;
877 	unsigned long flags;
878 	int rc;
879 
880 	LCS_DBF_TEXT(4, trace, "sendcmd");
881 	cmd = (struct lcs_cmd *) buffer->data;
882 	cmd->return_code = 0;
883 	cmd->sequence_no = card->sequence_no++;
884 	reply = lcs_alloc_reply(cmd);
885 	if (!reply)
886 		return -ENOMEM;
887 	reply->callback = reply_callback;
888 	reply->card = card;
889 	spin_lock_irqsave(&card->lock, flags);
890 	list_add_tail(&reply->list, &card->lancmd_waiters);
891 	spin_unlock_irqrestore(&card->lock, flags);
892 
893 	buffer->callback = lcs_release_buffer;
894 	rc = lcs_ready_buffer(&card->write, buffer);
895 	if (rc)
896 		return rc;
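	/*
	 * Arm an on-stack timer as a safety net: if no reply arrives within
	 * lancmd_timeout seconds, lcs_lancmd_timeout() marks the reply as
	 * received with rc = -ETIME so that the wait below terminates.
	 */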
897 	init_timer_on_stack(&timer);
898 	timer.function = lcs_lancmd_timeout;
899 	timer.data = (unsigned long) reply;
900 	timer.expires = jiffies + HZ*card->lancmd_timeout;
901 	add_timer(&timer);
902 	wait_event(reply->wait_q, reply->received);
903 	del_timer_sync(&timer);
904 	LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
905 	rc = reply->rc;
906 	lcs_put_reply(reply);
907 	return rc ? -EIO : 0;
908 }
909 
910 /**
911  * LCS startup command
912  */
913 static int
914 lcs_send_startup(struct lcs_card *card, __u8 initiator)
915 {
916 	struct lcs_buffer *buffer;
917 	struct lcs_cmd *cmd;
918 
919 	LCS_DBF_TEXT(2, trace, "startup");
920 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
921 	cmd = (struct lcs_cmd *) buffer->data;
922 	cmd->cmd_code = LCS_CMD_STARTUP;
923 	cmd->initiator = initiator;
924 	cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
925 	return lcs_send_lancmd(card, buffer, NULL);
926 }
927 
928 /**
929  * LCS shutdown command
930  */
931 static int
932 lcs_send_shutdown(struct lcs_card *card)
933 {
934 	struct lcs_buffer *buffer;
935 	struct lcs_cmd *cmd;
936 
937 	LCS_DBF_TEXT(2, trace, "shutdown");
938 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
939 	cmd = (struct lcs_cmd *) buffer->data;
940 	cmd->cmd_code = LCS_CMD_SHUTDOWN;
941 	cmd->initiator = LCS_INITIATOR_TCPIP;
942 	return lcs_send_lancmd(card, buffer, NULL);
943 }
944 
945 /**
946  * LCS lanstat command
947  */
948 static void
949 __lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
950 {
951 	LCS_DBF_TEXT(2, trace, "statcb");
952 	memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
953 }
954 
955 static int
956 lcs_send_lanstat(struct lcs_card *card)
957 {
958 	struct lcs_buffer *buffer;
959 	struct lcs_cmd *cmd;
960 
961 	LCS_DBF_TEXT(2,trace, "cmdstat");
962 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
963 	cmd = (struct lcs_cmd *) buffer->data;
964 	/* Setup lanstat command. */
965 	cmd->cmd_code = LCS_CMD_LANSTAT;
966 	cmd->initiator = LCS_INITIATOR_TCPIP;
967 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
968 	cmd->cmd.lcs_std_cmd.portno = card->portno;
969 	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
970 }
971 
972 /**
973  * send stoplan command
974  */
975 static int
976 lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
977 {
978 	struct lcs_buffer *buffer;
979 	struct lcs_cmd *cmd;
980 
981 	LCS_DBF_TEXT(2, trace, "cmdstpln");
982 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
983 	cmd = (struct lcs_cmd *) buffer->data;
984 	cmd->cmd_code = LCS_CMD_STOPLAN;
985 	cmd->initiator = initiator;
986 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
987 	cmd->cmd.lcs_std_cmd.portno = card->portno;
988 	return lcs_send_lancmd(card, buffer, NULL);
989 }
990 
991 /**
992  * send startlan command
993  */
994 static void
995 __lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
996 {
997 	LCS_DBF_TEXT(2, trace, "srtlancb");
998 	card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
999 	card->portno = cmd->cmd.lcs_std_cmd.portno;
1000 }
1001 
1002 static int
1003 lcs_send_startlan(struct lcs_card *card, __u8 initiator)
1004 {
1005 	struct lcs_buffer *buffer;
1006 	struct lcs_cmd *cmd;
1007 
1008 	LCS_DBF_TEXT(2, trace, "cmdstaln");
1009 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1010 	cmd = (struct lcs_cmd *) buffer->data;
1011 	cmd->cmd_code = LCS_CMD_STARTLAN;
1012 	cmd->initiator = initiator;
1013 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
1014 	cmd->cmd.lcs_std_cmd.portno = card->portno;
1015 	return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
1016 }
1017 
1018 #ifdef CONFIG_IP_MULTICAST
1019 /**
1020  * send setipm command (Multicast)
1021  */
1022 static int
1023 lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1024 {
1025 	struct lcs_buffer *buffer;
1026 	struct lcs_cmd *cmd;
1027 
1028 	LCS_DBF_TEXT(2, trace, "cmdsetim");
1029 	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1030 	cmd = (struct lcs_cmd *) buffer->data;
1031 	cmd->cmd_code = LCS_CMD_SETIPM;
1032 	cmd->initiator = LCS_INITIATOR_TCPIP;
1033 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1034 	cmd->cmd.lcs_qipassist.portno = card->portno;
1035 	cmd->cmd.lcs_qipassist.version = 4;
1036 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1037 	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1038 	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1039 	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1040 	return lcs_send_lancmd(card, buffer, NULL);
1041 }
1042 
1043 /**
1044  * send delipm command (Multicast)
1045  */
1046 static int
1047 lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1048 {
1049 	struct lcs_buffer *buffer;
1050 	struct lcs_cmd *cmd;
1051 
1052 	LCS_DBF_TEXT(2, trace, "cmddelim");
1053 	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1054 	cmd = (struct lcs_cmd *) buffer->data;
1055 	cmd->cmd_code = LCS_CMD_DELIPM;
1056 	cmd->initiator = LCS_INITIATOR_TCPIP;
1057 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1058 	cmd->cmd.lcs_qipassist.portno = card->portno;
1059 	cmd->cmd.lcs_qipassist.version = 4;
1060 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1061 	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1062 	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1063 	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1064 	return lcs_send_lancmd(card, buffer, NULL);
1065 }
1066 
1067 /**
1068  * check if multicast is supported by LCS
1069  */
1070 static void
1071 __lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
1072 {
1073 	LCS_DBF_TEXT(2, trace, "chkmccb");
1074 	card->ip_assists_supported =
1075 		cmd->cmd.lcs_qipassist.ip_assists_supported;
1076 	card->ip_assists_enabled =
1077 		cmd->cmd.lcs_qipassist.ip_assists_enabled;
1078 }
1079 
1080 static int
1081 lcs_check_multicast_support(struct lcs_card *card)
1082 {
1083 	struct lcs_buffer *buffer;
1084 	struct lcs_cmd *cmd;
1085 	int rc;
1086 
1087 	LCS_DBF_TEXT(2, trace, "cmdqipa");
1088 	/* Send query ipassist. */
1089 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1090 	cmd = (struct lcs_cmd *) buffer->data;
1091 	cmd->cmd_code = LCS_CMD_QIPASSIST;
1092 	cmd->initiator = LCS_INITIATOR_TCPIP;
1093 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1094 	cmd->cmd.lcs_qipassist.portno = card->portno;
1095 	cmd->cmd.lcs_qipassist.version = 4;
1096 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1097 	rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
1098 	if (rc != 0) {
1099 		pr_err("Query IPAssist failed. Assuming unsupported!\n");
1100 		return -EOPNOTSUPP;
1101 	}
1102 	if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
1103 		return 0;
1104 	return -EOPNOTSUPP;
1105 }
1106 
1107 /**
1108  * set or del multicast address on LCS card
1109  */
1110 static void
1111 lcs_fix_multicast_list(struct lcs_card *card)
1112 {
1113 	struct list_head failed_list;
1114 	struct lcs_ipm_list *ipm, *tmp;
1115 	unsigned long flags;
1116 	int rc;
1117 
1118 	LCS_DBF_TEXT(4,trace, "fixipm");
1119 	INIT_LIST_HEAD(&failed_list);
1120 	spin_lock_irqsave(&card->ipm_lock, flags);
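	/*
	 * ipm_lock must be dropped around lcs_send_setipm()/lcs_send_delipm()
	 * because they sleep.  The list may change while the lock is not
	 * held, so the scan is restarted from the top via list_modified.
	 */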
1121 list_modified:
1122 	list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
1123 		switch (ipm->ipm_state) {
1124 		case LCS_IPM_STATE_SET_REQUIRED:
1125 			/* del from ipm_list so no one else can tamper with
1126 			 * this entry */
1127 			list_del_init(&ipm->list);
1128 			spin_unlock_irqrestore(&card->ipm_lock, flags);
1129 			rc = lcs_send_setipm(card, ipm);
1130 			spin_lock_irqsave(&card->ipm_lock, flags);
1131 			if (rc) {
1132 				pr_info("Adding multicast address failed."
1133 					" Table possibly full!\n");
1134 				/* store ipm in failed list -> will be added
1135 				 * to ipm_list again, so a retry will be done
1136 				 * during the next call of this function */
1137 				list_add_tail(&ipm->list, &failed_list);
1138 			} else {
1139 				ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
1140 				/* re-insert into ipm_list */
1141 				list_add_tail(&ipm->list, &card->ipm_list);
1142 			}
1143 			goto list_modified;
1144 		case LCS_IPM_STATE_DEL_REQUIRED:
1145 			list_del(&ipm->list);
1146 			spin_unlock_irqrestore(&card->ipm_lock, flags);
1147 			lcs_send_delipm(card, ipm);
1148 			spin_lock_irqsave(&card->ipm_lock, flags);
1149 			kfree(ipm);
1150 			goto list_modified;
1151 		case LCS_IPM_STATE_ON_CARD:
1152 			break;
1153 		}
1154 	}
1155 	/* re-insert all entries from the failed_list into ipm_list */
1156 	list_for_each_entry_safe(ipm, tmp, &failed_list, list)
1157 		list_move_tail(&ipm->list, &card->ipm_list);
1158 
1159 	spin_unlock_irqrestore(&card->ipm_lock, flags);
1160 }
1161 
1162 /**
1163  * get mac address for the relevant Multicast address
1164  */
1165 static void
1166 lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
1167 {
1168 	LCS_DBF_TEXT(4,trace, "getmac");
1169 	if (dev->type == ARPHRD_IEEE802_TR)
1170 		ip_tr_mc_map(ipm, mac);
1171 	else
1172 		ip_eth_mc_map(ipm, mac);
1173 }
1174 
1175 /**
1176  * Flag multicast entries for deletion that are no longer in the kernel's mc_list.
1177  */
1178 static inline void
1179 lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1180 {
1181 	struct ip_mc_list *im4;
1182 	struct list_head *l;
1183 	struct lcs_ipm_list *ipm;
1184 	unsigned long flags;
1185 	char buf[MAX_ADDR_LEN];
1186 
1187 	LCS_DBF_TEXT(4, trace, "remmclst");
1188 	spin_lock_irqsave(&card->ipm_lock, flags);
1189 	list_for_each(l, &card->ipm_list) {
1190 		ipm = list_entry(l, struct lcs_ipm_list, list);
1191 		for (im4 = rcu_dereference(in4_dev->mc_list);
1192 		     im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
1193 			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1194 			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
1195 			     (memcmp(buf, &ipm->ipm.mac_addr,
1196 				     LCS_MAC_LENGTH) == 0) )
1197 				break;
1198 		}
1199 		if (im4 == NULL)
1200 			ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
1201 	}
1202 	spin_unlock_irqrestore(&card->ipm_lock, flags);
1203 }
1204 
1205 static inline struct lcs_ipm_list *
1206 lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
1207 {
1208 	struct lcs_ipm_list *tmp, *ipm = NULL;
1209 	struct list_head *l;
1210 	unsigned long flags;
1211 
1212 	LCS_DBF_TEXT(4, trace, "chkmcent");
1213 	spin_lock_irqsave(&card->ipm_lock, flags);
1214 	list_for_each(l, &card->ipm_list) {
1215 		tmp = list_entry(l, struct lcs_ipm_list, list);
1216 		if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
1217 		     (memcmp(buf, &tmp->ipm.mac_addr,
1218 			     LCS_MAC_LENGTH) == 0) ) {
1219 			ipm = tmp;
1220 			break;
1221 		}
1222 	}
1223 	spin_unlock_irqrestore(&card->ipm_lock, flags);
1224 	return ipm;
1225 }
1226 
1227 static inline void
1228 lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1229 {
1230 
1231 	struct ip_mc_list *im4;
1232 	struct lcs_ipm_list *ipm;
1233 	char buf[MAX_ADDR_LEN];
1234 	unsigned long flags;
1235 
1236 	LCS_DBF_TEXT(4, trace, "setmclst");
1237 	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
1238 	     im4 = rcu_dereference(im4->next_rcu)) {
1239 		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1240 		ipm = lcs_check_addr_entry(card, im4, buf);
1241 		if (ipm != NULL)
1242 			continue;	/* Address already in list. */
1243 		ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
1244 		if (ipm == NULL) {
1245 			pr_info("Not enough memory to add"
1246 				" new multicast entry!\n");
1247 			break;
1248 		}
1249 		memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
1250 		ipm->ipm.ip_addr = im4->multiaddr;
1251 		ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
1252 		spin_lock_irqsave(&card->ipm_lock, flags);
1253 		LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4);
1254 		list_add(&ipm->list, &card->ipm_list);
1255 		spin_unlock_irqrestore(&card->ipm_lock, flags);
1256 	}
1257 }
1258 
1259 static int
1260 lcs_register_mc_addresses(void *data)
1261 {
1262 	struct lcs_card *card;
1263 	struct in_device *in4_dev;
1264 
1265 	card = (struct lcs_card *) data;
1266 
1267 	if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
1268 		return 0;
1269 	LCS_DBF_TEXT(4, trace, "regmulti");
1270 
1271 	in4_dev = in_dev_get(card->dev);
1272 	if (in4_dev == NULL)
1273 		goto out;
1274 	rcu_read_lock();
1275 	lcs_remove_mc_addresses(card,in4_dev);
1276 	lcs_set_mc_addresses(card, in4_dev);
1277 	rcu_read_unlock();
1278 	in_dev_put(in4_dev);
1279 
1280 	netif_carrier_off(card->dev);
1281 	netif_tx_disable(card->dev);
1282 	wait_event(card->write.wait_q,
1283 			(card->write.state != LCS_CH_STATE_RUNNING));
1284 	lcs_fix_multicast_list(card);
1285 	if (card->state == DEV_STATE_UP) {
1286 		netif_carrier_on(card->dev);
1287 		netif_wake_queue(card->dev);
1288 	}
1289 out:
1290 	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
1291 	return 0;
1292 }
1293 #endif /* CONFIG_IP_MULTICAST */
1294 
1295 /**
1296  * Called by the network stack whenever the multicast
1297  * address list of the net device has changed.
1298  */
1299 static void
1300 lcs_set_multicast_list(struct net_device *dev)
1301 {
1302 #ifdef CONFIG_IP_MULTICAST
1303 	struct lcs_card *card;
1304 
1305 	LCS_DBF_TEXT(4, trace, "setmulti");
1306 	card = (struct lcs_card *) dev->ml_priv;
1307 
1308 	if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
1309 		schedule_work(&card->kernel_thread_starter);
1310 #endif /* CONFIG_IP_MULTICAST */
1311 }
1312 
1313 static long
1314 lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1315 {
1316 	if (!IS_ERR(irb))
1317 		return 0;
1318 
1319 	switch (PTR_ERR(irb)) {
1320 	case -EIO:
1321 		dev_warn(&cdev->dev,
1322 			"An I/O-error occurred on the LCS device\n");
1323 		LCS_DBF_TEXT(2, trace, "ckirberr");
1324 		LCS_DBF_TEXT_(2, trace, "  rc%d", -EIO);
1325 		break;
1326 	case -ETIMEDOUT:
1327 		dev_warn(&cdev->dev,
1328 			"A command timed out on the LCS device\n");
1329 		LCS_DBF_TEXT(2, trace, "ckirberr");
1330 		LCS_DBF_TEXT_(2, trace, "  rc%d", -ETIMEDOUT);
1331 		break;
1332 	default:
1333 		dev_warn(&cdev->dev,
1334 			"An error occurred on the LCS device, rc=%ld\n",
1335 			PTR_ERR(irb));
1336 		LCS_DBF_TEXT(2, trace, "ckirberr");
1337 		LCS_DBF_TEXT(2, trace, "  rc???");
1338 	}
1339 	return PTR_ERR(irb);
1340 }
1341 
1342 static int
1343 lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1344 {
1345 	int dstat, cstat;
1346 	char *sense;
1347 
1348 	sense = (char *) irb->ecw;
1349 	cstat = irb->scsw.cmd.cstat;
1350 	dstat = irb->scsw.cmd.dstat;
1351 
1352 	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1353 		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
1354 		     SCHN_STAT_PROT_CHECK   | SCHN_STAT_PROG_CHECK)) {
1355 		LCS_DBF_TEXT(2, trace, "CGENCHK");
1356 		return 1;
1357 	}
1358 	if (dstat & DEV_STAT_UNIT_CHECK) {
1359 		if (sense[LCS_SENSE_BYTE_1] &
1360 		    LCS_SENSE_RESETTING_EVENT) {
1361 			LCS_DBF_TEXT(2, trace, "REVIND");
1362 			return 1;
1363 		}
1364 		if (sense[LCS_SENSE_BYTE_0] &
1365 		    LCS_SENSE_CMD_REJECT) {
1366 			LCS_DBF_TEXT(2, trace, "CMDREJ");
1367 			return 0;
1368 		}
1369 		if ((!sense[LCS_SENSE_BYTE_0]) &&
1370 		    (!sense[LCS_SENSE_BYTE_1]) &&
1371 		    (!sense[LCS_SENSE_BYTE_2]) &&
1372 		    (!sense[LCS_SENSE_BYTE_3])) {
1373 			LCS_DBF_TEXT(2, trace, "ZEROSEN");
1374 			return 0;
1375 		}
1376 		LCS_DBF_TEXT(2, trace, "DGENCHK");
1377 		return 1;
1378 	}
1379 	return 0;
1380 }
1381 
1382 static void
1383 lcs_schedule_recovery(struct lcs_card *card)
1384 {
1385 	LCS_DBF_TEXT(2, trace, "startrec");
1386 	if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
1387 		schedule_work(&card->kernel_thread_starter);
1388 }
1389 
1390 /**
1391  * IRQ Handler for LCS channels
1392  */
1393 static void
1394 lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1395 {
1396 	struct lcs_card *card;
1397 	struct lcs_channel *channel;
1398 	int rc, index;
1399 	int cstat, dstat;
1400 
1401 	if (lcs_check_irb_error(cdev, irb))
1402 		return;
1403 
1404 	card = CARD_FROM_DEV(cdev);
1405 	if (card->read.ccwdev == cdev)
1406 		channel = &card->read;
1407 	else
1408 		channel = &card->write;
1409 
1410 	cstat = irb->scsw.cmd.cstat;
1411 	dstat = irb->scsw.cmd.dstat;
1412 	LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
1413 	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
1414 		      irb->scsw.cmd.dstat);
1415 	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
1416 		      irb->scsw.cmd.actl);
1417 
1418 	/* Check for channel and device errors presented */
1419 	rc = lcs_get_problem(cdev, irb);
1420 	if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
1421 		dev_warn(&cdev->dev,
1422 			"The LCS device stopped because of an error,"
1423 			" dstat=0x%X, cstat=0x%X \n",
1424 			    dstat, cstat);
1425 		if (rc) {
1426 			channel->state = LCS_CH_STATE_ERROR;
1427 		}
1428 	}
1429 	if (channel->state == LCS_CH_STATE_ERROR) {
1430 		lcs_schedule_recovery(card);
1431 		wake_up(&card->wait_q);
1432 		return;
1433 	}
1434 	/* How far in the ccw chain have we processed? */
1435 	if ((channel->state != LCS_CH_STATE_INIT) &&
1436 	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
1437 	    (irb->scsw.cmd.cpa != 0)) {
1438 		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
1439 			- channel->ccws;
1440 		if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
1441 		    (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
1442 			/* Bloody io subsystem tells us lies about cpa... */
1443 			index = (index - 1) & (LCS_NUM_BUFFS - 1);
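		/*
		 * Advance io_idx to the CCW the channel has reached and mark
		 * every buffer passed over as processed.
		 */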
1444 		while (channel->io_idx != index) {
1445 			__lcs_processed_buffer(channel,
1446 					       channel->iob + channel->io_idx);
1447 			channel->io_idx =
1448 				(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
1449 		}
1450 	}
1451 
1452 	if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
1453 	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
1454 	    (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
1455 		/* Mark channel as stopped. */
1456 		channel->state = LCS_CH_STATE_STOPPED;
1457 	else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
1458 		/* CCW execution stopped on a suspend bit. */
1459 		channel->state = LCS_CH_STATE_SUSPENDED;
1460 	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1461 		if (irb->scsw.cmd.cc != 0) {
1462 			ccw_device_halt(channel->ccwdev, (addr_t) channel);
1463 			return;
1464 		}
1465 		/* The channel has been stopped by halt_IO. */
1466 		channel->state = LCS_CH_STATE_HALTED;
1467 	}
1468 	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
1469 		channel->state = LCS_CH_STATE_CLEARED;
1470 	/* Do the rest in the tasklet. */
1471 	tasklet_schedule(&channel->irq_tasklet);
1472 }
1473 
1474 /**
1475  * Tasklet for IRQ handler
1476  */
1477 static void
1478 lcs_tasklet(unsigned long data)
1479 {
1480 	unsigned long flags;
1481 	struct lcs_channel *channel;
1482 	struct lcs_buffer *iob;
1483 	int buf_idx;
1484 	int rc;
1485 
1486 	channel = (struct lcs_channel *) data;
1487 	LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
1488 
1489 	/* Check for processed buffers. */
1490 	iob = channel->iob;
1491 	buf_idx = channel->buf_idx;
1492 	while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
1493 		/* Do the callback thing. */
1494 		if (iob[buf_idx].callback != NULL)
1495 			iob[buf_idx].callback(channel, iob + buf_idx);
1496 		buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
1497 	}
1498 	channel->buf_idx = buf_idx;
1499 
1500 	if (channel->state == LCS_CH_STATE_STOPPED)
1501 		// FIXME: what if rc != 0 ??
1502 		rc = lcs_start_channel(channel);
1503 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1504 	if (channel->state == LCS_CH_STATE_SUSPENDED &&
1505 	    channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) {
1506 		// FIXME: what if rc != 0 ??
1507 		rc = __lcs_resume_channel(channel);
1508 	}
1509 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1510 
1511 	/* Something happened on the channel. Wake up waiters. */
1512 	wake_up(&channel->wait_q);
1513 }
1514 
1515 /**
1516  * Finish current tx buffer and make it ready for transmit.
1517  */
1518 static void
1519 __lcs_emit_txbuffer(struct lcs_card *card)
1520 {
1521 	LCS_DBF_TEXT(5, trace, "emittx");
1522 	*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
1523 	card->tx_buffer->count += 2;
1524 	lcs_ready_buffer(&card->write, card->tx_buffer);
1525 	card->tx_buffer = NULL;
1526 	card->tx_emitted++;
1527 }
1528 
1529 /**
1530  * Callback for finished tx buffers.
1531  */
1532 static void
1533 lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1534 {
1535 	struct lcs_card *card;
1536 
1537 	LCS_DBF_TEXT(5, trace, "txbuffcb");
1538 	/* Put buffer back to pool. */
1539 	lcs_release_buffer(channel, buffer);
1540 	card = container_of(channel, struct lcs_card, write);
1541 	if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
1542 		netif_wake_queue(card->dev);
1543 	spin_lock(&card->lock);
1544 	card->tx_emitted--;
1545 	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
1546 		/*
1547 		 * Last running tx buffer has finished. Submit partially
1548 		 * filled current buffer.
1549 		 */
1550 		__lcs_emit_txbuffer(card);
1551 	spin_unlock(&card->lock);
1552 }
1553 
1554 /**
1555  * Packet transmit function called by network stack
1556  */
1557 static int
1558 __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1559 		 struct net_device *dev)
1560 {
1561 	struct lcs_header *header;
1562 	int rc = NETDEV_TX_OK;
1563 
1564 	LCS_DBF_TEXT(5, trace, "hardxmit");
1565 	if (skb == NULL) {
1566 		card->stats.tx_dropped++;
1567 		card->stats.tx_errors++;
1568 		return NETDEV_TX_OK;
1569 	}
1570 	if (card->state != DEV_STATE_UP) {
1571 		dev_kfree_skb(skb);
1572 		card->stats.tx_dropped++;
1573 		card->stats.tx_errors++;
1574 		card->stats.tx_carrier_errors++;
1575 		return NETDEV_TX_OK;
1576 	}
1577 	if (skb->protocol == htons(ETH_P_IPV6)) {
1578 		dev_kfree_skb(skb);
1579 		return NETDEV_TX_OK;
1580 	}
1581 	netif_stop_queue(card->dev);
1582 	spin_lock(&card->lock);
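	/*
	 * Outgoing frames are packed back to back into card->tx_buffer.
	 * The buffer is handed to the write channel when the next skb would
	 * no longer fit, or as soon as no previously emitted buffer is in
	 * flight any more (see lcs_txbuffer_cb()).
	 */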
1583 	if (card->tx_buffer != NULL &&
1584 	    card->tx_buffer->count + sizeof(struct lcs_header) +
1585 	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
1586 		/* skb too big for current tx buffer. */
1587 		__lcs_emit_txbuffer(card);
1588 	if (card->tx_buffer == NULL) {
1589 		/* Get new tx buffer */
1590 		card->tx_buffer = lcs_get_buffer(&card->write);
1591 		if (card->tx_buffer == NULL) {
1592 			card->stats.tx_dropped++;
1593 			rc = NETDEV_TX_BUSY;
1594 			goto out;
1595 		}
1596 		card->tx_buffer->callback = lcs_txbuffer_cb;
1597 		card->tx_buffer->count = 0;
1598 	}
1599 	header = (struct lcs_header *)
1600 		(card->tx_buffer->data + card->tx_buffer->count);
1601 	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
1602 	header->offset = card->tx_buffer->count;
1603 	header->type = card->lan_type;
1604 	header->slot = card->portno;
1605 	skb_copy_from_linear_data(skb, header + 1, skb->len);
1606 	spin_unlock(&card->lock);
1607 	card->stats.tx_bytes += skb->len;
1608 	card->stats.tx_packets++;
1609 	dev_kfree_skb(skb);
1610 	netif_wake_queue(card->dev);
1611 	spin_lock(&card->lock);
1612 	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
1613 		/* If this is the first tx buffer emit it immediately. */
1614 		__lcs_emit_txbuffer(card);
1615 out:
1616 	spin_unlock(&card->lock);
1617 	return rc;
1618 }
1619 
1620 static int
1621 lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
1622 {
1623 	struct lcs_card *card;
1624 	int rc;
1625 
1626 	LCS_DBF_TEXT(5, trace, "pktxmit");
1627 	card = (struct lcs_card *) dev->ml_priv;
1628 	rc = __lcs_start_xmit(card, skb, dev);
1629 	return rc;
1630 }
1631 
1632 /**
1633  * send startlan and lanstat command to make LCS device ready
1634  */
1635 static int
1636 lcs_startlan_auto(struct lcs_card *card)
1637 {
1638 	int rc;
1639 
1640 	LCS_DBF_TEXT(2, trace, "strtauto");
1641 #ifdef CONFIG_NET_ETHERNET
1642 	card->lan_type = LCS_FRAME_TYPE_ENET;
1643 	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1644 	if (rc == 0)
1645 		return 0;
1646 
1647 #endif
1648 #ifdef CONFIG_TR
1649 	card->lan_type = LCS_FRAME_TYPE_TR;
1650 	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1651 	if (rc == 0)
1652 		return 0;
1653 #endif
1654 #ifdef CONFIG_FDDI
1655 	card->lan_type = LCS_FRAME_TYPE_FDDI;
1656 	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1657 	if (rc == 0)
1658 		return 0;
1659 #endif
1660 	return -EIO;
1661 }
1662 
1663 static int
1664 lcs_startlan(struct lcs_card *card)
1665 {
1666 	int rc, i;
1667 
1668 	LCS_DBF_TEXT(2, trace, "startlan");
1669 	rc = 0;
1670 	if (card->portno != LCS_INVALID_PORT_NO) {
1671 		if (card->lan_type == LCS_FRAME_TYPE_AUTO)
1672 			rc = lcs_startlan_auto(card);
1673 		else
1674 			rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1675 	} else {
1676 		for (i = 0; i <= 16; i++) {
1677 			card->portno = i;
1678 			if (card->lan_type != LCS_FRAME_TYPE_AUTO)
1679 				rc = lcs_send_startlan(card,
1680 						       LCS_INITIATOR_TCPIP);
1681 			else
1682 				/* autodetecting lan type */
1683 				rc = lcs_startlan_auto(card);
1684 			if (rc == 0)
1685 				break;
1686 		}
1687 	}
1688 	if (rc == 0)
1689 		return lcs_send_lanstat(card);
1690 	return rc;
1691 }
1692 
1693 /**
1694  * LCS detect function
1695  * setup channels and make them I/O ready
1696  */
1697 static int
1698 lcs_detect(struct lcs_card *card)
1699 {
1700 	int rc = 0;
1701 
1702 	LCS_DBF_TEXT(2, setup, "lcsdetct");
1703 	/* start/reset card */
1704 	if (card->dev)
1705 		netif_stop_queue(card->dev);
1706 	rc = lcs_stop_channels(card);
1707 	if (rc == 0) {
1708 		rc = lcs_start_channels(card);
1709 		if (rc == 0) {
1710 			rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
1711 			if (rc == 0)
1712 				rc = lcs_startlan(card);
1713 		}
1714 	}
1715 	if (rc == 0) {
1716 		card->state = DEV_STATE_UP;
1717 	} else {
1718 		card->state = DEV_STATE_DOWN;
1719 		card->write.state = LCS_CH_STATE_INIT;
1720 		card->read.state =  LCS_CH_STATE_INIT;
1721 	}
1722 	return rc;
1723 }
1724 
1725 /**
1726  * LCS Stop card
1727  */
1728 static int
1729 lcs_stopcard(struct lcs_card *card)
1730 {
1731 	int rc;
1732 
1733 	LCS_DBF_TEXT(3, setup, "stopcard");
1734 
1735 	if (card->read.state != LCS_CH_STATE_STOPPED &&
1736 	    card->write.state != LCS_CH_STATE_STOPPED &&
1737 	    card->read.state != LCS_CH_STATE_ERROR &&
1738 	    card->write.state != LCS_CH_STATE_ERROR &&
1739 	    card->state == DEV_STATE_UP) {
1740 		lcs_clear_multicast_list(card);
1741 		rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
1742 		rc = lcs_send_shutdown(card);
1743 	}
1744 	rc = lcs_stop_channels(card);
1745 	card->state = DEV_STATE_DOWN;
1746 
1747 	return rc;
1748 }
1749 
1750 /**
1751  * Kernel Thread helper functions for LGW initiated commands
1752  */
1753 static void
1754 lcs_start_kernel_thread(struct work_struct *work)
1755 {
1756 	struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
1757 	LCS_DBF_TEXT(5, trace, "krnthrd");
1758 	if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
1759 		kthread_run(lcs_recovery, card, "lcs_recover");
1760 #ifdef CONFIG_IP_MULTICAST
1761 	if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
1762 		kthread_run(lcs_register_mc_addresses, card, "regipm");
1763 #endif
1764 }
1765 
1766 /**
1767  * Process control frames.
1768  */
1769 static void
1770 lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
1771 {
1772 	LCS_DBF_TEXT(5, trace, "getctrl");
1773 	if (cmd->initiator == LCS_INITIATOR_LGW) {
1774 		switch(cmd->cmd_code) {
1775 		case LCS_CMD_STARTUP:
1776 		case LCS_CMD_STARTLAN:
1777 			lcs_schedule_recovery(card);
1778 			break;
1779 		case LCS_CMD_STOPLAN:
1780 			if (card->dev) {
1781 				pr_warning("Stoplan for %s initiated by LGW.\n", card->dev->name);
1782 				netif_carrier_off(card->dev);
1783 			}
1784 			break;
1785 		default:
1786 			LCS_DBF_TEXT(5, trace, "noLGWcmd");
1787 			break;
1788 		}
1789 	} else
1790 		lcs_notify_lancmd_waiters(card, cmd);
1791 }
1792 
1793 /**
1794  * Unpack network packet.
1795  */
1796 static void
1797 lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
1798 {
1799 	struct sk_buff *skb;
1800 
1801 	LCS_DBF_TEXT(5, trace, "getskb");
1802 	if (card->dev == NULL ||
1803 	    card->state != DEV_STATE_UP)
1804 		/* The card isn't up. Ignore the packet. */
1805 		return;
1806 
1807 	skb = dev_alloc_skb(skb_len);
1808 	if (skb == NULL) {
1809 		dev_err(&card->dev->dev,
1810 			"Allocating a socket buffer to interface %s failed\n",
1811 			card->dev->name);
1812 		card->stats.rx_dropped++;
1813 		return;
1814 	}
1815 	memcpy(skb_put(skb, skb_len), skb_data, skb_len);
1816 	skb->protocol =	card->lan_type_trans(skb, card->dev);
1817 	card->stats.rx_bytes += skb_len;
1818 	card->stats.rx_packets++;
1819 	if (skb->protocol == htons(ETH_P_802_2))
1820 		*((__u32 *)skb->cb) = ++card->pkt_seq;
1821 	netif_rx(skb);
1822 }
1823 
1824 /**
1825  * LCS main routine to get packets and lancmd replies from the buffers
1826  */
1827 static void
1828 lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1829 {
1830 	struct lcs_card *card;
1831 	struct lcs_header *lcs_hdr;
1832 	__u16 offset;
1833 
1834 	LCS_DBF_TEXT(5, trace, "lcsgtpkt");
1835 	lcs_hdr = (struct lcs_header *) buffer->data;
1836 	if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
1837 		LCS_DBF_TEXT(4, trace, "-eiogpkt");
1838 		return;
1839 	}
1840 	card = container_of(channel, struct lcs_card, read);
1841 	offset = 0;
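	/*
	 * An inbound I/O buffer contains a chain of frames.  Each lcs_header
	 * offset field gives the offset of the next frame within the buffer;
	 * an offset of 0 terminates the chain.
	 */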
1842 	while (lcs_hdr->offset != 0) {
1843 		if (lcs_hdr->offset <= 0 ||
1844 		    lcs_hdr->offset > LCS_IOBUFFERSIZE ||
1845 		    lcs_hdr->offset < offset) {
1846 			/* Offset invalid. */
1847 			card->stats.rx_length_errors++;
1848 			card->stats.rx_errors++;
1849 			return;
1850 		}
1851 		/* What kind of frame is it? */
1852 		if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
1853 			/* Control frame. */
1854 			lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
1855 		else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
1856 			 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
1857 			 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
1858 			/* Normal network packet. */
1859 			lcs_get_skb(card, (char *)(lcs_hdr + 1),
1860 				    lcs_hdr->offset - offset -
1861 				    sizeof(struct lcs_header));
1862 		else
1863 			/* Unknown frame type. */
			; /* FIXME: error message? */
1865 		/* Proceed to next frame. */
1866 		offset = lcs_hdr->offset;
1867 		lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
1868 		lcs_hdr = (struct lcs_header *) (buffer->data + offset);
1869 	}
1870 	/* The buffer is now empty. Make it ready again. */
1871 	lcs_ready_buffer(&card->read, buffer);
1872 }
1873 
1874 /**
1875  * get network statistics for ifconfig and other user programs
1876  */
1877 static struct net_device_stats *
1878 lcs_getstats(struct net_device *dev)
1879 {
1880 	struct lcs_card *card;
1881 
1882 	LCS_DBF_TEXT(4, trace, "netstats");
1883 	card = (struct lcs_card *) dev->ml_priv;
1884 	return &card->stats;
1885 }
1886 
1887 /**
1888  * stop lcs device
1889  * This function will be called by user doing ifconfig xxx down
1890  */
1891 static int
1892 lcs_stop_device(struct net_device *dev)
1893 {
1894 	struct lcs_card *card;
1895 	int rc;
1896 
1897 	LCS_DBF_TEXT(2, trace, "stopdev");
1898 	card   = (struct lcs_card *) dev->ml_priv;
1899 	netif_carrier_off(dev);
1900 	netif_tx_disable(dev);
1901 	dev->flags &= ~IFF_UP;
1902 	wait_event(card->write.wait_q,
1903 		(card->write.state != LCS_CH_STATE_RUNNING));
1904 	rc = lcs_stopcard(card);
1905 	if (rc)
		dev_err(&card->dev->dev,
			"Shutting down the LCS device failed\n");
1908 	return rc;
1909 }
1910 
1911 /**
1912  * start lcs device and make it runnable
1913  * This function will be called by user doing ifconfig xxx up
1914  */
1915 static int
1916 lcs_open_device(struct net_device *dev)
1917 {
1918 	struct lcs_card *card;
1919 	int rc;
1920 
1921 	LCS_DBF_TEXT(2, trace, "opendev");
1922 	card = (struct lcs_card *) dev->ml_priv;
	/* Start the card and detect the LAN type. */
	rc = lcs_detect(card);
	if (rc) {
		pr_err("Error in opening device!\n");
	} else {
1929 		dev->flags |= IFF_UP;
1930 		netif_carrier_on(dev);
1931 		netif_wake_queue(dev);
1932 		card->state = DEV_STATE_UP;
1933 	}
1934 	return rc;
1935 }
1936 
1937 /**
1938  * show function for portno called by cat or similar things
1939  */
static ssize_t
lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct lcs_card *card;

	card = dev_get_drvdata(dev);
	if (!card)
		return 0;

	return sprintf(buf, "%d\n", card->portno);
}
1952 
1953 /**
1954  * store the value which is piped to file portno
1955  */
static ssize_t
lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct lcs_card *card;
	unsigned int value;

	card = dev_get_drvdata(dev);
	if (!card)
		return 0;

	if (sscanf(buf, "%u", &value) != 1)
		return -EINVAL;
	/* TODO: sanity checks */
	card->portno = value;

	return count;
}
1974 
1975 static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1976 
1977 const char *lcs_type[] = {
1978 	"not a channel",
1979 	"2216 parallel",
1980 	"2216 channel",
1981 	"OSA LCS card",
1982 	"unknown channel type",
1983 	"unsupported channel type",
1984 };
1985 
1986 static ssize_t
1987 lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1988 {
1989 	struct ccwgroup_device *cgdev;
1990 
1991 	cgdev = to_ccwgroupdev(dev);
1992 	if (!cgdev)
1993 		return -ENODEV;
1994 
1995 	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
1996 }
1997 
1998 static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
1999 
2000 static ssize_t
2001 lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
2002 {
2003 	struct lcs_card *card;
2004 
2005 	card = dev_get_drvdata(dev);
2006 
2007 	return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
2008 }
2009 
static ssize_t
lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct lcs_card *card;
	unsigned int value;

	card = dev_get_drvdata(dev);
	if (!card)
		return 0;

	if (sscanf(buf, "%u", &value) != 1)
		return -EINVAL;
	/* TODO: sanity checks */
	card->lancmd_timeout = value;

	return count;
}
2028 
2029 static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
2030 
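/*
 * Trigger a manual recovery by writing "1" to the recover attribute of an
 * online device. Example (the bus ID below is illustrative only):
 *	echo 1 > /sys/bus/ccwgroup/devices/0.0.f000/recover
 */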
2031 static ssize_t
2032 lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
2033 		      const char *buf, size_t count)
2034 {
2035 	struct lcs_card *card = dev_get_drvdata(dev);
2036 	char *tmp;
2037 	int i;
2038 
2039 	if (!card)
2040 		return -EINVAL;
2041 	if (card->state != DEV_STATE_UP)
2042 		return -EPERM;
2043 	i = simple_strtoul(buf, &tmp, 16);
2044 	if (i == 1)
2045 		lcs_schedule_recovery(card);
2046 	return count;
2047 }
2048 
2049 static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
2050 
2051 static struct attribute * lcs_attrs[] = {
2052 	&dev_attr_portno.attr,
2053 	&dev_attr_type.attr,
2054 	&dev_attr_lancmd_timeout.attr,
2055 	&dev_attr_recover.attr,
2056 	NULL,
2057 };
2058 
2059 static struct attribute_group lcs_attr_group = {
2060 	.attrs = lcs_attrs,
2061 };
2062 
2063 /**
2064  * lcs_probe_device is called on establishing a new ccwgroup_device.
2065  */
2066 static int
2067 lcs_probe_device(struct ccwgroup_device *ccwgdev)
2068 {
2069 	struct lcs_card *card;
2070 	int ret;
2071 
2072 	if (!get_device(&ccwgdev->dev))
2073 		return -ENODEV;
2074 
2075 	LCS_DBF_TEXT(2, setup, "add_dev");
	card = lcs_alloc_card();
	if (!card) {
		LCS_DBF_TEXT_(2, setup, "  rc%d", -ENOMEM);
		put_device(&ccwgdev->dev);
		return -ENOMEM;
	}
	ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
	if (ret) {
		lcs_free_card(card);
		put_device(&ccwgdev->dev);
		return ret;
	}
	dev_set_drvdata(&ccwgdev->dev, card);
	ccwgdev->cdev[0]->handler = lcs_irq;
	ccwgdev->cdev[1]->handler = lcs_irq;
	card->gdev = ccwgdev;
	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
	card->thread_start_mask = 0;
	card->thread_allowed_mask = 0;
	card->thread_running_mask = 0;
	return 0;
2097 }
2098 
2099 static int
2100 lcs_register_netdev(struct ccwgroup_device *ccwgdev)
2101 {
2102 	struct lcs_card *card;
2103 
2104 	LCS_DBF_TEXT(2, setup, "regnetdv");
2105 	card = dev_get_drvdata(&ccwgdev->dev);
2106 	if (card->dev->reg_state != NETREG_UNINITIALIZED)
2107 		return 0;
2108 	SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
2109 	return register_netdev(card->dev);
2110 }
2111 
2112 /**
2113  * lcs_new_device will be called by setting the group device online.
2114  */
2115 static const struct net_device_ops lcs_netdev_ops = {
2116 	.ndo_open		= lcs_open_device,
2117 	.ndo_stop		= lcs_stop_device,
2118 	.ndo_get_stats		= lcs_getstats,
2119 	.ndo_start_xmit		= lcs_start_xmit,
2120 };
2121 
2122 static const struct net_device_ops lcs_mc_netdev_ops = {
2123 	.ndo_open		= lcs_open_device,
2124 	.ndo_stop		= lcs_stop_device,
2125 	.ndo_get_stats		= lcs_getstats,
2126 	.ndo_start_xmit		= lcs_start_xmit,
2127 	.ndo_set_multicast_list = lcs_set_multicast_list,
2128 };
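/*
 * The multicast variant of the netdev ops is installed only when
 * lcs_check_multicast_support() reports that the card supports IP
 * multicast (see lcs_new_device() below).
 */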
2129 
2130 static int
2131 lcs_new_device(struct ccwgroup_device *ccwgdev)
2132 {
	struct lcs_card *card;
	struct net_device *dev = NULL;
2135 	enum lcs_dev_states recover_state;
2136 	int rc;
2137 
2138 	card = dev_get_drvdata(&ccwgdev->dev);
2139 	if (!card)
2140 		return -ENODEV;
2141 
2142 	LCS_DBF_TEXT(2, setup, "newdev");
2143 	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2144 	card->read.ccwdev  = ccwgdev->cdev[0];
2145 	card->write.ccwdev = ccwgdev->cdev[1];
2146 
2147 	recover_state = card->state;
2148 	rc = ccw_device_set_online(card->read.ccwdev);
2149 	if (rc)
2150 		goto out_err;
2151 	rc = ccw_device_set_online(card->write.ccwdev);
2152 	if (rc)
2153 		goto out_werr;
2154 
2155 	LCS_DBF_TEXT(3, setup, "lcsnewdv");
2156 
2157 	lcs_setup_card(card);
2158 	rc = lcs_detect(card);
2159 	if (rc) {
2160 		LCS_DBF_TEXT(2, setup, "dtctfail");
2161 		dev_err(&card->dev->dev,
2162 			"Detecting a network adapter for LCS devices"
2163 			" failed with rc=%d (0x%x)\n", rc, rc);
2164 		lcs_stopcard(card);
2165 		goto out;
2166 	}
2167 	if (card->dev) {
2168 		LCS_DBF_TEXT(2, setup, "samedev");
2169 		LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2170 		goto netdev_out;
2171 	}
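	/*
	 * First-time initialization: allocate a net_device matching the
	 * LAN type reported by the card and hook up the corresponding
	 * type_trans handler for received frames.
	 */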
2172 	switch (card->lan_type) {
2173 #ifdef CONFIG_NET_ETHERNET
2174 	case LCS_FRAME_TYPE_ENET:
2175 		card->lan_type_trans = eth_type_trans;
2176 		dev = alloc_etherdev(0);
2177 		break;
2178 #endif
2179 #ifdef CONFIG_TR
2180 	case LCS_FRAME_TYPE_TR:
2181 		card->lan_type_trans = tr_type_trans;
2182 		dev = alloc_trdev(0);
2183 		break;
2184 #endif
2185 #ifdef CONFIG_FDDI
2186 	case LCS_FRAME_TYPE_FDDI:
2187 		card->lan_type_trans = fddi_type_trans;
2188 		dev = alloc_fddidev(0);
2189 		break;
2190 #endif
2191 	default:
2192 		LCS_DBF_TEXT(3, setup, "errinit");
		pr_err("Initialization failed\n");
2194 		goto out;
2195 	}
2196 	if (!dev)
2197 		goto out;
2198 	card->dev = dev;
2199 	card->dev->ml_priv = card;
2200 	card->dev->netdev_ops = &lcs_netdev_ops;
2201 	memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
2202 #ifdef CONFIG_IP_MULTICAST
2203 	if (!lcs_check_multicast_support(card))
2204 		card->dev->netdev_ops = &lcs_mc_netdev_ops;
2205 #endif
2206 netdev_out:
	lcs_set_allowed_threads(card, 0xffffffff);
2208 	if (recover_state == DEV_STATE_RECOVER) {
2209 		lcs_set_multicast_list(card->dev);
2210 		card->dev->flags |= IFF_UP;
2211 		netif_carrier_on(card->dev);
2212 		netif_wake_queue(card->dev);
2213 		card->state = DEV_STATE_UP;
2214 	} else {
2215 		lcs_stopcard(card);
2216 	}
2217 
2218 	if (lcs_register_netdev(ccwgdev) != 0)
2219 		goto out;
2220 
2221 	/* Print out supported assists: IPv6 */
2222 	pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
2223 		(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
2224 		"with" : "without");
2225 	/* Print out supported assist: Multicast */
2226 	pr_info("LCS device %s %s Multicast support\n", card->dev->name,
2227 		(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
2228 		"with" : "without");
2229 	return 0;
out:
	ccw_device_set_offline(card->write.ccwdev);
2233 out_werr:
2234 	ccw_device_set_offline(card->read.ccwdev);
2235 out_err:
2236 	return -ENODEV;
2237 }
2238 
2239 /**
2240  * lcs_shutdown_device, called when setting the group device offline.
2241  */
2242 static int
2243 __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
2244 {
2245 	struct lcs_card *card;
2246 	enum lcs_dev_states recover_state;
2247 	int ret;
2248 
2249 	LCS_DBF_TEXT(3, setup, "shtdndev");
2250 	card = dev_get_drvdata(&ccwgdev->dev);
2251 	if (!card)
2252 		return -ENODEV;
2253 	if (recovery_mode == 0) {
2254 		lcs_set_allowed_threads(card, 0);
2255 		if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
2256 			return -ERESTARTSYS;
2257 	}
2258 	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2259 	recover_state = card->state;
2260 
2261 	ret = lcs_stop_device(card->dev);
2262 	ret = ccw_device_set_offline(card->read.ccwdev);
2263 	ret = ccw_device_set_offline(card->write.ccwdev);
2264 	if (recover_state == DEV_STATE_UP) {
2265 		card->state = DEV_STATE_RECOVER;
2266 	}
2267 	if (ret)
2268 		return ret;
2269 	return 0;
2270 }
2271 
2272 static int
2273 lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
2274 {
2275 	return __lcs_shutdown_device(ccwgdev, 0);
2276 }
2277 
2278 /**
2279  * drive lcs recovery after startup and startlan initiated by Lan Gateway
2280  */
2281 static int
2282 lcs_recovery(void *ptr)
2283 {
2284 	struct lcs_card *card;
2285 	struct ccwgroup_device *gdev;
	int rc;
2287 
2288 	card = (struct lcs_card *) ptr;
2289 
2290 	LCS_DBF_TEXT(4, trace, "recover1");
2291 	if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
2292 		return 0;
2293 	LCS_DBF_TEXT(4, trace, "recover2");
2294 	gdev = card->gdev;
2295 	dev_warn(&gdev->dev,
2296 		"A recovery process has been started for the LCS device\n");
2297 	rc = __lcs_shutdown_device(gdev, 1);
2298 	rc = lcs_new_device(gdev);
2299 	if (!rc)
2300 		pr_info("Device %s successfully recovered!\n",
2301 			card->dev->name);
2302 	else
2303 		pr_info("Device %s could not be recovered!\n",
2304 			card->dev->name);
2305 	lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
2306 	return 0;
2307 }
2308 
2309 /**
2310  * lcs_remove_device, free buffers and card
2311  */
2312 static void
2313 lcs_remove_device(struct ccwgroup_device *ccwgdev)
2314 {
2315 	struct lcs_card *card;
2316 
2317 	card = dev_get_drvdata(&ccwgdev->dev);
2318 	if (!card)
2319 		return;
2320 
2321 	LCS_DBF_TEXT(3, setup, "remdev");
2322 	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2323 	if (ccwgdev->state == CCWGROUP_ONLINE) {
2324 		lcs_shutdown_device(ccwgdev);
2325 	}
2326 	if (card->dev)
2327 		unregister_netdev(card->dev);
2328 	sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2329 	lcs_cleanup_card(card);
2330 	lcs_free_card(card);
2331 	put_device(&ccwgdev->dev);
2332 }
2333 
2334 static int lcs_pm_suspend(struct lcs_card *card)
2335 {
2336 	if (card->dev)
2337 		netif_device_detach(card->dev);
2338 	lcs_set_allowed_threads(card, 0);
2339 	lcs_wait_for_threads(card, 0xffffffff);
2340 	if (card->state != DEV_STATE_DOWN)
2341 		__lcs_shutdown_device(card->gdev, 1);
2342 	return 0;
2343 }
2344 
2345 static int lcs_pm_resume(struct lcs_card *card)
2346 {
2347 	int rc = 0;
2348 
2349 	if (card->state == DEV_STATE_RECOVER)
2350 		rc = lcs_new_device(card->gdev);
2351 	if (card->dev)
2352 		netif_device_attach(card->dev);
2353 	if (rc) {
2354 		dev_warn(&card->gdev->dev, "The lcs device driver "
2355 			"failed to recover the device\n");
2356 	}
2357 	return rc;
2358 }
2359 
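/*
 * Power management callbacks: prepare and complete are no-ops, freeze
 * maps to lcs_pm_suspend(), thaw and restore both map to lcs_pm_resume().
 */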
2360 static int lcs_prepare(struct ccwgroup_device *gdev)
2361 {
2362 	return 0;
2363 }
2364 
2365 static void lcs_complete(struct ccwgroup_device *gdev)
2366 {
2367 	return;
2368 }
2369 
2370 static int lcs_freeze(struct ccwgroup_device *gdev)
2371 {
2372 	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2373 	return lcs_pm_suspend(card);
2374 }
2375 
2376 static int lcs_thaw(struct ccwgroup_device *gdev)
2377 {
2378 	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2379 	return lcs_pm_resume(card);
2380 }
2381 
2382 static int lcs_restore(struct ccwgroup_device *gdev)
2383 {
2384 	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2385 	return lcs_pm_resume(card);
2386 }
2387 
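/*
 * CCW device IDs handled by this driver. The driver_info value also
 * serves as an index into the lcs_type[] strings reported through the
 * "type" sysfs attribute.
 */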
2388 static struct ccw_device_id lcs_ids[] = {
2389 	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
2390 	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
2391 	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
2392 	{},
2393 };
2394 MODULE_DEVICE_TABLE(ccw, lcs_ids);
2395 
2396 static struct ccw_driver lcs_ccw_driver = {
2397 	.owner	= THIS_MODULE,
2398 	.name	= "lcs",
2399 	.ids	= lcs_ids,
2400 	.probe	= ccwgroup_probe_ccwdev,
2401 	.remove	= ccwgroup_remove_ccwdev,
2402 };
2403 
2404 /**
2405  * LCS ccwgroup driver registration
2406  */
2407 static struct ccwgroup_driver lcs_group_driver = {
2408 	.owner       = THIS_MODULE,
2409 	.name        = "lcs",
2410 	.max_slaves  = 2,
2411 	.driver_id   = 0xD3C3E2,
2412 	.probe       = lcs_probe_device,
2413 	.remove      = lcs_remove_device,
2414 	.set_online  = lcs_new_device,
2415 	.set_offline = lcs_shutdown_device,
2416 	.prepare     = lcs_prepare,
2417 	.complete    = lcs_complete,
2418 	.freeze	     = lcs_freeze,
2419 	.thaw	     = lcs_thaw,
2420 	.restore     = lcs_restore,
2421 };
2422 
2423 static ssize_t
2424 lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2425 		       size_t count)
2426 {
2427 	int err;
2428 	err = ccwgroup_create_from_string(lcs_root_dev,
2429 					  lcs_group_driver.driver_id,
2430 					  &lcs_ccw_driver, 2, buf);
2431 	return err ? err : count;
2432 }
2433 
2434 static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
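/*
 * LCS devices are grouped by writing the bus IDs of the read and write
 * subchannels to the driver's "group" attribute. Example (the bus IDs
 * below are illustrative only; the attribute normally appears under
 * /sys/bus/ccwgroup/drivers/lcs/):
 *	echo 0.0.f000,0.0.f001 > /sys/bus/ccwgroup/drivers/lcs/group
 */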
2435 
2436 static struct attribute *lcs_group_attrs[] = {
2437 	&driver_attr_group.attr,
2438 	NULL,
2439 };
2440 
2441 static struct attribute_group lcs_group_attr_group = {
2442 	.attrs = lcs_group_attrs,
2443 };
2444 
2445 static const struct attribute_group *lcs_group_attr_groups[] = {
2446 	&lcs_group_attr_group,
2447 	NULL,
2448 };
2449 
2450 /**
2451  *  LCS Module/Kernel initialization function
2452  */
2453 static int
2454 __init lcs_init_module(void)
2455 {
2456 	int rc;
2457 
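	/*
	 * Registration order: debug facility, root device, ccw driver,
	 * ccwgroup driver. On failure, everything registered so far is
	 * unwound again in reverse order.
	 */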
2458 	pr_info("Loading %s\n", version);
2459 	rc = lcs_register_debug_facility();
2460 	LCS_DBF_TEXT(0, setup, "lcsinit");
2461 	if (rc)
2462 		goto out_err;
2463 	lcs_root_dev = root_device_register("lcs");
2464 	rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
2465 	if (rc)
2466 		goto register_err;
2467 	rc = ccw_driver_register(&lcs_ccw_driver);
2468 	if (rc)
2469 		goto ccw_err;
2470 	lcs_group_driver.driver.groups = lcs_group_attr_groups;
2471 	rc = ccwgroup_driver_register(&lcs_group_driver);
2472 	if (rc)
2473 		goto ccwgroup_err;
2474 	return 0;
2475 
2476 ccwgroup_err:
2477 	ccw_driver_unregister(&lcs_ccw_driver);
2478 ccw_err:
2479 	root_device_unregister(lcs_root_dev);
2480 register_err:
2481 	lcs_unregister_debug_facility();
2482 out_err:
2483 	pr_err("Initializing the lcs device driver failed\n");
2484 	return rc;
2485 }
2486 
2487 
2488 /**
2489  *  LCS module cleanup function
2490  */
2491 static void
2492 __exit lcs_cleanup_module(void)
2493 {
2494 	pr_info("Terminating lcs module.\n");
2495 	LCS_DBF_TEXT(0, trace, "cleanup");
2496 	driver_remove_file(&lcs_group_driver.driver,
2497 			   &driver_attr_group);
2498 	ccwgroup_driver_unregister(&lcs_group_driver);
2499 	ccw_driver_unregister(&lcs_ccw_driver);
2500 	root_device_unregister(lcs_root_dev);
2501 	lcs_unregister_debug_facility();
2502 }
2503 
2504 module_init(lcs_init_module);
2505 module_exit(lcs_cleanup_module);
2506 
2507 MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
2508 MODULE_LICENSE("GPL");
2509 
2510