xref: /openbmc/linux/drivers/s390/net/lcs.c (revision 63dc02bd)
1 /*
2  *  Linux for S/390 Lan Channel Station Network Driver
3  *
4  *  Copyright IBM Corp. 1999, 2009
5  *  Author(s): Original Code written by
6  *			DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
7  *	       Rewritten by
8  *			Frank Pavlic <fpavlic@de.ibm.com> and
9  *			Martin Schwidefsky <schwidefsky@de.ibm.com>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25 
26 #define KMSG_COMPONENT		"lcs"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 
29 #include <linux/module.h>
30 #include <linux/if.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/trdevice.h>
34 #include <linux/fddidevice.h>
35 #include <linux/inetdevice.h>
36 #include <linux/in.h>
37 #include <linux/igmp.h>
38 #include <linux/delay.h>
39 #include <linux/kthread.h>
40 #include <linux/slab.h>
41 #include <net/arp.h>
42 #include <net/ip.h>
43 
44 #include <asm/debug.h>
45 #include <asm/idals.h>
46 #include <asm/timex.h>
47 #include <linux/device.h>
48 #include <asm/ccwgroup.h>
49 
50 #include "lcs.h"
51 
52 
53 #if !defined(CONFIG_ETHERNET) && \
54     !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
55 #error Cannot compile lcs.c without some net devices switched on.
56 #endif
57 
58 /**
59  * initialization string for output
60  */
61 
62 static char version[] __initdata = "LCS driver";
63 
64 /**
65   * the root device for lcs group devices
66   */
67 static struct device *lcs_root_dev;
68 
69 /**
70  * Some prototypes.
71  */
72 static void lcs_tasklet(unsigned long);
73 static void lcs_start_kernel_thread(struct work_struct *);
74 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
75 #ifdef CONFIG_IP_MULTICAST
76 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
77 #endif /* CONFIG_IP_MULTICAST */
78 static int lcs_recovery(void *ptr);
79 
80 /**
81  * Debug Facility Stuff
82  */
83 static char debug_buffer[255];
84 static debug_info_t *lcs_dbf_setup;
85 static debug_info_t *lcs_dbf_trace;
86 
87 /**
88  *  LCS Debug Facility functions
89  */
90 static void
91 lcs_unregister_debug_facility(void)
92 {
93 	if (lcs_dbf_setup)
94 		debug_unregister(lcs_dbf_setup);
95 	if (lcs_dbf_trace)
96 		debug_unregister(lcs_dbf_trace);
97 }
98 
99 static int
100 lcs_register_debug_facility(void)
101 {
102 	lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
103 	lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
104 	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
105 		pr_err("Not enough memory for debug facility.\n");
106 		lcs_unregister_debug_facility();
107 		return -ENOMEM;
108 	}
109 	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
110 	debug_set_level(lcs_dbf_setup, 2);
111 	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
112 	debug_set_level(lcs_dbf_trace, 2);
113 	return 0;
114 }
115 
116 /**
117  * Allocate io buffers.
118  */
119 static int
120 lcs_alloc_channel(struct lcs_channel *channel)
121 {
122 	int cnt;
123 
124 	LCS_DBF_TEXT(2, setup, "ichalloc");
125 	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
126 		/* alloc memory fo iobuffer */
127 		channel->iob[cnt].data =
128 			kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
129 		if (channel->iob[cnt].data == NULL)
130 			break;
131 		channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
132 	}
133 	if (cnt < LCS_NUM_BUFFS) {
134 		/* Not all io buffers could be allocated. */
135 		LCS_DBF_TEXT(2, setup, "echalloc");
136 		while (cnt-- > 0)
137 			kfree(channel->iob[cnt].data);
138 		return -ENOMEM;
139 	}
140 	return 0;
141 }
142 
143 /**
144  * Free io buffers.
145  */
146 static void
147 lcs_free_channel(struct lcs_channel *channel)
148 {
149 	int cnt;
150 
151 	LCS_DBF_TEXT(2, setup, "ichfree");
152 	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
153 		kfree(channel->iob[cnt].data);
154 		channel->iob[cnt].data = NULL;
155 	}
156 }
157 
/*
 * Cleanup channel: stop the deferred irq work, then release the io
 * buffers. The tasklet must be killed before the buffers it may still
 * reference are freed.
 */
static void
lcs_cleanup_channel(struct lcs_channel *channel)
{
	LCS_DBF_TEXT(3, setup, "cleanch");
	/* Kill write channel tasklets. */
	tasklet_kill(&channel->irq_tasklet);
	/* Free channel buffers. */
	lcs_free_channel(channel);
}
170 
/*
 * Free the card structure itself. Does NOT free the channel io
 * buffers; callers release those via lcs_cleanup_channel() first.
 */
static void
lcs_free_card(struct lcs_card *card)
{
	LCS_DBF_TEXT(2, setup, "remcard");
	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
	kfree(card);
}
181 
182 /**
183  * LCS alloc memory for card and channels
184  */
185 static struct lcs_card *
186 lcs_alloc_card(void)
187 {
188 	struct lcs_card *card;
189 	int rc;
190 
191 	LCS_DBF_TEXT(2, setup, "alloclcs");
192 
193 	card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
194 	if (card == NULL)
195 		return NULL;
196 	card->lan_type = LCS_FRAME_TYPE_AUTO;
197 	card->pkt_seq = 0;
198 	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
199 	/* Allocate io buffers for the read channel. */
200 	rc = lcs_alloc_channel(&card->read);
201 	if (rc){
202 		LCS_DBF_TEXT(2, setup, "iccwerr");
203 		lcs_free_card(card);
204 		return NULL;
205 	}
206 	/* Allocate io buffers for the write channel. */
207 	rc = lcs_alloc_channel(&card->write);
208 	if (rc) {
209 		LCS_DBF_TEXT(2, setup, "iccwerr");
210 		lcs_cleanup_channel(&card->read);
211 		lcs_free_card(card);
212 		return NULL;
213 	}
214 
215 #ifdef CONFIG_IP_MULTICAST
216 	INIT_LIST_HEAD(&card->ipm_list);
217 #endif
218 	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
219 	return card;
220 }
221 
/*
 * Setup read channel: build the channel program for inbound data.
 * One READ ccw per io buffer, closed into a ring by a trailing TIC
 * back to the start.
 */
static void
lcs_setup_read_ccws(struct lcs_card *card)
{
	int cnt;

	LCS_DBF_TEXT(2, setup, "ireadccw");
	/* Setup read ccws. */
	memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
		card->read.ccws[cnt].flags =
			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
		/*
		 * Note: we have allocated the buffer with GFP_DMA, so
		 * we do not need to do set_normalized_cda.
		 */
		card->read.ccws[cnt].cda =
			(__u32) __pa(card->read.iob[cnt].data);
		/* Mark the buffer content as invalid until data arrives. */
		((struct lcs_header *)
		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
		card->read.iob[cnt].callback = lcs_get_frames_cb;
		card->read.iob[cnt].state = LCS_BUF_STATE_READY;
		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
	}
	/* No PCI interrupt for the first and the last data ccw; the
	 * last one starts suspended instead. */
	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
	/* Last ccw is a tic (transfer in channel). */
	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
	card->read.ccws[LCS_NUM_BUFFS].cda =
		(__u32) __pa(card->read.ccws);
	/* Set initial state of the read channel. */
	card->read.state = LCS_CH_STATE_INIT;

	card->read.io_idx = 0;
	card->read.buf_idx = 0;
}
263 
264 static void
265 lcs_setup_read(struct lcs_card *card)
266 {
267 	LCS_DBF_TEXT(3, setup, "initread");
268 
269 	lcs_setup_read_ccws(card);
270 	/* Initialize read channel tasklet. */
271 	card->read.irq_tasklet.data = (unsigned long) &card->read;
272 	card->read.irq_tasklet.func = lcs_tasklet;
273 	/* Initialize waitqueue. */
274 	init_waitqueue_head(&card->read.wait_q);
275 }
276 
277 /*
278  * Setup write channel.
279  */
280 static void
281 lcs_setup_write_ccws(struct lcs_card *card)
282 {
283 	int cnt;
284 
285 	LCS_DBF_TEXT(3, setup, "iwritccw");
286 	/* Setup write ccws. */
287 	memset(card->write.ccws, 0, sizeof(struct ccw1) * LCS_NUM_BUFFS + 1);
288 	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
289 		card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
290 		card->write.ccws[cnt].count = 0;
291 		card->write.ccws[cnt].flags =
292 			CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
293 		/*
294 		 * Note: we have allocated the buffer with GFP_DMA, so
295 		 * we do not need to do set_normalized_cda.
296 		 */
297 		card->write.ccws[cnt].cda =
298 			(__u32) __pa(card->write.iob[cnt].data);
299 	}
300 	/* Last ccw is a tic (transfer in channel). */
301 	card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
302 	card->write.ccws[LCS_NUM_BUFFS].cda =
303 		(__u32) __pa(card->write.ccws);
304 	/* Set initial state of the write channel. */
305 	card->read.state = LCS_CH_STATE_INIT;
306 
307 	card->write.io_idx = 0;
308 	card->write.buf_idx = 0;
309 }
310 
311 static void
312 lcs_setup_write(struct lcs_card *card)
313 {
314 	LCS_DBF_TEXT(3, setup, "initwrit");
315 
316 	lcs_setup_write_ccws(card);
317 	/* Initialize write channel tasklet. */
318 	card->write.irq_tasklet.data = (unsigned long) &card->write;
319 	card->write.irq_tasklet.func = lcs_tasklet;
320 	/* Initialize waitqueue. */
321 	init_waitqueue_head(&card->write.wait_q);
322 }
323 
324 static void
325 lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
326 {
327 	unsigned long flags;
328 
329 	spin_lock_irqsave(&card->mask_lock, flags);
330 	card->thread_allowed_mask = threads;
331 	spin_unlock_irqrestore(&card->mask_lock, flags);
332 	wake_up(&card->wait_q);
333 }
334 static inline int
335 lcs_threads_running(struct lcs_card *card, unsigned long threads)
336 {
337         unsigned long flags;
338         int rc = 0;
339 
340 	spin_lock_irqsave(&card->mask_lock, flags);
341         rc = (card->thread_running_mask & threads);
342 	spin_unlock_irqrestore(&card->mask_lock, flags);
343         return rc;
344 }
345 
346 static int
347 lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
348 {
349         return wait_event_interruptible(card->wait_q,
350                         lcs_threads_running(card, threads) == 0);
351 }
352 
353 static inline int
354 lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
355 {
356         unsigned long flags;
357 
358 	spin_lock_irqsave(&card->mask_lock, flags);
359         if ( !(card->thread_allowed_mask & thread) ||
360               (card->thread_start_mask & thread) ) {
361                 spin_unlock_irqrestore(&card->mask_lock, flags);
362                 return -EPERM;
363         }
364         card->thread_start_mask |= thread;
365 	spin_unlock_irqrestore(&card->mask_lock, flags);
366         return 0;
367 }
368 
369 static void
370 lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
371 {
372         unsigned long flags;
373 
374 	spin_lock_irqsave(&card->mask_lock, flags);
375         card->thread_running_mask &= ~thread;
376 	spin_unlock_irqrestore(&card->mask_lock, flags);
377         wake_up(&card->wait_q);
378 }
379 
/*
 * Atomically check whether a requested thread may transition to
 * running. Returns 1 if the caller won the transition (start bit
 * cleared, running bit set), 0 if no start was requested, -EPERM if
 * a start was requested but the thread is not allowed or already
 * running.
 */
static inline int
__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
{
        unsigned long flags;
        int rc = 0;

	spin_lock_irqsave(&card->mask_lock, flags);
        if (card->thread_start_mask & thread){
                if ((card->thread_allowed_mask & thread) &&
                    !(card->thread_running_mask & thread)){
                        rc = 1;
                        card->thread_start_mask &= ~thread;
                        card->thread_running_mask |= thread;
                } else
                        rc = -EPERM;
        }
	spin_unlock_irqrestore(&card->mask_lock, flags);
        return rc;
}
399 
/*
 * Wait until the thread either wins the start transition (returns 1)
 * or is rejected (-EPERM); keeps sleeping while no start is pending
 * (__lcs_do_run_thread == 0).
 */
static int
lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
{
        int rc = 0;
        wait_event(card->wait_q,
                   (rc = __lcs_do_run_thread(card, thread)) >= 0);
        return rc;
}
408 
409 static int
410 lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
411 {
412         unsigned long flags;
413         int rc = 0;
414 
415 	spin_lock_irqsave(&card->mask_lock, flags);
416         LCS_DBF_TEXT_(4, trace, "  %02x%02x%02x",
417                         (u8) card->thread_start_mask,
418                         (u8) card->thread_allowed_mask,
419                         (u8) card->thread_running_mask);
420         rc = (card->thread_start_mask & thread);
421 	spin_unlock_irqrestore(&card->mask_lock, flags);
422         return rc;
423 }
424 
/*
 * Initialize channels, card state, locks and list heads.
 */
static void
lcs_setup_card(struct lcs_card *card)
{
	LCS_DBF_TEXT(2, setup, "initcard");
	LCS_DBF_HEX(2, setup, &card, sizeof(void*));

	lcs_setup_read(card);
	lcs_setup_write(card);
	/* Set cards initial state. */
	card->state = DEV_STATE_DOWN;
	card->tx_buffer = NULL;
	card->tx_emitted = 0;

	init_waitqueue_head(&card->wait_q);
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ipm_lock);
	spin_lock_init(&card->mask_lock);
#ifdef CONFIG_IP_MULTICAST
	/* NOTE(review): ipm_list is also initialized in lcs_alloc_card;
	 * harmless but redundant. */
	INIT_LIST_HEAD(&card->ipm_list);
#endif
	INIT_LIST_HEAD(&card->lancmd_waiters);
}
450 
/*
 * Drain the card's multicast list. For entries the hardware already
 * knows about (state != SET_REQUIRED), a delipm command is sent; the
 * ipm_lock is dropped around that call because lcs_send_delipm sleeps
 * waiting for the reply.
 */
static inline void
lcs_clear_multicast_list(struct lcs_card *card)
{
#ifdef	CONFIG_IP_MULTICAST
	struct lcs_ipm_list *ipm;
	unsigned long flags;

	/* Free multicast list. */
	LCS_DBF_TEXT(3, setup, "clmclist");
	spin_lock_irqsave(&card->ipm_lock, flags);
	while (!list_empty(&card->ipm_list)){
		ipm = list_entry(card->ipm_list.next,
				 struct lcs_ipm_list, list);
		list_del(&ipm->list);
		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
			/* Entry is set on the card: tell it to forget it. */
			spin_unlock_irqrestore(&card->ipm_lock, flags);
			lcs_send_delipm(card, ipm);
			spin_lock_irqsave(&card->ipm_lock, flags);
		}
		kfree(ipm);
	}
	spin_unlock_irqrestore(&card->ipm_lock, flags);
#endif
}
475 /**
476  * Cleanup channels,card and state machines.
477  */
478 static void
479 lcs_cleanup_card(struct lcs_card *card)
480 {
481 
482 	LCS_DBF_TEXT(3, setup, "cleancrd");
483 	LCS_DBF_HEX(2,setup,&card,sizeof(void*));
484 
485 	if (card->dev != NULL)
486 		free_netdev(card->dev);
487 	/* Cleanup channels. */
488 	lcs_cleanup_channel(&card->write);
489 	lcs_cleanup_channel(&card->read);
490 }
491 
/*
 * Start channel: kick off the ccw program at the current io index.
 * On success the channel enters LCS_CH_STATE_RUNNING.
 */
static int
lcs_start_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      channel->ccws + channel->io_idx, 0, 0,
			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
	if (rc == 0)
		channel->state = LCS_CH_STATE_RUNNING;
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4,trace,"essh%s",
			      dev_name(&channel->ccwdev->dev));
		dev_err(&channel->ccwdev->dev,
			"Starting an LCS device resulted in an error,"
			" rc=%d!\n", rc);
	}
	return rc;
}
518 
/*
 * Clear channel: issue a clear subchannel and wait for the interrupt
 * handler to move the channel to LCS_CH_STATE_CLEARED, then mark it
 * stopped.
 */
static int
lcs_clear_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	LCS_DBF_TEXT(4,trace,"clearch");
	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ecsc%s",
			      dev_name(&channel->ccwdev->dev));
		return rc;
	}
	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
	channel->state = LCS_CH_STATE_STOPPED;
	return rc;
}
539 
540 
/*
 * Stop channel: halt the subchannel, wait for the halt to complete,
 * then clear the channel. No-op if the channel is already stopped.
 */
static int
lcs_stop_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	if (channel->state == LCS_CH_STATE_STOPPED)
		return 0;
	LCS_DBF_TEXT(4,trace,"haltsch");
	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
	channel->state = LCS_CH_STATE_INIT;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ehsc%s",
			      dev_name(&channel->ccwdev->dev));
		return rc;
	}
	/* Asynchronous halt initiated. Wait for its completion. */
	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
	lcs_clear_channel(channel);
	return 0;
}
568 
569 /**
570  * start read and write channel
571  */
572 static int
573 lcs_start_channels(struct lcs_card *card)
574 {
575 	int rc;
576 
577 	LCS_DBF_TEXT(2, trace, "chstart");
578 	/* start read channel */
579 	rc = lcs_start_channel(&card->read);
580 	if (rc)
581 		return rc;
582 	/* start write channel */
583 	rc = lcs_start_channel(&card->write);
584 	if (rc)
585 		lcs_stop_channel(&card->read);
586 	return rc;
587 }
588 
589 /**
590  * stop read and write channel
591  */
592 static int
593 lcs_stop_channels(struct lcs_card *card)
594 {
595 	LCS_DBF_TEXT(2, trace, "chhalt");
596 	lcs_stop_channel(&card->read);
597 	lcs_stop_channel(&card->write);
598 	return 0;
599 }
600 
601 /**
602  * Get empty buffer.
603  */
604 static struct lcs_buffer *
605 __lcs_get_buffer(struct lcs_channel *channel)
606 {
607 	int index;
608 
609 	LCS_DBF_TEXT(5, trace, "_getbuff");
610 	index = channel->io_idx;
611 	do {
612 		if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
613 			channel->iob[index].state = LCS_BUF_STATE_LOCKED;
614 			return channel->iob + index;
615 		}
616 		index = (index + 1) & (LCS_NUM_BUFFS - 1);
617 	} while (index != channel->io_idx);
618 	return NULL;
619 }
620 
621 static struct lcs_buffer *
622 lcs_get_buffer(struct lcs_channel *channel)
623 {
624 	struct lcs_buffer *buffer;
625 	unsigned long flags;
626 
627 	LCS_DBF_TEXT(5, trace, "getbuff");
628 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
629 	buffer = __lcs_get_buffer(channel);
630 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
631 	return buffer;
632 }
633 
/*
 * Resume channel program if the channel is suspended.
 * No-op unless the channel is in SUSPENDED state and the current ccw
 * does not carry the suspend flag. Caller must hold the ccw device
 * lock (or run in irq context).
 */
static int
__lcs_resume_channel(struct lcs_channel *channel)
{
	int rc;

	if (channel->state != LCS_CH_STATE_SUSPENDED)
		return 0;
	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
		return 0;
	LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
	rc = ccw_device_resume(channel->ccwdev);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ersc%s",
			      dev_name(&channel->ccwdev->dev));
		dev_err(&channel->ccwdev->dev,
			"Sending data from the LCS device to the LAN failed"
			" with rc=%d\n",rc);
	} else
		channel->state = LCS_CH_STATE_RUNNING;
	return rc;

}
659 
/*
 * Make a buffer ready for processing: adjust the suspend/PCI flags of
 * the ccw at @index relative to its ring neighbours. The suspend bit
 * is only cleared while the next buffer still has it set, so the
 * channel program always keeps at least one suspend point.
 */
static inline void
__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
{
	int prev, next;

	LCS_DBF_TEXT(5, trace, "rdybits");
	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
	next = (index + 1) & (LCS_NUM_BUFFS - 1);
	/* Check if we may clear the suspend bit of this buffer. */
	if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
		/* Check if we have to set the PCI bit. */
		if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
			/* Suspend bit of the previous buffer is not set. */
			channel->ccws[index].flags |= CCW_FLAG_PCI;
		/* Suspend bit of the next buffer is set. */
		channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
	}
}
681 
/*
 * Hand a locked or processed buffer to the channel: mark it READY,
 * set the transfer length in its ccw, fix up the suspend/PCI bits and
 * resume the channel if it was suspended.
 */
static int
lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	unsigned long flags;
	int index, rc;

	LCS_DBF_TEXT(5, trace, "rdybuff");
	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
	       buffer->state != LCS_BUF_STATE_PROCESSED);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	buffer->state = LCS_BUF_STATE_READY;
	index = buffer - channel->iob;
	/* Set length. */
	channel->ccws[index].count = buffer->count;
	/* Check relevant PCI/suspend bits. */
	__lcs_ready_buffer_bits(channel, index);
	rc = __lcs_resume_channel(channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	return rc;
}
702 
/*
 * Mark the buffer as processed. Take care of the suspend bit
 * of the previous buffer. This function is called from
 * interrupt context, so the lock must not be taken.
 */
static int
__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	int index, prev, next;

	LCS_DBF_TEXT(5, trace, "prcsbuff");
	BUG_ON(buffer->state != LCS_BUF_STATE_READY);
	buffer->state = LCS_BUF_STATE_PROCESSED;
	index = buffer - channel->iob;
	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
	next = (index + 1) & (LCS_NUM_BUFFS - 1);
	/* Set the suspend bit and clear the PCI bit of this buffer. */
	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
	/* Check the suspend bit of the previous buffer. */
	if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
		/*
		 * Previous buffer is in state ready. It might have
		 * happened in lcs_ready_buffer that the suspend bit
		 * has not been cleared to avoid an endless loop.
		 * Do it now.
		 */
		__lcs_ready_buffer_bits(channel, prev);
	}
	/* Clear PCI bit of next buffer. */
	channel->ccws[next].flags &= ~CCW_FLAG_PCI;
	return __lcs_resume_channel(channel);
}
736 
737 /**
738  * Put a processed buffer back to state empty.
739  */
740 static void
741 lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
742 {
743 	unsigned long flags;
744 
745 	LCS_DBF_TEXT(5, trace, "relbuff");
746 	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
747 	       buffer->state != LCS_BUF_STATE_PROCESSED);
748 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
749 	buffer->state = LCS_BUF_STATE_EMPTY;
750 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
751 }
752 
/*
 * Get buffer for a lan command: sleeps until a write buffer is free,
 * then prepares the lcs_cmd header in it.
 * @count: command payload size in bytes (without the lcs_header and
 *	   the trailing __u16 terminator).
 */
static struct lcs_buffer *
lcs_get_lancmd(struct lcs_card *card, int count)
{
	struct lcs_buffer *buffer;
	struct lcs_cmd *cmd;

	LCS_DBF_TEXT(4, trace, "getlncmd");
	/* Get buffer and wait if none is available. */
	wait_event(card->write.wait_q,
		   ((buffer = lcs_get_buffer(&card->write)) != NULL));
	count += sizeof(struct lcs_header);
	/* Zero __u16 after the command terminates the frame chain. */
	*(__u16 *)(buffer->data + count) = 0;
	buffer->count = count + sizeof(__u16);
	buffer->callback = lcs_release_buffer;
	cmd = (struct lcs_cmd *) buffer->data;
	cmd->offset = count;
	cmd->type = LCS_FRAME_TYPE_CONTROL;
	cmd->slot = 0;
	return buffer;
}
776 
777 
/* Take an additional reference on a reply object. */
static void
lcs_get_reply(struct lcs_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	atomic_inc(&reply->refcnt);
}
784 
785 static void
786 lcs_put_reply(struct lcs_reply *reply)
787 {
788         WARN_ON(atomic_read(&reply->refcnt) <= 0);
789         if (atomic_dec_and_test(&reply->refcnt)) {
790 		kfree(reply);
791 	}
792 
793 }
794 
795 static struct lcs_reply *
796 lcs_alloc_reply(struct lcs_cmd *cmd)
797 {
798 	struct lcs_reply *reply;
799 
800 	LCS_DBF_TEXT(4, trace, "getreply");
801 
802 	reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
803 	if (!reply)
804 		return NULL;
805 	atomic_set(&reply->refcnt,1);
806 	reply->sequence_no = cmd->sequence_no;
807 	reply->received = 0;
808 	reply->rc = 0;
809 	init_waitqueue_head(&reply->wait_q);
810 
811 	return reply;
812 }
813 
/*
 * Notifier function for lancmd replies. Called from read irq.
 * Matches the incoming command's sequence number against the waiter
 * list, runs the waiter's callback, and wakes it up.
 */
static void
lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
{
	struct list_head *l, *n;
	struct lcs_reply *reply;

	LCS_DBF_TEXT(4, trace, "notiwait");
	spin_lock(&card->lock);
	list_for_each_safe(l, n, &card->lancmd_waiters) {
		reply = list_entry(l, struct lcs_reply, list);
		if (reply->sequence_no == cmd->sequence_no) {
			/* Hold a ref while we still touch the reply. */
			lcs_get_reply(reply);
			list_del_init(&reply->list);
			if (reply->callback != NULL)
				reply->callback(card, cmd);
			reply->received = 1;
			reply->rc = cmd->return_code;
			wake_up(&reply->wait_q);
			lcs_put_reply(reply);
			break;
		}
	}
	spin_unlock(&card->lock);
}
841 
/*
 * Timer callback for lancmd replies: if the reply is still on the
 * waiter list when the timeout fires, take it off the list and wake
 * the waiter with rc = -ETIME. If it is no longer listed, the answer
 * already arrived and there is nothing to do.
 */
static void
lcs_lancmd_timeout(unsigned long data)
{
	struct lcs_reply *reply, *list_reply, *r;
	unsigned long flags;

	LCS_DBF_TEXT(4, trace, "timeout");
	reply = (struct lcs_reply *) data;
	spin_lock_irqsave(&reply->card->lock, flags);
	list_for_each_entry_safe(list_reply, r,
				 &reply->card->lancmd_waiters,list) {
		if (reply == list_reply) {
			lcs_get_reply(reply);
			list_del_init(&reply->list);
			spin_unlock_irqrestore(&reply->card->lock, flags);
			reply->received = 1;
			reply->rc = -ETIME;
			wake_up(&reply->wait_q);
			lcs_put_reply(reply);
			return;
		}
	}
	spin_unlock_irqrestore(&reply->card->lock, flags);
}
869 
870 static int
871 lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
872 		void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
873 {
874 	struct lcs_reply *reply;
875 	struct lcs_cmd *cmd;
876 	struct timer_list timer;
877 	unsigned long flags;
878 	int rc;
879 
880 	LCS_DBF_TEXT(4, trace, "sendcmd");
881 	cmd = (struct lcs_cmd *) buffer->data;
882 	cmd->return_code = 0;
883 	cmd->sequence_no = card->sequence_no++;
884 	reply = lcs_alloc_reply(cmd);
885 	if (!reply)
886 		return -ENOMEM;
887 	reply->callback = reply_callback;
888 	reply->card = card;
889 	spin_lock_irqsave(&card->lock, flags);
890 	list_add_tail(&reply->list, &card->lancmd_waiters);
891 	spin_unlock_irqrestore(&card->lock, flags);
892 
893 	buffer->callback = lcs_release_buffer;
894 	rc = lcs_ready_buffer(&card->write, buffer);
895 	if (rc)
896 		return rc;
897 	init_timer_on_stack(&timer);
898 	timer.function = lcs_lancmd_timeout;
899 	timer.data = (unsigned long) reply;
900 	timer.expires = jiffies + HZ*card->lancmd_timeout;
901 	add_timer(&timer);
902 	wait_event(reply->wait_q, reply->received);
903 	del_timer_sync(&timer);
904 	LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
905 	rc = reply->rc;
906 	lcs_put_reply(reply);
907 	return rc ? -EIO : 0;
908 }
909 
910 /**
911  * LCS startup command
912  */
913 static int
914 lcs_send_startup(struct lcs_card *card, __u8 initiator)
915 {
916 	struct lcs_buffer *buffer;
917 	struct lcs_cmd *cmd;
918 
919 	LCS_DBF_TEXT(2, trace, "startup");
920 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
921 	cmd = (struct lcs_cmd *) buffer->data;
922 	cmd->cmd_code = LCS_CMD_STARTUP;
923 	cmd->initiator = initiator;
924 	cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
925 	return lcs_send_lancmd(card, buffer, NULL);
926 }
927 
928 /**
929  * LCS shutdown command
930  */
931 static int
932 lcs_send_shutdown(struct lcs_card *card)
933 {
934 	struct lcs_buffer *buffer;
935 	struct lcs_cmd *cmd;
936 
937 	LCS_DBF_TEXT(2, trace, "shutdown");
938 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
939 	cmd = (struct lcs_cmd *) buffer->data;
940 	cmd->cmd_code = LCS_CMD_SHUTDOWN;
941 	cmd->initiator = LCS_INITIATOR_TCPIP;
942 	return lcs_send_lancmd(card, buffer, NULL);
943 }
944 
/*
 * Reply callback for the lanstat command: copies the card's MAC
 * address out of the reply.
 */
static void
__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
{
	LCS_DBF_TEXT(2, trace, "statcb");
	memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
}
954 
955 static int
956 lcs_send_lanstat(struct lcs_card *card)
957 {
958 	struct lcs_buffer *buffer;
959 	struct lcs_cmd *cmd;
960 
961 	LCS_DBF_TEXT(2,trace, "cmdstat");
962 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
963 	cmd = (struct lcs_cmd *) buffer->data;
964 	/* Setup lanstat command. */
965 	cmd->cmd_code = LCS_CMD_LANSTAT;
966 	cmd->initiator = LCS_INITIATOR_TCPIP;
967 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
968 	cmd->cmd.lcs_std_cmd.portno = card->portno;
969 	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
970 }
971 
972 /**
973  * send stoplan command
974  */
975 static int
976 lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
977 {
978 	struct lcs_buffer *buffer;
979 	struct lcs_cmd *cmd;
980 
981 	LCS_DBF_TEXT(2, trace, "cmdstpln");
982 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
983 	cmd = (struct lcs_cmd *) buffer->data;
984 	cmd->cmd_code = LCS_CMD_STOPLAN;
985 	cmd->initiator = initiator;
986 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
987 	cmd->cmd.lcs_std_cmd.portno = card->portno;
988 	return lcs_send_lancmd(card, buffer, NULL);
989 }
990 
/*
 * Reply callback for the startlan command: records the lan type and
 * port number the card actually selected.
 */
static void
__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
{
	LCS_DBF_TEXT(2, trace, "srtlancb");
	card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
	card->portno = cmd->cmd.lcs_std_cmd.portno;
}
1001 
1002 static int
1003 lcs_send_startlan(struct lcs_card *card, __u8 initiator)
1004 {
1005 	struct lcs_buffer *buffer;
1006 	struct lcs_cmd *cmd;
1007 
1008 	LCS_DBF_TEXT(2, trace, "cmdstaln");
1009 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1010 	cmd = (struct lcs_cmd *) buffer->data;
1011 	cmd->cmd_code = LCS_CMD_STARTLAN;
1012 	cmd->initiator = initiator;
1013 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
1014 	cmd->cmd.lcs_std_cmd.portno = card->portno;
1015 	return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
1016 }
1017 
1018 #ifdef CONFIG_IP_MULTICAST
1019 /**
1020  * send setipm command (Multicast)
1021  */
1022 static int
1023 lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1024 {
1025 	struct lcs_buffer *buffer;
1026 	struct lcs_cmd *cmd;
1027 
1028 	LCS_DBF_TEXT(2, trace, "cmdsetim");
1029 	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1030 	cmd = (struct lcs_cmd *) buffer->data;
1031 	cmd->cmd_code = LCS_CMD_SETIPM;
1032 	cmd->initiator = LCS_INITIATOR_TCPIP;
1033 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1034 	cmd->cmd.lcs_qipassist.portno = card->portno;
1035 	cmd->cmd.lcs_qipassist.version = 4;
1036 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1037 	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1038 	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1039 	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1040 	return lcs_send_lancmd(card, buffer, NULL);
1041 }
1042 
1043 /**
1044  * send delipm command (Multicast)
1045  */
1046 static int
1047 lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
1048 {
1049 	struct lcs_buffer *buffer;
1050 	struct lcs_cmd *cmd;
1051 
1052 	LCS_DBF_TEXT(2, trace, "cmddelim");
1053 	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
1054 	cmd = (struct lcs_cmd *) buffer->data;
1055 	cmd->cmd_code = LCS_CMD_DELIPM;
1056 	cmd->initiator = LCS_INITIATOR_TCPIP;
1057 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1058 	cmd->cmd.lcs_qipassist.portno = card->portno;
1059 	cmd->cmd.lcs_qipassist.version = 4;
1060 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1061 	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
1062 	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
1063 	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
1064 	return lcs_send_lancmd(card, buffer, NULL);
1065 }
1066 
1067 /**
1068  * check if multicast is supported by LCS
1069  */
1070 static void
1071 __lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
1072 {
1073 	LCS_DBF_TEXT(2, trace, "chkmccb");
1074 	card->ip_assists_supported =
1075 		cmd->cmd.lcs_qipassist.ip_assists_supported;
1076 	card->ip_assists_enabled =
1077 		cmd->cmd.lcs_qipassist.ip_assists_enabled;
1078 }
1079 
1080 static int
1081 lcs_check_multicast_support(struct lcs_card *card)
1082 {
1083 	struct lcs_buffer *buffer;
1084 	struct lcs_cmd *cmd;
1085 	int rc;
1086 
1087 	LCS_DBF_TEXT(2, trace, "cmdqipa");
1088 	/* Send query ipassist. */
1089 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
1090 	cmd = (struct lcs_cmd *) buffer->data;
1091 	cmd->cmd_code = LCS_CMD_QIPASSIST;
1092 	cmd->initiator = LCS_INITIATOR_TCPIP;
1093 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
1094 	cmd->cmd.lcs_qipassist.portno = card->portno;
1095 	cmd->cmd.lcs_qipassist.version = 4;
1096 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
1097 	rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
1098 	if (rc != 0) {
1099 		pr_err("Query IPAssist failed. Assuming unsupported!\n");
1100 		return -EOPNOTSUPP;
1101 	}
1102 	if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
1103 		return 0;
1104 	return -EOPNOTSUPP;
1105 }
1106 
1107 /**
1108  * set or del multicast address on LCS card
1109  */
static void
lcs_fix_multicast_list(struct lcs_card *card)
{
	struct list_head failed_list;
	struct lcs_ipm_list *ipm, *tmp;
	unsigned long flags;
	int rc;

	LCS_DBF_TEXT(4,trace, "fixipm");
	INIT_LIST_HEAD(&failed_list);
	spin_lock_irqsave(&card->ipm_lock, flags);
	/* lcs_send_setipm/delipm may sleep, so the lock is dropped around
	 * each command; after a drop the list may have changed, hence the
	 * restart from the top via the list_modified label. */
list_modified:
	list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
		switch (ipm->ipm_state) {
		case LCS_IPM_STATE_SET_REQUIRED:
			/* del from ipm_list so no one else can tamper with
			 * this entry */
			list_del_init(&ipm->list);
			spin_unlock_irqrestore(&card->ipm_lock, flags);
			rc = lcs_send_setipm(card, ipm);
			spin_lock_irqsave(&card->ipm_lock, flags);
			if (rc) {
				pr_info("Adding multicast address failed."
					" Table possibly full!\n");
				/* store ipm in failed list -> will be added
				 * to ipm_list again, so a retry will be done
				 * during the next call of this function */
				list_add_tail(&ipm->list, &failed_list);
			} else {
				ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
				/* re-insert into ipm_list */
				list_add_tail(&ipm->list, &card->ipm_list);
			}
			goto list_modified;
		case LCS_IPM_STATE_DEL_REQUIRED:
			/* Entry is gone from the kernel's list: remove it
			 * from the card (lock dropped for the command) and
			 * free it. */
			list_del(&ipm->list);
			spin_unlock_irqrestore(&card->ipm_lock, flags);
			lcs_send_delipm(card, ipm);
			spin_lock_irqsave(&card->ipm_lock, flags);
			kfree(ipm);
			goto list_modified;
		case LCS_IPM_STATE_ON_CARD:
			/* Nothing to do for entries already programmed. */
			break;
		}
	}
	/* re-insert all entries from the failed_list into ipm_list */
	list_for_each_entry_safe(ipm, tmp, &failed_list, list)
		list_move_tail(&ipm->list, &card->ipm_list);

	spin_unlock_irqrestore(&card->ipm_lock, flags);
}
1161 
1162 /**
1163  * get mac address for the relevant Multicast address
1164  */
1165 static void
1166 lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
1167 {
1168 	LCS_DBF_TEXT(4,trace, "getmac");
1169 	if (dev->type == ARPHRD_IEEE802_TR)
1170 		ip_tr_mc_map(ipm, mac);
1171 	else
1172 		ip_eth_mc_map(ipm, mac);
1173 }
1174 
1175 /**
1176  * function called by net device to handle multicast address relevant things
1177  */
1178 static inline void
1179 lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1180 {
1181 	struct ip_mc_list *im4;
1182 	struct list_head *l;
1183 	struct lcs_ipm_list *ipm;
1184 	unsigned long flags;
1185 	char buf[MAX_ADDR_LEN];
1186 
1187 	LCS_DBF_TEXT(4, trace, "remmclst");
1188 	spin_lock_irqsave(&card->ipm_lock, flags);
1189 	list_for_each(l, &card->ipm_list) {
1190 		ipm = list_entry(l, struct lcs_ipm_list, list);
1191 		for (im4 = rcu_dereference(in4_dev->mc_list);
1192 		     im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
1193 			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1194 			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
1195 			     (memcmp(buf, &ipm->ipm.mac_addr,
1196 				     LCS_MAC_LENGTH) == 0) )
1197 				break;
1198 		}
1199 		if (im4 == NULL)
1200 			ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
1201 	}
1202 	spin_unlock_irqrestore(&card->ipm_lock, flags);
1203 }
1204 
1205 static inline struct lcs_ipm_list *
1206 lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
1207 {
1208 	struct lcs_ipm_list *tmp, *ipm = NULL;
1209 	struct list_head *l;
1210 	unsigned long flags;
1211 
1212 	LCS_DBF_TEXT(4, trace, "chkmcent");
1213 	spin_lock_irqsave(&card->ipm_lock, flags);
1214 	list_for_each(l, &card->ipm_list) {
1215 		tmp = list_entry(l, struct lcs_ipm_list, list);
1216 		if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
1217 		     (memcmp(buf, &tmp->ipm.mac_addr,
1218 			     LCS_MAC_LENGTH) == 0) ) {
1219 			ipm = tmp;
1220 			break;
1221 		}
1222 	}
1223 	spin_unlock_irqrestore(&card->ipm_lock, flags);
1224 	return ipm;
1225 }
1226 
1227 static inline void
1228 lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
1229 {
1230 
1231 	struct ip_mc_list *im4;
1232 	struct lcs_ipm_list *ipm;
1233 	char buf[MAX_ADDR_LEN];
1234 	unsigned long flags;
1235 
1236 	LCS_DBF_TEXT(4, trace, "setmclst");
1237 	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
1238 	     im4 = rcu_dereference(im4->next_rcu)) {
1239 		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
1240 		ipm = lcs_check_addr_entry(card, im4, buf);
1241 		if (ipm != NULL)
1242 			continue;	/* Address already in list. */
1243 		ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
1244 		if (ipm == NULL) {
1245 			pr_info("Not enough memory to add"
1246 				" new multicast entry!\n");
1247 			break;
1248 		}
1249 		memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
1250 		ipm->ipm.ip_addr = im4->multiaddr;
1251 		ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
1252 		spin_lock_irqsave(&card->ipm_lock, flags);
1253 		LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4);
1254 		list_add(&ipm->list, &card->ipm_list);
1255 		spin_unlock_irqrestore(&card->ipm_lock, flags);
1256 	}
1257 }
1258 
static int
lcs_register_mc_addresses(void *data)
{
	struct lcs_card *card;
	struct in_device *in4_dev;

	card = (struct lcs_card *) data;

	/* Kernel-thread entry point: bail out if the thread is not
	 * (or no longer) supposed to run. */
	if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
		return 0;
	LCS_DBF_TEXT(4, trace, "regmulti");

	in4_dev = in_dev_get(card->dev);
	if (in4_dev == NULL)
		goto out;
	/* Mark stale entries for deletion, then add new ones; the
	 * kernel's mc_list is traversed under RCU. */
	rcu_read_lock();
	lcs_remove_mc_addresses(card,in4_dev);
	lcs_set_mc_addresses(card, in4_dev);
	rcu_read_unlock();
	in_dev_put(in4_dev);

	/* Quiesce transmission before reprogramming the card. */
	netif_carrier_off(card->dev);
	netif_tx_disable(card->dev);
	wait_event(card->write.wait_q,
			(card->write.state != LCS_CH_STATE_RUNNING));
	lcs_fix_multicast_list(card);
	if (card->state == DEV_STATE_UP) {
		netif_carrier_on(card->dev);
		netif_wake_queue(card->dev);
	}
out:
	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
	return 0;
}
1293 #endif /* CONFIG_IP_MULTICAST */
1294 
1295 /**
1296  * function called by net device to
1297  * handle multicast address relevant things
1298  */
static void
lcs_set_multicast_list(struct net_device *dev)
{
#ifdef CONFIG_IP_MULTICAST
	struct lcs_card *card = (struct lcs_card *) dev->ml_priv;

	LCS_DBF_TEXT(4, trace, "setmulti");
	/* Defer the actual work to the LCS_SET_MC_THREAD kernel thread
	 * (lcs_register_mc_addresses); only request a start here. */
	if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
		schedule_work(&card->kernel_thread_starter);
#endif /* CONFIG_IP_MULTICAST */
}
1312 
1313 static long
1314 lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1315 {
1316 	if (!IS_ERR(irb))
1317 		return 0;
1318 
1319 	switch (PTR_ERR(irb)) {
1320 	case -EIO:
1321 		dev_warn(&cdev->dev,
1322 			"An I/O-error occurred on the LCS device\n");
1323 		LCS_DBF_TEXT(2, trace, "ckirberr");
1324 		LCS_DBF_TEXT_(2, trace, "  rc%d", -EIO);
1325 		break;
1326 	case -ETIMEDOUT:
1327 		dev_warn(&cdev->dev,
1328 			"A command timed out on the LCS device\n");
1329 		LCS_DBF_TEXT(2, trace, "ckirberr");
1330 		LCS_DBF_TEXT_(2, trace, "  rc%d", -ETIMEDOUT);
1331 		break;
1332 	default:
1333 		dev_warn(&cdev->dev,
1334 			"An error occurred on the LCS device, rc=%ld\n",
1335 			PTR_ERR(irb));
1336 		LCS_DBF_TEXT(2, trace, "ckirberr");
1337 		LCS_DBF_TEXT(2, trace, "  rc???");
1338 	}
1339 	return PTR_ERR(irb);
1340 }
1341 
1342 static int
1343 lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1344 {
1345 	int dstat, cstat;
1346 	char *sense;
1347 
1348 	sense = (char *) irb->ecw;
1349 	cstat = irb->scsw.cmd.cstat;
1350 	dstat = irb->scsw.cmd.dstat;
1351 
1352 	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1353 		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
1354 		     SCHN_STAT_PROT_CHECK   | SCHN_STAT_PROG_CHECK)) {
1355 		LCS_DBF_TEXT(2, trace, "CGENCHK");
1356 		return 1;
1357 	}
1358 	if (dstat & DEV_STAT_UNIT_CHECK) {
1359 		if (sense[LCS_SENSE_BYTE_1] &
1360 		    LCS_SENSE_RESETTING_EVENT) {
1361 			LCS_DBF_TEXT(2, trace, "REVIND");
1362 			return 1;
1363 		}
1364 		if (sense[LCS_SENSE_BYTE_0] &
1365 		    LCS_SENSE_CMD_REJECT) {
1366 			LCS_DBF_TEXT(2, trace, "CMDREJ");
1367 			return 0;
1368 		}
1369 		if ((!sense[LCS_SENSE_BYTE_0]) &&
1370 		    (!sense[LCS_SENSE_BYTE_1]) &&
1371 		    (!sense[LCS_SENSE_BYTE_2]) &&
1372 		    (!sense[LCS_SENSE_BYTE_3])) {
1373 			LCS_DBF_TEXT(2, trace, "ZEROSEN");
1374 			return 0;
1375 		}
1376 		LCS_DBF_TEXT(2, trace, "DGENCHK");
1377 		return 1;
1378 	}
1379 	return 0;
1380 }
1381 
1382 static void
1383 lcs_schedule_recovery(struct lcs_card *card)
1384 {
1385 	LCS_DBF_TEXT(2, trace, "startrec");
1386 	if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
1387 		schedule_work(&card->kernel_thread_starter);
1388 }
1389 
1390 /**
1391  * IRQ Handler for LCS channels
1392  */
static void
lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct lcs_card *card;
	struct lcs_channel *channel;
	int rc, index;
	int cstat, dstat;

	/* Bail out if the IRB pointer itself encodes an error. */
	if (lcs_check_irb_error(cdev, irb))
		return;

	/* Map the interrupting subchannel to our read or write channel. */
	card = CARD_FROM_DEV(cdev);
	if (card->read.ccwdev == cdev)
		channel = &card->read;
	else
		channel = &card->write;

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;
	LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
		      irb->scsw.cmd.dstat);
	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
		      irb->scsw.cmd.actl);

	/* Check for channel and device errors presented */
	rc = lcs_get_problem(cdev, irb);
	if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
		dev_warn(&cdev->dev,
			"The LCS device stopped because of an error,"
			" dstat=0x%X, cstat=0x%X \n",
			    dstat, cstat);
		if (rc) {
			channel->state = LCS_CH_STATE_ERROR;
		}
	}
	if (channel->state == LCS_CH_STATE_ERROR) {
		/* Fatal channel error: hand over to the recovery thread. */
		lcs_schedule_recovery(card);
		wake_up(&card->wait_q);
		return;
	}
	/* How far in the ccw chain have we processed? */
	if ((channel->state != LCS_CH_STATE_INIT) &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (irb->scsw.cmd.cpa != 0)) {
		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
			- channel->ccws;
		if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
		    (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
			/* Bloody io subsystem tells us lies about cpa... */
			index = (index - 1) & (LCS_NUM_BUFFS - 1);
		/* Mark every buffer up to the reported CCW as processed. */
		while (channel->io_idx != index) {
			__lcs_processed_buffer(channel,
					       channel->iob + channel->io_idx);
			channel->io_idx =
				(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
		}
	}

	if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
	    (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
		/* Mark channel as stopped. */
		channel->state = LCS_CH_STATE_STOPPED;
	else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
		/* CCW execution stopped on a suspend bit. */
		channel->state = LCS_CH_STATE_SUSPENDED;
	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		if (irb->scsw.cmd.cc != 0) {
			/* Halt was not accepted (cc != 0): reissue it. */
			ccw_device_halt(channel->ccwdev, (addr_t) channel);
			return;
		}
		/* The channel has been stopped by halt_IO. */
		channel->state = LCS_CH_STATE_HALTED;
	}
	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
		channel->state = LCS_CH_STATE_CLEARED;
	/* Do the rest in the tasklet. */
	tasklet_schedule(&channel->irq_tasklet);
}
1473 
1474 /**
1475  * Tasklet for IRQ handler
1476  */
static void
lcs_tasklet(unsigned long data)
{
	unsigned long flags;
	struct lcs_channel *channel;
	struct lcs_buffer *iob;
	int buf_idx;

	channel = (struct lcs_channel *) data;
	LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));

	/* Check for processed buffers. */
	iob = channel->iob;
	buf_idx = channel->buf_idx;
	while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
		/* Do the callback thing. */
		if (iob[buf_idx].callback != NULL)
			iob[buf_idx].callback(channel, iob + buf_idx);
		/* Ring buffer: advance modulo LCS_NUM_BUFFS. */
		buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
	}
	channel->buf_idx = buf_idx;

	/* Restart a stopped channel; resume a suspended one when its
	 * next buffer is ready. The resume runs under the ccwdev lock. */
	if (channel->state == LCS_CH_STATE_STOPPED)
		lcs_start_channel(channel);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	if (channel->state == LCS_CH_STATE_SUSPENDED &&
	    channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
		__lcs_resume_channel(channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	/* Something happened on the channel. Wake up waiters. */
	wake_up(&channel->wait_q);
}
1510 
1511 /**
1512  * Finish current tx buffer and make it ready for transmit.
1513  */
1514 static void
1515 __lcs_emit_txbuffer(struct lcs_card *card)
1516 {
1517 	LCS_DBF_TEXT(5, trace, "emittx");
1518 	*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
1519 	card->tx_buffer->count += 2;
1520 	lcs_ready_buffer(&card->write, card->tx_buffer);
1521 	card->tx_buffer = NULL;
1522 	card->tx_emitted++;
1523 }
1524 
1525 /**
1526  * Callback for finished tx buffers.
1527  */
static void
lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	struct lcs_card *card;

	LCS_DBF_TEXT(5, trace, "txbuffcb");
	/* Put buffer back to pool. */
	lcs_release_buffer(channel, buffer);
	card = container_of(channel, struct lcs_card, write);
	/* A buffer slot became free: let the stack transmit again. */
	if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
		netif_wake_queue(card->dev);
	/* card->lock protects tx_emitted and tx_buffer. */
	spin_lock(&card->lock);
	card->tx_emitted--;
	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
		/*
		 * Last running tx buffer has finished. Submit partially
		 * filled current buffer.
		 */
		__lcs_emit_txbuffer(card);
	spin_unlock(&card->lock);
}
1549 
1550 /**
1551  * Packet transmit function called by network stack
1552  */
static int
__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
		 struct net_device *dev)
{
	struct lcs_header *header;
	int rc = NETDEV_TX_OK;

	LCS_DBF_TEXT(5, trace, "hardxmit");
	if (skb == NULL) {
		card->stats.tx_dropped++;
		card->stats.tx_errors++;
		return NETDEV_TX_OK;
	}
	if (card->state != DEV_STATE_UP) {
		/* Card is down: count the drop but report success. */
		dev_kfree_skb(skb);
		card->stats.tx_dropped++;
		card->stats.tx_errors++;
		card->stats.tx_carrier_errors++;
		return NETDEV_TX_OK;
	}
	if (skb->protocol == htons(ETH_P_IPV6)) {
		/* IPv6 frames are dropped silently. */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	netif_stop_queue(card->dev);
	/* card->lock protects tx_buffer / tx_emitted. */
	spin_lock(&card->lock);
	if (card->tx_buffer != NULL &&
	    card->tx_buffer->count + sizeof(struct lcs_header) +
	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
		/* skb too big for current tx buffer. */
		__lcs_emit_txbuffer(card);
	if (card->tx_buffer == NULL) {
		/* Get new tx buffer */
		card->tx_buffer = lcs_get_buffer(&card->write);
		if (card->tx_buffer == NULL) {
			card->stats.tx_dropped++;
			rc = NETDEV_TX_BUSY;
			goto out;
		}
		card->tx_buffer->callback = lcs_txbuffer_cb;
		card->tx_buffer->count = 0;
	}
	header = (struct lcs_header *)
		(card->tx_buffer->data + card->tx_buffer->count);
	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
	/* count was advanced above, so header->offset is the offset of
	 * the *next* frame in the buffer. */
	header->offset = card->tx_buffer->count;
	header->type = card->lan_type;
	header->slot = card->portno;
	skb_copy_from_linear_data(skb, header + 1, skb->len);
	spin_unlock(&card->lock);
	card->stats.tx_bytes += skb->len;
	card->stats.tx_packets++;
	dev_kfree_skb(skb);
	netif_wake_queue(card->dev);
	spin_lock(&card->lock);
	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
		/* If this is the first tx buffer emit it immediately. */
		__lcs_emit_txbuffer(card);
out:
	spin_unlock(&card->lock);
	return rc;
}
1615 
1616 static int
1617 lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
1618 {
1619 	struct lcs_card *card;
1620 	int rc;
1621 
1622 	LCS_DBF_TEXT(5, trace, "pktxmit");
1623 	card = (struct lcs_card *) dev->ml_priv;
1624 	rc = __lcs_start_xmit(card, skb, dev);
1625 	return rc;
1626 }
1627 
1628 /**
1629  * send startlan and lanstat command to make LCS device ready
1630  */
static int
lcs_startlan_auto(struct lcs_card *card)
{
	int rc;

	LCS_DBF_TEXT(2, trace, "strtauto");
	/* Try each compiled-in LAN type in turn; the first STARTLAN that
	 * succeeds determines card->lan_type. */
#ifdef CONFIG_ETHERNET
	card->lan_type = LCS_FRAME_TYPE_ENET;
	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
	if (rc == 0)
		return 0;

#endif
#ifdef CONFIG_TR
	card->lan_type = LCS_FRAME_TYPE_TR;
	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
	if (rc == 0)
		return 0;
#endif
#ifdef CONFIG_FDDI
	card->lan_type = LCS_FRAME_TYPE_FDDI;
	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
	if (rc == 0)
		return 0;
#endif
	/* No LAN type responded. */
	return -EIO;
}
1658 
1659 static int
1660 lcs_startlan(struct lcs_card *card)
1661 {
1662 	int rc, i;
1663 
1664 	LCS_DBF_TEXT(2, trace, "startlan");
1665 	rc = 0;
1666 	if (card->portno != LCS_INVALID_PORT_NO) {
1667 		if (card->lan_type == LCS_FRAME_TYPE_AUTO)
1668 			rc = lcs_startlan_auto(card);
1669 		else
1670 			rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1671 	} else {
1672                 for (i = 0; i <= 16; i++) {
1673                         card->portno = i;
1674                         if (card->lan_type != LCS_FRAME_TYPE_AUTO)
1675                                 rc = lcs_send_startlan(card,
1676                                                        LCS_INITIATOR_TCPIP);
1677                         else
1678                                 /* autodetecting lan type */
1679                                 rc = lcs_startlan_auto(card);
1680                         if (rc == 0)
1681                                 break;
1682                 }
1683         }
1684 	if (rc == 0)
1685 		return lcs_send_lanstat(card);
1686 	return rc;
1687 }
1688 
1689 /**
1690  * LCS detect function
1691  * setup channels and make them I/O ready
1692  */
1693 static int
1694 lcs_detect(struct lcs_card *card)
1695 {
1696 	int rc = 0;
1697 
1698 	LCS_DBF_TEXT(2, setup, "lcsdetct");
1699 	/* start/reset card */
1700 	if (card->dev)
1701 		netif_stop_queue(card->dev);
1702 	rc = lcs_stop_channels(card);
1703 	if (rc == 0) {
1704 		rc = lcs_start_channels(card);
1705 		if (rc == 0) {
1706 			rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
1707 			if (rc == 0)
1708 				rc = lcs_startlan(card);
1709 		}
1710 	}
1711 	if (rc == 0) {
1712 		card->state = DEV_STATE_UP;
1713 	} else {
1714 		card->state = DEV_STATE_DOWN;
1715 		card->write.state = LCS_CH_STATE_INIT;
1716 		card->read.state =  LCS_CH_STATE_INIT;
1717 	}
1718 	return rc;
1719 }
1720 
1721 /**
1722  * LCS Stop card
1723  */
static int
lcs_stopcard(struct lcs_card *card)
{
	int rc;

	LCS_DBF_TEXT(3, setup, "stopcard");

	/* Send STOPLAN/shutdown only while the card is still operational. */
	if (card->read.state != LCS_CH_STATE_STOPPED &&
	    card->write.state != LCS_CH_STATE_STOPPED &&
	    card->read.state != LCS_CH_STATE_ERROR &&
	    card->write.state != LCS_CH_STATE_ERROR &&
	    card->state == DEV_STATE_UP) {
		lcs_clear_multicast_list(card);
		/* NOTE(review): the rc of lcs_send_stoplan is immediately
		 * overwritten below; a stoplan failure is silently
		 * ignored -- confirm this is intentional. */
		rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
		rc = lcs_send_shutdown(card);
	}
	/* The returned rc reflects only the channel stop. */
	rc = lcs_stop_channels(card);
	card->state = DEV_STATE_DOWN;

	return rc;
}
1745 
1746 /**
1747  * Kernel Thread helper functions for LGW initiated commands
1748  */
static void
lcs_start_kernel_thread(struct work_struct *work)
{
	struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
	LCS_DBF_TEXT(5, trace, "krnthrd");
	/* Spawn each worker thread whose start was requested; the
	 * lcs_do_start_thread gate decides per thread bit. */
	if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
		kthread_run(lcs_recovery, card, "lcs_recover");
#ifdef CONFIG_IP_MULTICAST
	if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
		kthread_run(lcs_register_mc_addresses, card, "regipm");
#endif
}
1761 
1762 /**
1763  * Process control frames.
1764  */
1765 static void
1766 lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
1767 {
1768 	LCS_DBF_TEXT(5, trace, "getctrl");
1769 	if (cmd->initiator == LCS_INITIATOR_LGW) {
1770 		switch(cmd->cmd_code) {
1771 		case LCS_CMD_STARTUP:
1772 		case LCS_CMD_STARTLAN:
1773 			lcs_schedule_recovery(card);
1774 			break;
1775 		case LCS_CMD_STOPLAN:
1776 			pr_warning("Stoplan for %s initiated by LGW.\n",
1777 				   card->dev->name);
1778 			if (card->dev)
1779 				netif_carrier_off(card->dev);
1780 			break;
1781 		default:
1782 			LCS_DBF_TEXT(5, trace, "noLGWcmd");
1783 			break;
1784 		}
1785 	} else
1786 		lcs_notify_lancmd_waiters(card, cmd);
1787 }
1788 
1789 /**
1790  * Unpack network packet.
1791  */
1792 static void
1793 lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
1794 {
1795 	struct sk_buff *skb;
1796 
1797 	LCS_DBF_TEXT(5, trace, "getskb");
1798 	if (card->dev == NULL ||
1799 	    card->state != DEV_STATE_UP)
1800 		/* The card isn't up. Ignore the packet. */
1801 		return;
1802 
1803 	skb = dev_alloc_skb(skb_len);
1804 	if (skb == NULL) {
1805 		dev_err(&card->dev->dev,
1806 			" Allocating a socket buffer to interface %s failed\n",
1807 			  card->dev->name);
1808 		card->stats.rx_dropped++;
1809 		return;
1810 	}
1811 	memcpy(skb_put(skb, skb_len), skb_data, skb_len);
1812 	skb->protocol =	card->lan_type_trans(skb, card->dev);
1813 	card->stats.rx_bytes += skb_len;
1814 	card->stats.rx_packets++;
1815 	if (skb->protocol == htons(ETH_P_802_2))
1816 		*((__u32 *)skb->cb) = ++card->pkt_seq;
1817 	netif_rx(skb);
1818 }
1819 
1820 /**
1821  * LCS main routine to get packets and lancmd replies from the buffers
1822  */
static void
lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	struct lcs_card *card;
	struct lcs_header *lcs_hdr;
	__u16 offset;

	LCS_DBF_TEXT(5, trace, "lcsgtpkt");
	lcs_hdr = (struct lcs_header *) buffer->data;
	/* LCS_ILLEGAL_OFFSET marks a frame already consumed (set below). */
	if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
		LCS_DBF_TEXT(4, trace, "-eiogpkt");
		return;
	}
	card = container_of(channel, struct lcs_card, read);
	offset = 0;
	/* Walk the frame chain: each header's offset points at the next
	 * frame in the buffer; a zero offset terminates the chain. */
	while (lcs_hdr->offset != 0) {
		/* Offsets must stay in bounds and strictly increase. */
		if (lcs_hdr->offset <= 0 ||
		    lcs_hdr->offset > LCS_IOBUFFERSIZE ||
		    lcs_hdr->offset < offset) {
			/* Offset invalid. */
			card->stats.rx_length_errors++;
			card->stats.rx_errors++;
			return;
		}
		/* What kind of frame is it? */
		if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
			/* Control frame. */
			lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
		else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
			 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
			 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
			/* Normal network packet. */
			lcs_get_skb(card, (char *)(lcs_hdr + 1),
				    lcs_hdr->offset - offset -
				    sizeof(struct lcs_header));
		else
			/* Unknown frame type. */
			; // FIXME: error message ?
		/* Proceed to next frame. */
		offset = lcs_hdr->offset;
		lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
		lcs_hdr = (struct lcs_header *) (buffer->data + offset);
	}
	/* The buffer is now empty. Make it ready again. */
	lcs_ready_buffer(&card->read, buffer);
}
1869 
1870 /**
1871  * get network statistics for ifconfig and other user programs
1872  */
1873 static struct net_device_stats *
1874 lcs_getstats(struct net_device *dev)
1875 {
1876 	struct lcs_card *card;
1877 
1878 	LCS_DBF_TEXT(4, trace, "netstats");
1879 	card = (struct lcs_card *) dev->ml_priv;
1880 	return &card->stats;
1881 }
1882 
1883 /**
1884  * stop lcs device
1885  * This function will be called by user doing ifconfig xxx down
1886  */
1887 static int
1888 lcs_stop_device(struct net_device *dev)
1889 {
1890 	struct lcs_card *card;
1891 	int rc;
1892 
1893 	LCS_DBF_TEXT(2, trace, "stopdev");
1894 	card   = (struct lcs_card *) dev->ml_priv;
1895 	netif_carrier_off(dev);
1896 	netif_tx_disable(dev);
1897 	dev->flags &= ~IFF_UP;
1898 	wait_event(card->write.wait_q,
1899 		(card->write.state != LCS_CH_STATE_RUNNING));
1900 	rc = lcs_stopcard(card);
1901 	if (rc)
1902 		dev_err(&card->dev->dev,
1903 			" Shutting down the LCS device failed\n ");
1904 	return rc;
1905 }
1906 
1907 /**
1908  * start lcs device and make it runnable
1909  * This function will be called by user doing ifconfig xxx up
1910  */
1911 static int
1912 lcs_open_device(struct net_device *dev)
1913 {
1914 	struct lcs_card *card;
1915 	int rc;
1916 
1917 	LCS_DBF_TEXT(2, trace, "opendev");
1918 	card = (struct lcs_card *) dev->ml_priv;
1919 	/* initialize statistics */
1920 	rc = lcs_detect(card);
1921 	if (rc) {
1922 		pr_err("Error in opening device!\n");
1923 
1924 	} else {
1925 		dev->flags |= IFF_UP;
1926 		netif_carrier_on(dev);
1927 		netif_wake_queue(dev);
1928 		card->state = DEV_STATE_UP;
1929 	}
1930 	return rc;
1931 }
1932 
1933 /**
1934  * show function for portno called by cat or similar things
1935  */
1936 static ssize_t
1937 lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
1938 {
1939         struct lcs_card *card;
1940 
1941 	card = dev_get_drvdata(dev);
1942 
1943         if (!card)
1944                 return 0;
1945 
1946         return sprintf(buf, "%d\n", card->portno);
1947 }
1948 
1949 /**
1950  * store the value which is piped to file portno
1951  */
1952 static ssize_t
1953 lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1954 {
1955         struct lcs_card *card;
1956         int value;
1957 
1958 	card = dev_get_drvdata(dev);
1959 
1960         if (!card)
1961                 return 0;
1962 
1963         sscanf(buf, "%u", &value);
1964         /* TODO: sanity checks */
1965         card->portno = value;
1966 
1967         return count;
1968 
1969 }
1970 
1971 static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1972 
/* Channel type names, indexed by the ccw device's id.driver_info
 * (see lcs_type_show). */
static const char *lcs_type[] = {
	"not a channel",
	"2216 parallel",
	"2216 channel",
	"OSA LCS card",
	"unknown channel type",
	"unsupported channel type",
};
1981 
1982 static ssize_t
1983 lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1984 {
1985 	struct ccwgroup_device *cgdev;
1986 
1987 	cgdev = to_ccwgroupdev(dev);
1988 	if (!cgdev)
1989 		return -ENODEV;
1990 
1991 	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
1992 }
1993 
1994 static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
1995 
1996 static ssize_t
1997 lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
1998 {
1999 	struct lcs_card *card;
2000 
2001 	card = dev_get_drvdata(dev);
2002 
2003 	return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
2004 }
2005 
2006 static ssize_t
2007 lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2008 {
2009         struct lcs_card *card;
2010         int value;
2011 
2012 	card = dev_get_drvdata(dev);
2013 
2014         if (!card)
2015                 return 0;
2016 
2017         sscanf(buf, "%u", &value);
2018         /* TODO: sanity checks */
2019         card->lancmd_timeout = value;
2020 
2021         return count;
2022 
2023 }
2024 
2025 static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
2026 
2027 static ssize_t
2028 lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
2029 		      const char *buf, size_t count)
2030 {
2031 	struct lcs_card *card = dev_get_drvdata(dev);
2032 	char *tmp;
2033 	int i;
2034 
2035 	if (!card)
2036 		return -EINVAL;
2037 	if (card->state != DEV_STATE_UP)
2038 		return -EPERM;
2039 	i = simple_strtoul(buf, &tmp, 16);
2040 	if (i == 1)
2041 		lcs_schedule_recovery(card);
2042 	return count;
2043 }
2044 
2045 static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
2046 
/* sysfs attributes exposed on the ccwgroup device (see the show/store
 * functions above). */
static struct attribute * lcs_attrs[] = {
	&dev_attr_portno.attr,
	&dev_attr_type.attr,
	&dev_attr_lancmd_timeout.attr,
	&dev_attr_recover.attr,
	NULL,
};

/* Group registered in lcs_probe_device via sysfs_create_group. */
static struct attribute_group lcs_attr_group = {
	.attrs = lcs_attrs,
};
2058 
2059 /**
2060  * lcs_probe_device is called on establishing a new ccwgroup_device.
2061  */
2062 static int
2063 lcs_probe_device(struct ccwgroup_device *ccwgdev)
2064 {
2065 	struct lcs_card *card;
2066 	int ret;
2067 
2068 	if (!get_device(&ccwgdev->dev))
2069 		return -ENODEV;
2070 
2071 	LCS_DBF_TEXT(2, setup, "add_dev");
2072         card = lcs_alloc_card();
2073         if (!card) {
2074 		LCS_DBF_TEXT_(2, setup, "  rc%d", -ENOMEM);
2075 		put_device(&ccwgdev->dev);
2076                 return -ENOMEM;
2077         }
2078 	ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2079 	if (ret) {
2080 		lcs_free_card(card);
2081 		put_device(&ccwgdev->dev);
2082 		return ret;
2083         }
2084 	dev_set_drvdata(&ccwgdev->dev, card);
2085 	ccwgdev->cdev[0]->handler = lcs_irq;
2086 	ccwgdev->cdev[1]->handler = lcs_irq;
2087 	card->gdev = ccwgdev;
2088 	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
2089 	card->thread_start_mask = 0;
2090 	card->thread_allowed_mask = 0;
2091 	card->thread_running_mask = 0;
2092         return 0;
2093 }
2094 
2095 static int
2096 lcs_register_netdev(struct ccwgroup_device *ccwgdev)
2097 {
2098 	struct lcs_card *card;
2099 
2100 	LCS_DBF_TEXT(2, setup, "regnetdv");
2101 	card = dev_get_drvdata(&ccwgdev->dev);
2102 	if (card->dev->reg_state != NETREG_UNINITIALIZED)
2103 		return 0;
2104 	SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
2105 	return register_netdev(card->dev);
2106 }
2107 
2108 /**
2109  * lcs_new_device will be called by setting the group device online.
2110  */
2111 static const struct net_device_ops lcs_netdev_ops = {
2112 	.ndo_open		= lcs_open_device,
2113 	.ndo_stop		= lcs_stop_device,
2114 	.ndo_get_stats		= lcs_getstats,
2115 	.ndo_start_xmit		= lcs_start_xmit,
2116 };
2117 
/* net_device callbacks used when the card supports multicast; identical to
 * lcs_netdev_ops plus the rx-mode (multicast list) handler. */
static const struct net_device_ops lcs_mc_netdev_ops = {
	.ndo_open		= lcs_open_device,
	.ndo_stop		= lcs_stop_device,
	.ndo_get_stats		= lcs_getstats,
	.ndo_start_xmit		= lcs_start_xmit,
	.ndo_set_rx_mode	= lcs_set_multicast_list,
};
2125 
/*
 * Bring the group device online: set both channel devices online, detect
 * the LAN type, allocate a matching net_device (on first bring-up) and
 * register it. Also used for the second half of recovery, in which case
 * the existing net_device is reused and revived.
 * Returns 0 on success, -ENODEV on any failure (after rolling the channel
 * devices back offline).
 */
static int
lcs_new_device(struct ccwgroup_device *ccwgdev)
{
	struct  lcs_card *card;
	struct net_device *dev=NULL;
	enum lcs_dev_states recover_state;
	int rc;

	card = dev_get_drvdata(&ccwgdev->dev);
	if (!card)
		return -ENODEV;

	LCS_DBF_TEXT(2, setup, "newdev");
	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
	/* cdev[0] is the read channel, cdev[1] the write channel. */
	card->read.ccwdev  = ccwgdev->cdev[0];
	card->write.ccwdev = ccwgdev->cdev[1];

	/* Remember whether we are completing a recovery cycle. */
	recover_state = card->state;
	rc = ccw_device_set_online(card->read.ccwdev);
	if (rc)
		goto out_err;
	rc = ccw_device_set_online(card->write.ccwdev);
	if (rc)
		goto out_werr;

	LCS_DBF_TEXT(3, setup, "lcsnewdv");

	lcs_setup_card(card);
	rc = lcs_detect(card);
	if (rc) {
		LCS_DBF_TEXT(2, setup, "dtctfail");
		dev_err(&card->dev->dev,
			"Detecting a network adapter for LCS devices"
			" failed with rc=%d (0x%x)\n", rc, rc);
		lcs_stopcard(card);
		goto out;
	}
	/* During recovery the net_device already exists — skip allocation. */
	if (card->dev) {
		LCS_DBF_TEXT(2, setup, "samedev");
		LCS_DBF_HEX(3, setup, &card, sizeof(void*));
		goto netdev_out;
	}
	/* Allocate a net_device matching the detected LAN frame type. */
	switch (card->lan_type) {
#ifdef CONFIG_ETHERNET
	case LCS_FRAME_TYPE_ENET:
		card->lan_type_trans = eth_type_trans;
		dev = alloc_etherdev(0);
		break;
#endif
#ifdef CONFIG_TR
	case LCS_FRAME_TYPE_TR:
		card->lan_type_trans = tr_type_trans;
		dev = alloc_trdev(0);
		break;
#endif
#ifdef CONFIG_FDDI
	case LCS_FRAME_TYPE_FDDI:
		card->lan_type_trans = fddi_type_trans;
		dev = alloc_fddidev(0);
		break;
#endif
	default:
		LCS_DBF_TEXT(3, setup, "errinit");
		pr_err(" Initialization failed\n");
		goto out;
	}
	if (!dev)
		goto out;
	card->dev = dev;
	card->dev->ml_priv = card;
	card->dev->netdev_ops = &lcs_netdev_ops;
	memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
	/* Switch to the multicast-capable ops if the card supports it. */
	if (!lcs_check_multicast_support(card))
		card->dev->netdev_ops = &lcs_mc_netdev_ops;
#endif
netdev_out:
	lcs_set_allowed_threads(card,0xffffffff);
	if (recover_state == DEV_STATE_RECOVER) {
		/* Recovery: revive the existing interface immediately. */
		lcs_set_multicast_list(card->dev);
		card->dev->flags |= IFF_UP;
		netif_carrier_on(card->dev);
		netif_wake_queue(card->dev);
		card->state = DEV_STATE_UP;
	} else {
		/* Fresh bring-up: leave the card stopped until ndo_open. */
		lcs_stopcard(card);
	}

	if (lcs_register_netdev(ccwgdev) != 0)
		goto out;

	/* Print out supported assists: IPv6 */
	pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
		(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
		"with" : "without");
	/* Print out supported assist: Multicast */
	pr_info("LCS device %s %s Multicast support\n", card->dev->name,
		(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
		"with" : "without");
	return 0;
out:

	ccw_device_set_offline(card->write.ccwdev);
out_werr:
	ccw_device_set_offline(card->read.ccwdev);
out_err:
	return -ENODEV;
}
2234 
2235 /**
2236  * lcs_shutdown_device, called when setting the group device offline.
2237  */
2238 static int
2239 __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
2240 {
2241 	struct lcs_card *card;
2242 	enum lcs_dev_states recover_state;
2243 	int ret = 0, ret2 = 0, ret3 = 0;
2244 
2245 	LCS_DBF_TEXT(3, setup, "shtdndev");
2246 	card = dev_get_drvdata(&ccwgdev->dev);
2247 	if (!card)
2248 		return -ENODEV;
2249 	if (recovery_mode == 0) {
2250 		lcs_set_allowed_threads(card, 0);
2251 		if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
2252 			return -ERESTARTSYS;
2253 	}
2254 	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2255 	recover_state = card->state;
2256 
2257 	ret = lcs_stop_device(card->dev);
2258 	ret2 = ccw_device_set_offline(card->read.ccwdev);
2259 	ret3 = ccw_device_set_offline(card->write.ccwdev);
2260 	if (!ret)
2261 		ret = (ret2) ? ret2 : ret3;
2262 	if (ret)
2263 		LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
2264 	if (recover_state == DEV_STATE_UP) {
2265 		card->state = DEV_STATE_RECOVER;
2266 	}
2267 	return 0;
2268 }
2269 
/* set_offline callback: normal (non-recovery) shutdown path. */
static int
lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
{
	return __lcs_shutdown_device(ccwgdev, 0);
}
2275 
2276 /**
2277  * drive lcs recovery after startup and startlan initiated by Lan Gateway
2278  */
2279 static int
2280 lcs_recovery(void *ptr)
2281 {
2282 	struct lcs_card *card;
2283 	struct ccwgroup_device *gdev;
2284         int rc;
2285 
2286 	card = (struct lcs_card *) ptr;
2287 
2288 	LCS_DBF_TEXT(4, trace, "recover1");
2289 	if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
2290 		return 0;
2291 	LCS_DBF_TEXT(4, trace, "recover2");
2292 	gdev = card->gdev;
2293 	dev_warn(&gdev->dev,
2294 		"A recovery process has been started for the LCS device\n");
2295 	rc = __lcs_shutdown_device(gdev, 1);
2296 	rc = lcs_new_device(gdev);
2297 	if (!rc)
2298 		pr_info("Device %s successfully recovered!\n",
2299 			card->dev->name);
2300 	else
2301 		pr_info("Device %s could not be recovered!\n",
2302 			card->dev->name);
2303 	lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
2304 	return 0;
2305 }
2306 
2307 /**
2308  * lcs_remove_device, free buffers and card
2309  */
2310 static void
2311 lcs_remove_device(struct ccwgroup_device *ccwgdev)
2312 {
2313 	struct lcs_card *card;
2314 
2315 	card = dev_get_drvdata(&ccwgdev->dev);
2316 	if (!card)
2317 		return;
2318 
2319 	LCS_DBF_TEXT(3, setup, "remdev");
2320 	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
2321 	if (ccwgdev->state == CCWGROUP_ONLINE) {
2322 		lcs_shutdown_device(ccwgdev);
2323 	}
2324 	if (card->dev)
2325 		unregister_netdev(card->dev);
2326 	sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2327 	lcs_cleanup_card(card);
2328 	lcs_free_card(card);
2329 	put_device(&ccwgdev->dev);
2330 }
2331 
/*
 * Power-management suspend: detach the net_device, stop all driver
 * threads and shut the device down (in recovery mode, so it can be
 * revived on resume). Always returns 0.
 */
static int lcs_pm_suspend(struct lcs_card *card)
{
	if (card->dev)
		netif_device_detach(card->dev);
	lcs_set_allowed_threads(card, 0);
	lcs_wait_for_threads(card, 0xffffffff);
	if (card->state != DEV_STATE_DOWN)
		__lcs_shutdown_device(card->gdev, 1);
	return 0;
}
2342 
/*
 * Power-management resume: bring the device back online if it was left
 * in recovery state by lcs_pm_suspend(), then re-attach the net_device.
 * Returns the result of lcs_new_device(), or 0 if no recovery was needed.
 */
static int lcs_pm_resume(struct lcs_card *card)
{
	int rc = 0;

	if (card->state == DEV_STATE_RECOVER)
		rc = lcs_new_device(card->gdev);
	if (card->dev)
		netif_device_attach(card->dev);
	if (rc) {
		dev_warn(&card->gdev->dev, "The lcs device driver "
			"failed to recover the device\n");
	}
	return rc;
}
2357 
/* PM prepare callback: nothing to do for LCS. */
static int lcs_prepare(struct ccwgroup_device *gdev)
{
	return 0;
}
2362 
/* PM complete callback: nothing to do for LCS. */
static void lcs_complete(struct ccwgroup_device *gdev)
{
}
2367 
2368 static int lcs_freeze(struct ccwgroup_device *gdev)
2369 {
2370 	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2371 	return lcs_pm_suspend(card);
2372 }
2373 
2374 static int lcs_thaw(struct ccwgroup_device *gdev)
2375 {
2376 	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2377 	return lcs_pm_resume(card);
2378 }
2379 
2380 static int lcs_restore(struct ccwgroup_device *gdev)
2381 {
2382 	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
2383 	return lcs_pm_resume(card);
2384 }
2385 
/* CCW device IDs served by this driver (control unit type 0x3088);
 * driver_info encodes the LCS channel/adapter flavor. */
static struct ccw_device_id lcs_ids[] = {
	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
	{},
};
MODULE_DEVICE_TABLE(ccw, lcs_ids);
2393 
/* Low-level CCW driver; probe/remove are delegated to the ccwgroup core. */
static struct ccw_driver lcs_ccw_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "lcs",
	},
	.ids	= lcs_ids,
	.probe	= ccwgroup_probe_ccwdev,
	.remove	= ccwgroup_remove_ccwdev,
	.int_class = IOINT_LCS,
};
2404 
2405 /**
2406  * LCS ccwgroup driver registration
2407  */
2408 static struct ccwgroup_driver lcs_group_driver = {
2409 	.driver = {
2410 		.owner	= THIS_MODULE,
2411 		.name	= "lcs",
2412 	},
2413 	.max_slaves  = 2,
2414 	.driver_id   = 0xD3C3E2,
2415 	.probe       = lcs_probe_device,
2416 	.remove      = lcs_remove_device,
2417 	.set_online  = lcs_new_device,
2418 	.set_offline = lcs_shutdown_device,
2419 	.prepare     = lcs_prepare,
2420 	.complete    = lcs_complete,
2421 	.freeze	     = lcs_freeze,
2422 	.thaw	     = lcs_thaw,
2423 	.restore     = lcs_restore,
2424 };
2425 
2426 static ssize_t
2427 lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2428 		       size_t count)
2429 {
2430 	int err;
2431 	err = ccwgroup_create_from_string(lcs_root_dev,
2432 					  lcs_group_driver.driver_id,
2433 					  &lcs_ccw_driver, 2, buf);
2434 	return err ? err : count;
2435 }
2436 
/* Write-only driver attribute for creating group devices from sysfs. */
static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);

static struct attribute *lcs_group_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};

static struct attribute_group lcs_group_attr_group = {
	.attrs = lcs_group_attrs,
};

/* Hooked into lcs_group_driver.driver.groups in lcs_init_module(); the
 * driver core creates/removes these attributes automatically. */
static const struct attribute_group *lcs_group_attr_groups[] = {
	&lcs_group_attr_group,
	NULL,
};
2452 
2453 /**
2454  *  LCS Module/Kernel initialization function
2455  */
2456 static int
2457 __init lcs_init_module(void)
2458 {
2459 	int rc;
2460 
2461 	pr_info("Loading %s\n", version);
2462 	rc = lcs_register_debug_facility();
2463 	LCS_DBF_TEXT(0, setup, "lcsinit");
2464 	if (rc)
2465 		goto out_err;
2466 	lcs_root_dev = root_device_register("lcs");
2467 	rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
2468 	if (rc)
2469 		goto register_err;
2470 	rc = ccw_driver_register(&lcs_ccw_driver);
2471 	if (rc)
2472 		goto ccw_err;
2473 	lcs_group_driver.driver.groups = lcs_group_attr_groups;
2474 	rc = ccwgroup_driver_register(&lcs_group_driver);
2475 	if (rc)
2476 		goto ccwgroup_err;
2477 	return 0;
2478 
2479 ccwgroup_err:
2480 	ccw_driver_unregister(&lcs_ccw_driver);
2481 ccw_err:
2482 	root_device_unregister(lcs_root_dev);
2483 register_err:
2484 	lcs_unregister_debug_facility();
2485 out_err:
2486 	pr_err("Initializing the lcs device driver failed\n");
2487 	return rc;
2488 }
2489 
2490 
2491 /**
2492  *  LCS module cleanup function
2493  */
2494 static void
2495 __exit lcs_cleanup_module(void)
2496 {
2497 	pr_info("Terminating lcs module.\n");
2498 	LCS_DBF_TEXT(0, trace, "cleanup");
2499 	driver_remove_file(&lcs_group_driver.driver,
2500 			   &driver_attr_group);
2501 	ccwgroup_driver_unregister(&lcs_group_driver);
2502 	ccw_driver_unregister(&lcs_ccw_driver);
2503 	root_device_unregister(lcs_root_dev);
2504 	lcs_unregister_debug_facility();
2505 }
2506 
2507 module_init(lcs_init_module);
2508 module_exit(lcs_cleanup_module);
2509 
2510 MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
2511 MODULE_LICENSE("GPL");
2512 
2513