1 /* 2 * Linux for S/390 Lan Channel Station Network Driver 3 * 4 * Copyright IBM Corp. 1999, 2009 5 * Author(s): Original Code written by 6 * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com> 7 * Rewritten by 8 * Frank Pavlic <fpavlic@de.ibm.com> and 9 * Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 */ 25 26 #define KMSG_COMPONENT "lcs" 27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 28 29 #include <linux/module.h> 30 #include <linux/if.h> 31 #include <linux/netdevice.h> 32 #include <linux/etherdevice.h> 33 #include <linux/fddidevice.h> 34 #include <linux/inetdevice.h> 35 #include <linux/in.h> 36 #include <linux/igmp.h> 37 #include <linux/delay.h> 38 #include <linux/kthread.h> 39 #include <linux/slab.h> 40 #include <net/arp.h> 41 #include <net/ip.h> 42 43 #include <asm/debug.h> 44 #include <asm/idals.h> 45 #include <asm/timex.h> 46 #include <linux/device.h> 47 #include <asm/ccwgroup.h> 48 49 #include "lcs.h" 50 51 52 #if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI) 53 #error Cannot compile lcs.c without some net devices switched on. 
#endif

/**
 * initialization string for output
 */

static char version[] __initdata = "LCS driver";

/**
 * the root device for lcs group devices
 */
static struct device *lcs_root_dev;

/**
 * Some prototypes.
 */
static void lcs_tasklet(unsigned long);
static void lcs_start_kernel_thread(struct work_struct *);
static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
#ifdef CONFIG_IP_MULTICAST
static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
#endif /* CONFIG_IP_MULTICAST */
static int lcs_recovery(void *ptr);

/**
 * Debug Facility Stuff
 */
static char debug_buffer[255];
static debug_info_t *lcs_dbf_setup;
static debug_info_t *lcs_dbf_trace;

/**
 * LCS Debug Facility functions
 */
/* Tear down both debug areas; debug_unregister(NULL) is a no-op, so this
 * is safe to call on a partially set up facility. */
static void
lcs_unregister_debug_facility(void)
{
	debug_unregister(lcs_dbf_setup);
	debug_unregister(lcs_dbf_trace);
}

/*
 * Create the "lcs_setup" and "lcs_trace" s390 debug areas, each with a
 * hex/ascii view and initial debug level 2.
 * Returns 0 on success, -ENOMEM if either area could not be registered
 * (any partial registration is rolled back).
 */
static int
lcs_register_debug_facility(void)
{
	lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
	lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
		pr_err("Not enough memory for debug facility.\n");
		lcs_unregister_debug_facility();
		return -ENOMEM;
	}
	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(lcs_dbf_setup, 2);
	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(lcs_dbf_trace, 2);
	return 0;
}

/**
 * Allocate io buffers.
 */
static int
lcs_alloc_channel(struct lcs_channel *channel)
{
	int cnt;

	LCS_DBF_TEXT(2, setup, "ichalloc");
	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		/* alloc memory for iobuffer; GFP_DMA because the channel
		 * program addresses the buffer directly via the CCW cda */
		channel->iob[cnt].data =
			kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
	}
	if (cnt < LCS_NUM_BUFFS) {
		/* Not all io buffers could be allocated. Roll back. */
		LCS_DBF_TEXT(2, setup, "echalloc");
		while (cnt-- > 0)
			kfree(channel->iob[cnt].data);
		return -ENOMEM;
	}
	return 0;
}

/**
 * Free io buffers.
 */
static void
lcs_free_channel(struct lcs_channel *channel)
{
	int cnt;

	LCS_DBF_TEXT(2, setup, "ichfree");
	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		kfree(channel->iob[cnt].data);
		channel->iob[cnt].data = NULL;
	}
}

/*
 * Cleanup channel: stop the irq tasklet, then release all io buffers.
 */
static void
lcs_cleanup_channel(struct lcs_channel *channel)
{
	LCS_DBF_TEXT(3, setup, "cleanch");
	/* Kill write channel tasklets. */
	tasklet_kill(&channel->irq_tasklet);
	/* Free channel buffers. */
	lcs_free_channel(channel);
}

/**
 * LCS free memory for card and channels.
 */
static void
lcs_free_card(struct lcs_card *card)
{
	LCS_DBF_TEXT(2, setup, "remcard");
	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
	kfree(card);
}

/**
 * LCS alloc memory for card and channels.
 * Returns the new card, or NULL if any allocation failed.
 */
static struct lcs_card *
lcs_alloc_card(void)
{
	struct lcs_card *card;
	int rc;

	LCS_DBF_TEXT(2, setup, "alloclcs");

	card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
	if (card == NULL)
		return NULL;
	card->lan_type = LCS_FRAME_TYPE_AUTO;
	card->pkt_seq = 0;
	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
	/* Allocate io buffers for the read channel.
	 */
	rc = lcs_alloc_channel(&card->read);
	if (rc){
		LCS_DBF_TEXT(2, setup, "iccwerr");
		lcs_free_card(card);
		return NULL;
	}
	/* Allocate io buffers for the write channel. */
	rc = lcs_alloc_channel(&card->write);
	if (rc) {
		LCS_DBF_TEXT(2, setup, "iccwerr");
		lcs_cleanup_channel(&card->read);
		lcs_free_card(card);
		return NULL;
	}

#ifdef CONFIG_IP_MULTICAST
	INIT_LIST_HEAD(&card->ipm_list);
#endif
	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
	return card;
}

/*
 * Setup read channel: build the circular channel program of read CCWs,
 * closed by a TIC back to the first CCW.
 */
static void
lcs_setup_read_ccws(struct lcs_card *card)
{
	int cnt;

	LCS_DBF_TEXT(2, setup, "ireadccw");
	/* Setup read ccws. */
	memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
		card->read.ccws[cnt].flags =
			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
		/*
		 * Note: we have allocated the buffer with GFP_DMA, so
		 * we do not need to do set_normalized_cda.
		 */
		card->read.ccws[cnt].cda =
			(__u32) __pa(card->read.iob[cnt].data);
		((struct lcs_header *)
		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
		card->read.iob[cnt].callback = lcs_get_frames_cb;
		card->read.iob[cnt].state = LCS_BUF_STATE_READY;
		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
	}
	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
	/* Last ccw is a tic (transfer in channel). */
	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
	card->read.ccws[LCS_NUM_BUFFS].cda =
		(__u32) __pa(card->read.ccws);
	/* Set initial state of the read channel.
*/ 254 card->read.state = LCS_CH_STATE_INIT; 255 256 card->read.io_idx = 0; 257 card->read.buf_idx = 0; 258 } 259 260 static void 261 lcs_setup_read(struct lcs_card *card) 262 { 263 LCS_DBF_TEXT(3, setup, "initread"); 264 265 lcs_setup_read_ccws(card); 266 /* Initialize read channel tasklet. */ 267 card->read.irq_tasklet.data = (unsigned long) &card->read; 268 card->read.irq_tasklet.func = lcs_tasklet; 269 /* Initialize waitqueue. */ 270 init_waitqueue_head(&card->read.wait_q); 271 } 272 273 /* 274 * Setup write channel. 275 */ 276 static void 277 lcs_setup_write_ccws(struct lcs_card *card) 278 { 279 int cnt; 280 281 LCS_DBF_TEXT(3, setup, "iwritccw"); 282 /* Setup write ccws. */ 283 memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1)); 284 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { 285 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; 286 card->write.ccws[cnt].count = 0; 287 card->write.ccws[cnt].flags = 288 CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI; 289 /* 290 * Note: we have allocated the buffer with GFP_DMA, so 291 * we do not need to do set_normalized_cda. 292 */ 293 card->write.ccws[cnt].cda = 294 (__u32) __pa(card->write.iob[cnt].data); 295 } 296 /* Last ccw is a tic (transfer in channel). */ 297 card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; 298 card->write.ccws[LCS_NUM_BUFFS].cda = 299 (__u32) __pa(card->write.ccws); 300 /* Set initial state of the write channel. */ 301 card->read.state = LCS_CH_STATE_INIT; 302 303 card->write.io_idx = 0; 304 card->write.buf_idx = 0; 305 } 306 307 static void 308 lcs_setup_write(struct lcs_card *card) 309 { 310 LCS_DBF_TEXT(3, setup, "initwrit"); 311 312 lcs_setup_write_ccws(card); 313 /* Initialize write channel tasklet. */ 314 card->write.irq_tasklet.data = (unsigned long) &card->write; 315 card->write.irq_tasklet.func = lcs_tasklet; 316 /* Initialize waitqueue. 
*/ 317 init_waitqueue_head(&card->write.wait_q); 318 } 319 320 static void 321 lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) 322 { 323 unsigned long flags; 324 325 spin_lock_irqsave(&card->mask_lock, flags); 326 card->thread_allowed_mask = threads; 327 spin_unlock_irqrestore(&card->mask_lock, flags); 328 wake_up(&card->wait_q); 329 } 330 static int lcs_threads_running(struct lcs_card *card, unsigned long threads) 331 { 332 unsigned long flags; 333 int rc = 0; 334 335 spin_lock_irqsave(&card->mask_lock, flags); 336 rc = (card->thread_running_mask & threads); 337 spin_unlock_irqrestore(&card->mask_lock, flags); 338 return rc; 339 } 340 341 static int 342 lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) 343 { 344 return wait_event_interruptible(card->wait_q, 345 lcs_threads_running(card, threads) == 0); 346 } 347 348 static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) 349 { 350 unsigned long flags; 351 352 spin_lock_irqsave(&card->mask_lock, flags); 353 if ( !(card->thread_allowed_mask & thread) || 354 (card->thread_start_mask & thread) ) { 355 spin_unlock_irqrestore(&card->mask_lock, flags); 356 return -EPERM; 357 } 358 card->thread_start_mask |= thread; 359 spin_unlock_irqrestore(&card->mask_lock, flags); 360 return 0; 361 } 362 363 static void 364 lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) 365 { 366 unsigned long flags; 367 368 spin_lock_irqsave(&card->mask_lock, flags); 369 card->thread_running_mask &= ~thread; 370 spin_unlock_irqrestore(&card->mask_lock, flags); 371 wake_up(&card->wait_q); 372 } 373 374 static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread) 375 { 376 unsigned long flags; 377 int rc = 0; 378 379 spin_lock_irqsave(&card->mask_lock, flags); 380 if (card->thread_start_mask & thread){ 381 if ((card->thread_allowed_mask & thread) && 382 !(card->thread_running_mask & thread)){ 383 rc = 1; 384 card->thread_start_mask &= ~thread; 385 
card->thread_running_mask |= thread; 386 } else 387 rc = -EPERM; 388 } 389 spin_unlock_irqrestore(&card->mask_lock, flags); 390 return rc; 391 } 392 393 static int 394 lcs_do_run_thread(struct lcs_card *card, unsigned long thread) 395 { 396 int rc = 0; 397 wait_event(card->wait_q, 398 (rc = __lcs_do_run_thread(card, thread)) >= 0); 399 return rc; 400 } 401 402 static int 403 lcs_do_start_thread(struct lcs_card *card, unsigned long thread) 404 { 405 unsigned long flags; 406 int rc = 0; 407 408 spin_lock_irqsave(&card->mask_lock, flags); 409 LCS_DBF_TEXT_(4, trace, " %02x%02x%02x", 410 (u8) card->thread_start_mask, 411 (u8) card->thread_allowed_mask, 412 (u8) card->thread_running_mask); 413 rc = (card->thread_start_mask & thread); 414 spin_unlock_irqrestore(&card->mask_lock, flags); 415 return rc; 416 } 417 418 /** 419 * Initialize channels,card and state machines. 420 */ 421 static void 422 lcs_setup_card(struct lcs_card *card) 423 { 424 LCS_DBF_TEXT(2, setup, "initcard"); 425 LCS_DBF_HEX(2, setup, &card, sizeof(void*)); 426 427 lcs_setup_read(card); 428 lcs_setup_write(card); 429 /* Set cards initial state. */ 430 card->state = DEV_STATE_DOWN; 431 card->tx_buffer = NULL; 432 card->tx_emitted = 0; 433 434 init_waitqueue_head(&card->wait_q); 435 spin_lock_init(&card->lock); 436 spin_lock_init(&card->ipm_lock); 437 spin_lock_init(&card->mask_lock); 438 #ifdef CONFIG_IP_MULTICAST 439 INIT_LIST_HEAD(&card->ipm_list); 440 #endif 441 INIT_LIST_HEAD(&card->lancmd_waiters); 442 } 443 444 static void lcs_clear_multicast_list(struct lcs_card *card) 445 { 446 #ifdef CONFIG_IP_MULTICAST 447 struct lcs_ipm_list *ipm; 448 unsigned long flags; 449 450 /* Free multicast list. 
	 */
	LCS_DBF_TEXT(3, setup, "clmclist");
	spin_lock_irqsave(&card->ipm_lock, flags);
	while (!list_empty(&card->ipm_list)){
		ipm = list_entry(card->ipm_list.next,
				 struct lcs_ipm_list, list);
		list_del(&ipm->list);
		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
			/* Drop the lock while talking to the card. */
			spin_unlock_irqrestore(&card->ipm_lock, flags);
			lcs_send_delipm(card, ipm);
			spin_lock_irqsave(&card->ipm_lock, flags);
		}
		kfree(ipm);
	}
	spin_unlock_irqrestore(&card->ipm_lock, flags);
#endif
}
/**
 * Cleanup channels, card and state machines.
 */
static void
lcs_cleanup_card(struct lcs_card *card)
{

	LCS_DBF_TEXT(3, setup, "cleancrd");
	LCS_DBF_HEX(2,setup,&card,sizeof(void*));

	if (card->dev != NULL)
		free_netdev(card->dev);
	/* Cleanup channels. */
	lcs_cleanup_channel(&card->write);
	lcs_cleanup_channel(&card->read);
}

/**
 * Start channel.
 * Kicks off the channel program at the current io_idx; on success the
 * channel enters LCS_CH_STATE_RUNNING.
 */
static int
lcs_start_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      channel->ccws + channel->io_idx, 0, 0,
			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
	if (rc == 0)
		channel->state = LCS_CH_STATE_RUNNING;
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4,trace,"essh%s",
			      dev_name(&channel->ccwdev->dev));
		dev_err(&channel->ccwdev->dev,
			"Starting an LCS device resulted in an error,"
			" rc=%d!\n", rc);
	}
	return rc;
}

/*
 * Issue a clear subchannel and wait until the interrupt handler reports
 * LCS_CH_STATE_CLEARED; the channel then ends up STOPPED.
 */
static int
lcs_clear_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	LCS_DBF_TEXT(4,trace,"clearch");
	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ecsc%s",
			      dev_name(&channel->ccwdev->dev));
		return rc;
	}
	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
	channel->state = LCS_CH_STATE_STOPPED;
	return rc;
}


/**
 * Stop channel.
 * No-op if already stopped; otherwise halt, wait for the halt to
 * complete, then clear the subchannel.
 */
static int
lcs_stop_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	if (channel->state == LCS_CH_STATE_STOPPED)
		return 0;
	LCS_DBF_TEXT(4,trace,"haltsch");
	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
	channel->state = LCS_CH_STATE_INIT;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ehsc%s",
			      dev_name(&channel->ccwdev->dev));
		return rc;
	}
	/* Asynchronous halt initiated. Wait for its completion. */
	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
	lcs_clear_channel(channel);
	return 0;
}

/**
 * start read and write channel
 */
static int
lcs_start_channels(struct lcs_card *card)
{
	int rc;

	LCS_DBF_TEXT(2, trace, "chstart");
	/* start read channel */
	rc = lcs_start_channel(&card->read);
	if (rc)
		return rc;
	/* start write channel; undo the read start on failure */
	rc = lcs_start_channel(&card->write);
	if (rc)
		lcs_stop_channel(&card->read);
	return rc;
}

/**
 * stop read and write channel
 */
static int
lcs_stop_channels(struct lcs_card *card)
{
	LCS_DBF_TEXT(2, trace, "chhalt");
	lcs_stop_channel(&card->read);
	lcs_stop_channel(&card->write);
	return 0;
}

/**
 * Get empty buffer.
 */
static struct lcs_buffer *
__lcs_get_buffer(struct lcs_channel *channel)
{
	int index;

	LCS_DBF_TEXT(5, trace, "_getbuff");
	/* Scan the ring once, starting at io_idx, for an EMPTY buffer;
	 * the first hit is claimed by moving it to LOCKED. */
	index = channel->io_idx;
	do {
		if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
			channel->iob[index].state = LCS_BUF_STATE_LOCKED;
			return channel->iob + index;
		}
		index = (index + 1) & (LCS_NUM_BUFFS - 1);
	} while (index != channel->io_idx);
	return NULL;
}

/* Locked wrapper around __lcs_get_buffer; may return NULL. */
static struct lcs_buffer *
lcs_get_buffer(struct lcs_channel *channel)
{
	struct lcs_buffer *buffer;
	unsigned long flags;

	LCS_DBF_TEXT(5, trace, "getbuff");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	buffer = __lcs_get_buffer(channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	return buffer;
}

/**
 * Resume channel program if the channel is suspended.
 * Caller must hold the ccwdev lock (or be in irq context).
 */
static int
__lcs_resume_channel(struct lcs_channel *channel)
{
	int rc;

	if (channel->state != LCS_CH_STATE_SUSPENDED)
		return 0;
	/* Current CCW still carries the suspend bit: nothing to resume. */
	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
		return 0;
	LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
	rc = ccw_device_resume(channel->ccwdev);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ersc%s",
			      dev_name(&channel->ccwdev->dev));
		dev_err(&channel->ccwdev->dev,
			"Sending data from the LCS device to the LAN failed"
			" with rc=%d\n",rc);
	} else
		channel->state = LCS_CH_STATE_RUNNING;
	return rc;

}

/**
 * Make a buffer ready for processing.
 * Adjusts the PCI/suspend CCW flags of buffer `index` relative to its
 * ring neighbours.
 */
static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
{
	int prev, next;

	LCS_DBF_TEXT(5, trace, "rdybits");
	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
	next = (index + 1) & (LCS_NUM_BUFFS - 1);
	/* Check if we may clear the suspend bit of this buffer. */
	if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
		/* Check if we have to set the PCI bit. */
		if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
			/* Suspend bit of the previous buffer is not set. */
			channel->ccws[index].flags |= CCW_FLAG_PCI;
		/* Suspend bit of the next buffer is set. */
		channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
	}
}

/* Hand a LOCKED/PROCESSED buffer back to the channel program and resume
 * the channel if it was suspended. */
static int
lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	unsigned long flags;
	int index, rc;

	LCS_DBF_TEXT(5, trace, "rdybuff");
	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
	       buffer->state != LCS_BUF_STATE_PROCESSED);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	buffer->state = LCS_BUF_STATE_READY;
	index = buffer - channel->iob;
	/* Set length. */
	channel->ccws[index].count = buffer->count;
	/* Check relevant PCI/suspend bits. */
	__lcs_ready_buffer_bits(channel, index);
	rc = __lcs_resume_channel(channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	return rc;
}

/**
 * Mark the buffer as processed. Take care of the suspend bit
 * of the previous buffer. This function is called from
 * interrupt context, so the lock must not be taken.
 */
static int
__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	int index, prev, next;

	LCS_DBF_TEXT(5, trace, "prcsbuff");
	BUG_ON(buffer->state != LCS_BUF_STATE_READY);
	buffer->state = LCS_BUF_STATE_PROCESSED;
	index = buffer - channel->iob;
	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
	next = (index + 1) & (LCS_NUM_BUFFS - 1);
	/* Set the suspend bit and clear the PCI bit of this buffer. */
	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
	/* Check the suspend bit of the previous buffer. */
	if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
		/*
		 * Previous buffer is in state ready. It might have
		 * happened in lcs_ready_buffer that the suspend bit
		 * has not been cleared to avoid an endless loop.
		 * Do it now.
		 */
		__lcs_ready_buffer_bits(channel, prev);
	}
	/* Clear PCI bit of next buffer. */
	channel->ccws[next].flags &= ~CCW_FLAG_PCI;
	return __lcs_resume_channel(channel);
}

/**
 * Put a processed buffer back to state empty.
 */
static void
lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	unsigned long flags;

	LCS_DBF_TEXT(5, trace, "relbuff");
	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
	       buffer->state != LCS_BUF_STATE_PROCESSED);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	buffer->state = LCS_BUF_STATE_EMPTY;
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
}

/**
 * Get buffer for a lan command.
 * `count` is the command payload size; a terminating zero __u16 is
 * appended after the payload.
 */
static struct lcs_buffer *
lcs_get_lancmd(struct lcs_card *card, int count)
{
	struct lcs_buffer *buffer;
	struct lcs_cmd *cmd;

	LCS_DBF_TEXT(4, trace, "getlncmd");
	/* Get buffer and wait if none is available. */
*/ 755 wait_event(card->write.wait_q, 756 ((buffer = lcs_get_buffer(&card->write)) != NULL)); 757 count += sizeof(struct lcs_header); 758 *(__u16 *)(buffer->data + count) = 0; 759 buffer->count = count + sizeof(__u16); 760 buffer->callback = lcs_release_buffer; 761 cmd = (struct lcs_cmd *) buffer->data; 762 cmd->offset = count; 763 cmd->type = LCS_FRAME_TYPE_CONTROL; 764 cmd->slot = 0; 765 return buffer; 766 } 767 768 769 static void 770 lcs_get_reply(struct lcs_reply *reply) 771 { 772 WARN_ON(atomic_read(&reply->refcnt) <= 0); 773 atomic_inc(&reply->refcnt); 774 } 775 776 static void 777 lcs_put_reply(struct lcs_reply *reply) 778 { 779 WARN_ON(atomic_read(&reply->refcnt) <= 0); 780 if (atomic_dec_and_test(&reply->refcnt)) { 781 kfree(reply); 782 } 783 784 } 785 786 static struct lcs_reply * 787 lcs_alloc_reply(struct lcs_cmd *cmd) 788 { 789 struct lcs_reply *reply; 790 791 LCS_DBF_TEXT(4, trace, "getreply"); 792 793 reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC); 794 if (!reply) 795 return NULL; 796 atomic_set(&reply->refcnt,1); 797 reply->sequence_no = cmd->sequence_no; 798 reply->received = 0; 799 reply->rc = 0; 800 init_waitqueue_head(&reply->wait_q); 801 802 return reply; 803 } 804 805 /** 806 * Notifier function for lancmd replies. Called from read irq. 
807 */ 808 static void 809 lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) 810 { 811 struct list_head *l, *n; 812 struct lcs_reply *reply; 813 814 LCS_DBF_TEXT(4, trace, "notiwait"); 815 spin_lock(&card->lock); 816 list_for_each_safe(l, n, &card->lancmd_waiters) { 817 reply = list_entry(l, struct lcs_reply, list); 818 if (reply->sequence_no == cmd->sequence_no) { 819 lcs_get_reply(reply); 820 list_del_init(&reply->list); 821 if (reply->callback != NULL) 822 reply->callback(card, cmd); 823 reply->received = 1; 824 reply->rc = cmd->return_code; 825 wake_up(&reply->wait_q); 826 lcs_put_reply(reply); 827 break; 828 } 829 } 830 spin_unlock(&card->lock); 831 } 832 833 /** 834 * Emit buffer of a lan command. 835 */ 836 static void 837 lcs_lancmd_timeout(struct timer_list *t) 838 { 839 struct lcs_reply *reply = from_timer(reply, t, timer); 840 struct lcs_reply *list_reply, *r; 841 unsigned long flags; 842 843 LCS_DBF_TEXT(4, trace, "timeout"); 844 spin_lock_irqsave(&reply->card->lock, flags); 845 list_for_each_entry_safe(list_reply, r, 846 &reply->card->lancmd_waiters,list) { 847 if (reply == list_reply) { 848 lcs_get_reply(reply); 849 list_del_init(&reply->list); 850 spin_unlock_irqrestore(&reply->card->lock, flags); 851 reply->received = 1; 852 reply->rc = -ETIME; 853 wake_up(&reply->wait_q); 854 lcs_put_reply(reply); 855 return; 856 } 857 } 858 spin_unlock_irqrestore(&reply->card->lock, flags); 859 } 860 861 static int 862 lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, 863 void (*reply_callback)(struct lcs_card *, struct lcs_cmd *)) 864 { 865 struct lcs_reply *reply; 866 struct lcs_cmd *cmd; 867 unsigned long flags; 868 int rc; 869 870 LCS_DBF_TEXT(4, trace, "sendcmd"); 871 cmd = (struct lcs_cmd *) buffer->data; 872 cmd->return_code = 0; 873 cmd->sequence_no = card->sequence_no++; 874 reply = lcs_alloc_reply(cmd); 875 if (!reply) 876 return -ENOMEM; 877 reply->callback = reply_callback; 878 reply->card = card; 879 
spin_lock_irqsave(&card->lock, flags); 880 list_add_tail(&reply->list, &card->lancmd_waiters); 881 spin_unlock_irqrestore(&card->lock, flags); 882 883 buffer->callback = lcs_release_buffer; 884 rc = lcs_ready_buffer(&card->write, buffer); 885 if (rc) 886 return rc; 887 timer_setup(&reply->timer, lcs_lancmd_timeout, 0); 888 mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout); 889 wait_event(reply->wait_q, reply->received); 890 del_timer_sync(&reply->timer); 891 LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); 892 rc = reply->rc; 893 lcs_put_reply(reply); 894 return rc ? -EIO : 0; 895 } 896 897 /** 898 * LCS startup command 899 */ 900 static int 901 lcs_send_startup(struct lcs_card *card, __u8 initiator) 902 { 903 struct lcs_buffer *buffer; 904 struct lcs_cmd *cmd; 905 906 LCS_DBF_TEXT(2, trace, "startup"); 907 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); 908 cmd = (struct lcs_cmd *) buffer->data; 909 cmd->cmd_code = LCS_CMD_STARTUP; 910 cmd->initiator = initiator; 911 cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE; 912 return lcs_send_lancmd(card, buffer, NULL); 913 } 914 915 /** 916 * LCS shutdown command 917 */ 918 static int 919 lcs_send_shutdown(struct lcs_card *card) 920 { 921 struct lcs_buffer *buffer; 922 struct lcs_cmd *cmd; 923 924 LCS_DBF_TEXT(2, trace, "shutdown"); 925 buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); 926 cmd = (struct lcs_cmd *) buffer->data; 927 cmd->cmd_code = LCS_CMD_SHUTDOWN; 928 cmd->initiator = LCS_INITIATOR_TCPIP; 929 return lcs_send_lancmd(card, buffer, NULL); 930 } 931 932 /** 933 * LCS lanstat command 934 */ 935 static void 936 __lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd) 937 { 938 LCS_DBF_TEXT(2, trace, "statcb"); 939 memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH); 940 } 941 942 static int 943 lcs_send_lanstat(struct lcs_card *card) 944 { 945 struct lcs_buffer *buffer; 946 struct lcs_cmd *cmd; 947 948 LCS_DBF_TEXT(2,trace, "cmdstat"); 949 buffer = lcs_get_lancmd(card, 
				LCS_STD_CMD_SIZE);
	cmd = (struct lcs_cmd *) buffer->data;
	/* Setup lanstat command. */
	cmd->cmd_code = LCS_CMD_LANSTAT;
	cmd->initiator = LCS_INITIATOR_TCPIP;
	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
	cmd->cmd.lcs_std_cmd.portno = card->portno;
	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
}

/**
 * send stoplan command
 */
static int
lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
{
	struct lcs_buffer *buffer;
	struct lcs_cmd *cmd;

	LCS_DBF_TEXT(2, trace, "cmdstpln");
	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
	cmd = (struct lcs_cmd *) buffer->data;
	cmd->cmd_code = LCS_CMD_STOPLAN;
	cmd->initiator = initiator;
	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
	cmd->cmd.lcs_std_cmd.portno = card->portno;
	return lcs_send_lancmd(card, buffer, NULL);
}

/**
 * send startlan command
 */
static void
__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
{
	LCS_DBF_TEXT(2, trace, "srtlancb");
	/* The reply carries the lan type/port the card actually started. */
	card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
	card->portno = cmd->cmd.lcs_std_cmd.portno;
}

static int
lcs_send_startlan(struct lcs_card *card, __u8 initiator)
{
	struct lcs_buffer *buffer;
	struct lcs_cmd *cmd;

	LCS_DBF_TEXT(2, trace, "cmdstaln");
	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
	cmd = (struct lcs_cmd *) buffer->data;
	cmd->cmd_code = LCS_CMD_STARTLAN;
	cmd->initiator = initiator;
	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
	cmd->cmd.lcs_std_cmd.portno = card->portno;
	return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
}

#ifdef CONFIG_IP_MULTICAST
/**
 * send setipm command (Multicast)
 */
static int
lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
{
	struct lcs_buffer *buffer;
	struct lcs_cmd *cmd;

	LCS_DBF_TEXT(2, trace, "cmdsetim");
	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
	cmd = (struct lcs_cmd *) buffer->data;
	cmd->cmd_code = LCS_CMD_SETIPM;
	cmd->initiator = LCS_INITIATOR_TCPIP;
	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
	cmd->cmd.lcs_qipassist.portno = card->portno;
	cmd->cmd.lcs_qipassist.version = 4;
	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
	return lcs_send_lancmd(card, buffer, NULL);
}

/**
 * send delipm command (Multicast)
 */
static int
lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
{
	struct lcs_buffer *buffer;
	struct lcs_cmd *cmd;

	LCS_DBF_TEXT(2, trace, "cmddelim");
	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
	cmd = (struct lcs_cmd *) buffer->data;
	cmd->cmd_code = LCS_CMD_DELIPM;
	cmd->initiator = LCS_INITIATOR_TCPIP;
	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
	cmd->cmd.lcs_qipassist.portno = card->portno;
	cmd->cmd.lcs_qipassist.version = 4;
	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
	return lcs_send_lancmd(card, buffer, NULL);
}

/**
 * check if multicast is supported by LCS
 */
static void
__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
{
	LCS_DBF_TEXT(2, trace, "chkmccb");
	card->ip_assists_supported =
		cmd->cmd.lcs_qipassist.ip_assists_supported;
	card->ip_assists_enabled =
		cmd->cmd.lcs_qipassist.ip_assists_enabled;
}

static int
lcs_check_multicast_support(struct lcs_card *card)
{
	struct lcs_buffer *buffer;
	struct lcs_cmd *cmd;
	int rc;

	LCS_DBF_TEXT(2, trace, "cmdqipa");
	/* Send query ipassist. */
	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
	cmd = (struct lcs_cmd *) buffer->data;
	cmd->cmd_code = LCS_CMD_QIPASSIST;
	cmd->initiator = LCS_INITIATOR_TCPIP;
	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
	cmd->cmd.lcs_qipassist.portno = card->portno;
	cmd->cmd.lcs_qipassist.version = 4;
	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
	rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
	if (rc != 0) {
		pr_err("Query IPAssist failed. Assuming unsupported!\n");
		return -EOPNOTSUPP;
	}
	if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
		return 0;
	return -EOPNOTSUPP;
}

/**
 * set or del multicast address on LCS card
 */
static void
lcs_fix_multicast_list(struct lcs_card *card)
{
	struct list_head failed_list;
	struct lcs_ipm_list *ipm, *tmp;
	unsigned long flags;
	int rc;

	LCS_DBF_TEXT(4,trace, "fixipm");
	INIT_LIST_HEAD(&failed_list);
	spin_lock_irqsave(&card->ipm_lock, flags);
list_modified:
	list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
		switch (ipm->ipm_state) {
		case LCS_IPM_STATE_SET_REQUIRED:
			/* del from ipm_list so no one else can tamper with
			 * this entry */
			list_del_init(&ipm->list);
			spin_unlock_irqrestore(&card->ipm_lock, flags);
			rc = lcs_send_setipm(card, ipm);
			spin_lock_irqsave(&card->ipm_lock, flags);
			if (rc) {
				pr_info("Adding multicast address failed."
					" Table possibly full!\n");
				/* store ipm in failed list -> will be added
				 * to ipm_list again, so a retry will be done
				 * during the next call of this function */
				list_add_tail(&ipm->list, &failed_list);
			} else {
				ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
				/* re-insert into ipm_list */
				list_add_tail(&ipm->list, &card->ipm_list);
			}
			/* The lock was dropped: restart the list walk. */
			goto list_modified;
		case LCS_IPM_STATE_DEL_REQUIRED:
			list_del(&ipm->list);
			spin_unlock_irqrestore(&card->ipm_lock, flags);
			lcs_send_delipm(card, ipm);
			spin_lock_irqsave(&card->ipm_lock, flags);
			kfree(ipm);
			goto list_modified;
		case LCS_IPM_STATE_ON_CARD:
			break;
		}
	}
	/* re-insert all entries from the failed_list into ipm_list */
	list_for_each_entry_safe(ipm, tmp, &failed_list, list)
		list_move_tail(&ipm->list, &card->ipm_list);

	spin_unlock_irqrestore(&card->ipm_lock, flags);
}

/**
 * get mac address for the relevant Multicast address
 */
static void
lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
{
	LCS_DBF_TEXT(4,trace, "getmac");
	ip_eth_mc_map(ipm, mac);
}

/**
 * function called by net device to handle multicast address relevant things
 * Marks list entries that no longer appear in the device's IPv4
 * multicast list as DEL_REQUIRED.
 */
static void lcs_remove_mc_addresses(struct lcs_card *card,
				    struct in_device *in4_dev)
{
	struct ip_mc_list *im4;
	struct list_head *l;
	struct lcs_ipm_list *ipm;
	unsigned long flags;
	char buf[MAX_ADDR_LEN];

	LCS_DBF_TEXT(4, trace, "remmclst");
	spin_lock_irqsave(&card->ipm_lock, flags);
	list_for_each(l, &card->ipm_list) {
		ipm = list_entry(l, struct lcs_ipm_list, list);
		/* Keep the entry if it still matches a device mc address. */
		for (im4 = rcu_dereference(in4_dev->mc_list);
		     im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
			     (memcmp(buf, &ipm->ipm.mac_addr,
LCS_MAC_LENGTH) == 0) ) 1181 break; 1182 } 1183 if (im4 == NULL) 1184 ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED; 1185 } 1186 spin_unlock_irqrestore(&card->ipm_lock, flags); 1187 } 1188 1189 static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card, 1190 struct ip_mc_list *im4, 1191 char *buf) 1192 { 1193 struct lcs_ipm_list *tmp, *ipm = NULL; 1194 struct list_head *l; 1195 unsigned long flags; 1196 1197 LCS_DBF_TEXT(4, trace, "chkmcent"); 1198 spin_lock_irqsave(&card->ipm_lock, flags); 1199 list_for_each(l, &card->ipm_list) { 1200 tmp = list_entry(l, struct lcs_ipm_list, list); 1201 if ( (tmp->ipm.ip_addr == im4->multiaddr) && 1202 (memcmp(buf, &tmp->ipm.mac_addr, 1203 LCS_MAC_LENGTH) == 0) ) { 1204 ipm = tmp; 1205 break; 1206 } 1207 } 1208 spin_unlock_irqrestore(&card->ipm_lock, flags); 1209 return ipm; 1210 } 1211 1212 static void lcs_set_mc_addresses(struct lcs_card *card, 1213 struct in_device *in4_dev) 1214 { 1215 1216 struct ip_mc_list *im4; 1217 struct lcs_ipm_list *ipm; 1218 char buf[MAX_ADDR_LEN]; 1219 unsigned long flags; 1220 1221 LCS_DBF_TEXT(4, trace, "setmclst"); 1222 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; 1223 im4 = rcu_dereference(im4->next_rcu)) { 1224 lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); 1225 ipm = lcs_check_addr_entry(card, im4, buf); 1226 if (ipm != NULL) 1227 continue; /* Address already in list. 
*/ 1228 ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); 1229 if (ipm == NULL) { 1230 pr_info("Not enough memory to add" 1231 " new multicast entry!\n"); 1232 break; 1233 } 1234 memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); 1235 ipm->ipm.ip_addr = im4->multiaddr; 1236 ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED; 1237 spin_lock_irqsave(&card->ipm_lock, flags); 1238 LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4); 1239 list_add(&ipm->list, &card->ipm_list); 1240 spin_unlock_irqrestore(&card->ipm_lock, flags); 1241 } 1242 } 1243 1244 static int 1245 lcs_register_mc_addresses(void *data) 1246 { 1247 struct lcs_card *card; 1248 struct in_device *in4_dev; 1249 1250 card = (struct lcs_card *) data; 1251 1252 if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD)) 1253 return 0; 1254 LCS_DBF_TEXT(4, trace, "regmulti"); 1255 1256 in4_dev = in_dev_get(card->dev); 1257 if (in4_dev == NULL) 1258 goto out; 1259 rcu_read_lock(); 1260 lcs_remove_mc_addresses(card,in4_dev); 1261 lcs_set_mc_addresses(card, in4_dev); 1262 rcu_read_unlock(); 1263 in_dev_put(in4_dev); 1264 1265 netif_carrier_off(card->dev); 1266 netif_tx_disable(card->dev); 1267 wait_event(card->write.wait_q, 1268 (card->write.state != LCS_CH_STATE_RUNNING)); 1269 lcs_fix_multicast_list(card); 1270 if (card->state == DEV_STATE_UP) { 1271 netif_carrier_on(card->dev); 1272 netif_wake_queue(card->dev); 1273 } 1274 out: 1275 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); 1276 return 0; 1277 } 1278 #endif /* CONFIG_IP_MULTICAST */ 1279 1280 /** 1281 * function called by net device to 1282 * handle multicast address relevant things 1283 */ 1284 static void 1285 lcs_set_multicast_list(struct net_device *dev) 1286 { 1287 #ifdef CONFIG_IP_MULTICAST 1288 struct lcs_card *card; 1289 1290 LCS_DBF_TEXT(4, trace, "setmulti"); 1291 card = (struct lcs_card *) dev->ml_priv; 1292 1293 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) 1294 schedule_work(&card->kernel_thread_starter); 1295 #endif /* CONFIG_IP_MULTICAST */ 1296 
}

/*
 * Returns 0 for a valid irb; otherwise the irb is an ERR_PTR and the
 * PTR_ERR-encoded error is logged and returned.
 */
static long
lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		dev_warn(&cdev->dev,
			"An I/O-error occurred on the LCS device\n");
		LCS_DBF_TEXT(2, trace, "ckirberr");
		LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
		break;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev,
			"A command timed out on the LCS device\n");
		LCS_DBF_TEXT(2, trace, "ckirberr");
		LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
		break;
	default:
		dev_warn(&cdev->dev,
			"An error occurred on the LCS device, rc=%ld\n",
			PTR_ERR(irb));
		LCS_DBF_TEXT(2, trace, "ckirberr");
		LCS_DBF_TEXT(2, trace, " rc???");
	}
	return PTR_ERR(irb);
}

/*
 * Inspect channel status, device status and sense data of the irb.
 * Returns 1 when the condition requires channel recovery, 0 when the
 * interrupt can be handled normally (command reject, zero sense).
 */
static int
lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		LCS_DBF_TEXT(2, trace, "CGENCHK");
		return 1;
	}
	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[LCS_SENSE_BYTE_1] &
		    LCS_SENSE_RESETTING_EVENT) {
			LCS_DBF_TEXT(2, trace, "REVIND");
			return 1;
		}
		if (sense[LCS_SENSE_BYTE_0] &
		    LCS_SENSE_CMD_REJECT) {
			LCS_DBF_TEXT(2, trace, "CMDREJ");
			return 0;
		}
		if ((!sense[LCS_SENSE_BYTE_0]) &&
		    (!sense[LCS_SENSE_BYTE_1]) &&
		    (!sense[LCS_SENSE_BYTE_2]) &&
		    (!sense[LCS_SENSE_BYTE_3])) {
			LCS_DBF_TEXT(2, trace, "ZEROSEN");
			return 0;
		}
		LCS_DBF_TEXT(2, trace, "DGENCHK");
		return 1;
	}
	return 0;
}

/* Kick off the recovery kernel thread via the thread starter work item. */
static void
lcs_schedule_recovery(struct lcs_card *card)
{
	LCS_DBF_TEXT(2, trace, "startrec");
	if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
		schedule_work(&card->kernel_thread_starter);
}

/**
 * IRQ Handler for LCS channels
 * Classifies the interrupt, retires processed ccw buffers up to the
 * reported cpa, updates the channel state machine, and defers the rest
 * to the channel's tasklet.
 */
static void
lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct lcs_card *card;
	struct lcs_channel *channel;
	int rc, index;
	int cstat, dstat;

	if (lcs_check_irb_error(cdev, irb))
		return;

	card = CARD_FROM_DEV(cdev);
	if (card->read.ccwdev == cdev)
		channel = &card->read;
	else
		channel = &card->write;

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;
	LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
		      irb->scsw.cmd.dstat);
	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
		      irb->scsw.cmd.actl);

	/* Check for channel and device errors presented */
	rc = lcs_get_problem(cdev, irb);
	if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
		dev_warn(&cdev->dev,
			"The LCS device stopped because of an error,"
			" dstat=0x%X, cstat=0x%X \n",
			dstat, cstat);
		if (rc) {
			channel->state = LCS_CH_STATE_ERROR;
		}
	}
	if (channel->state == LCS_CH_STATE_ERROR) {
		lcs_schedule_recovery(card);
		wake_up(&card->wait_q);
		return;
	}
	/* How far in the ccw chain have we processed? */
	if ((channel->state != LCS_CH_STATE_INIT) &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (irb->scsw.cmd.cpa != 0)) {
		/* cpa points into the channel's ccw array; convert it to
		 * a buffer index. */
		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
			- channel->ccws;
		if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
		    (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
			/* Bloody io subsystem tells us lies about cpa...
			 */
			index = (index - 1) & (LCS_NUM_BUFFS - 1);
		while (channel->io_idx != index) {
			__lcs_processed_buffer(channel,
					channel->iob + channel->io_idx);
			channel->io_idx =
				(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
		}
	}

	if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
	    (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
		/* Mark channel as stopped. */
		channel->state = LCS_CH_STATE_STOPPED;
	else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
		/* CCW execution stopped on a suspend bit. */
		channel->state = LCS_CH_STATE_SUSPENDED;
	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		if (irb->scsw.cmd.cc != 0) {
			/* Halt did not take effect yet; retry it. */
			ccw_device_halt(channel->ccwdev, (addr_t) channel);
			return;
		}
		/* The channel has been stopped by halt_IO. */
		channel->state = LCS_CH_STATE_HALTED;
	}
	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
		channel->state = LCS_CH_STATE_CLEARED;
	/* Do the rest in the tasklet. */
	tasklet_schedule(&channel->irq_tasklet);
}

/**
 * Tasklet for IRQ handler
 * Runs the per-buffer callbacks for processed buffers and restarts or
 * resumes the channel as needed.
 */
static void
lcs_tasklet(unsigned long data)
{
	unsigned long flags;
	struct lcs_channel *channel;
	struct lcs_buffer *iob;
	int buf_idx;

	channel = (struct lcs_channel *) data;
	LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));

	/* Check for processed buffers. */
	iob = channel->iob;
	buf_idx = channel->buf_idx;
	while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
		/* Do the callback thing.
		 */
		if (iob[buf_idx].callback != NULL)
			iob[buf_idx].callback(channel, iob + buf_idx);
		buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
	}
	channel->buf_idx = buf_idx;

	/* NOTE(review): return value of lcs_start_channel() is ignored
	 * here - verify whether a failure needs handling. */
	if (channel->state == LCS_CH_STATE_STOPPED)
		lcs_start_channel(channel);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	if (channel->state == LCS_CH_STATE_SUSPENDED &&
	    channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
		__lcs_resume_channel(channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	/* Something happened on the channel. Wake up waiters. */
	wake_up(&channel->wait_q);
}

/**
 * Finish current tx buffer and make it ready for transmit.
 * Appends the terminating zero offset word, hands the buffer to the
 * write channel and bumps the in-flight counter.  Caller holds
 * card->lock.
 */
static void
__lcs_emit_txbuffer(struct lcs_card *card)
{
	LCS_DBF_TEXT(5, trace, "emittx");
	*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
	card->tx_buffer->count += 2;
	lcs_ready_buffer(&card->write, card->tx_buffer);
	card->tx_buffer = NULL;
	card->tx_emitted++;
}

/**
 * Callback for finished tx buffers.
 */
static void
lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	struct lcs_card *card;

	LCS_DBF_TEXT(5, trace, "txbuffcb");
	/* Put buffer back to pool. */
	lcs_release_buffer(channel, buffer);
	card = container_of(channel, struct lcs_card, write);
	if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
		netif_wake_queue(card->dev);
	spin_lock(&card->lock);
	card->tx_emitted--;
	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
		/*
		 * Last running tx buffer has finished. Submit partially
		 * filled current buffer.
		 */
		__lcs_emit_txbuffer(card);
	spin_unlock(&card->lock);
}

/**
 * Packet transmit function called by network stack
 * Frames are packed back to back into the current tx buffer; a buffer is
 * emitted when it is full or when no other buffer is in flight.
 */
static int
__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
		 struct net_device *dev)
{
	struct lcs_header *header;
	int rc = NETDEV_TX_OK;

	LCS_DBF_TEXT(5, trace, "hardxmit");
	if (skb == NULL) {
		card->stats.tx_dropped++;
		card->stats.tx_errors++;
		return NETDEV_TX_OK;
	}
	if (card->state != DEV_STATE_UP) {
		dev_kfree_skb(skb);
		card->stats.tx_dropped++;
		card->stats.tx_errors++;
		card->stats.tx_carrier_errors++;
		return NETDEV_TX_OK;
	}
	/* IPv6 frames are silently dropped (no IPv6 support here). */
	if (skb->protocol == htons(ETH_P_IPV6)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	netif_stop_queue(card->dev);
	spin_lock(&card->lock);
	if (card->tx_buffer != NULL &&
	    card->tx_buffer->count + sizeof(struct lcs_header) +
	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
		/* skb too big for current tx buffer.
		 */
		__lcs_emit_txbuffer(card);
	if (card->tx_buffer == NULL) {
		/* Get new tx buffer */
		card->tx_buffer = lcs_get_buffer(&card->write);
		if (card->tx_buffer == NULL) {
			card->stats.tx_dropped++;
			rc = NETDEV_TX_BUSY;
			goto out;
		}
		card->tx_buffer->callback = lcs_txbuffer_cb;
		card->tx_buffer->count = 0;
	}
	header = (struct lcs_header *)
		(card->tx_buffer->data + card->tx_buffer->count);
	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
	/* header->offset is the offset of the NEXT frame in the buffer. */
	header->offset = card->tx_buffer->count;
	header->type = card->lan_type;
	header->slot = card->portno;
	skb_copy_from_linear_data(skb, header + 1, skb->len);
	spin_unlock(&card->lock);
	card->stats.tx_bytes += skb->len;
	card->stats.tx_packets++;
	dev_kfree_skb(skb);
	netif_wake_queue(card->dev);
	spin_lock(&card->lock);
	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
		/* If this is the first tx buffer emit it immediately. */
		__lcs_emit_txbuffer(card);
out:
	spin_unlock(&card->lock);
	return rc;
}

/* ndo_start_xmit entry point; thin wrapper around __lcs_start_xmit. */
static int
lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lcs_card *card;
	int rc;

	LCS_DBF_TEXT(5, trace, "pktxmit");
	card = (struct lcs_card *) dev->ml_priv;
	rc = __lcs_start_xmit(card, skb, dev);
	return rc;
}

/**
 * send startlan and lanstat command to make LCS device ready
 * Tries the compiled-in LAN types in turn and leaves card->lan_type set
 * to the first one that succeeds.
 */
static int
lcs_startlan_auto(struct lcs_card *card)
{
	int rc;

	LCS_DBF_TEXT(2, trace, "strtauto");
#ifdef CONFIG_ETHERNET
	card->lan_type = LCS_FRAME_TYPE_ENET;
	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
	if (rc == 0)
		return 0;

#endif
#ifdef CONFIG_FDDI
	card->lan_type = LCS_FRAME_TYPE_FDDI;
	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
	if (rc == 0)
		return 0;
#endif
	return -EIO;
}

/*
 * Start the LAN on the configured port, or probe ports 0..16 when no
 * port number was configured.  On success a LANSTAT command follows to
 * fetch the card's statistics/MAC.
 */
static int
lcs_startlan(struct lcs_card *card)
{
	int rc, i;

	LCS_DBF_TEXT(2, trace, "startlan");
	rc = 0;
	if (card->portno != LCS_INVALID_PORT_NO) {
		if (card->lan_type == LCS_FRAME_TYPE_AUTO)
			rc = lcs_startlan_auto(card);
		else
			rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
	} else {
		for (i = 0; i <= 16; i++) {
			card->portno = i;
			if (card->lan_type != LCS_FRAME_TYPE_AUTO)
				rc = lcs_send_startlan(card,
						       LCS_INITIATOR_TCPIP);
			else
				/* autodetecting lan type */
				rc = lcs_startlan_auto(card);
			if (rc == 0)
				break;
		}
	}
	if (rc == 0)
		return lcs_send_lanstat(card);
	return rc;
}

/**
 * LCS detect function
 * setup channels and make them I/O ready
 */
static int
lcs_detect(struct lcs_card *card)
{
	int rc = 0;

	LCS_DBF_TEXT(2, setup, "lcsdetct");
	/* start/reset card */
	if (card->dev)
		netif_stop_queue(card->dev);
	rc = lcs_stop_channels(card);
	if (rc == 0) {
		rc = lcs_start_channels(card);
		if (rc == 0) {
			rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
			if (rc == 0)
				rc = lcs_startlan(card);
		}
	}
	if (rc == 0) {
		card->state = DEV_STATE_UP;
	} else {
		card->state = DEV_STATE_DOWN;
		card->write.state = LCS_CH_STATE_INIT;
		card->read.state = LCS_CH_STATE_INIT;
	}
	return rc;
}

/**
 * LCS Stop card
 */
static int
lcs_stopcard(struct lcs_card *card)
{
	int rc;

	LCS_DBF_TEXT(3, setup, "stopcard");

	if (card->read.state != LCS_CH_STATE_STOPPED &&
	    card->write.state != LCS_CH_STATE_STOPPED &&
	    card->read.state != LCS_CH_STATE_ERROR &&
	    card->write.state != LCS_CH_STATE_ERROR &&
	    card->state == DEV_STATE_UP) {
		lcs_clear_multicast_list(card);
		/* NOTE(review): rc from stoplan is overwritten by the
		 * shutdown result below - best-effort by design? */
		rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
		rc = lcs_send_shutdown(card);
	}
	rc =
lcs_stop_channels(card); 1720 card->state = DEV_STATE_DOWN; 1721 1722 return rc; 1723 } 1724 1725 /** 1726 * Kernel Thread helper functions for LGW initiated commands 1727 */ 1728 static void 1729 lcs_start_kernel_thread(struct work_struct *work) 1730 { 1731 struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter); 1732 LCS_DBF_TEXT(5, trace, "krnthrd"); 1733 if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) 1734 kthread_run(lcs_recovery, card, "lcs_recover"); 1735 #ifdef CONFIG_IP_MULTICAST 1736 if (lcs_do_start_thread(card, LCS_SET_MC_THREAD)) 1737 kthread_run(lcs_register_mc_addresses, card, "regipm"); 1738 #endif 1739 } 1740 1741 /** 1742 * Process control frames. 1743 */ 1744 static void 1745 lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) 1746 { 1747 LCS_DBF_TEXT(5, trace, "getctrl"); 1748 if (cmd->initiator == LCS_INITIATOR_LGW) { 1749 switch(cmd->cmd_code) { 1750 case LCS_CMD_STARTUP: 1751 case LCS_CMD_STARTLAN: 1752 lcs_schedule_recovery(card); 1753 break; 1754 case LCS_CMD_STOPLAN: 1755 pr_warn("Stoplan for %s initiated by LGW\n", 1756 card->dev->name); 1757 if (card->dev) 1758 netif_carrier_off(card->dev); 1759 break; 1760 default: 1761 LCS_DBF_TEXT(5, trace, "noLGWcmd"); 1762 break; 1763 } 1764 } else 1765 lcs_notify_lancmd_waiters(card, cmd); 1766 } 1767 1768 /** 1769 * Unpack network packet. 1770 */ 1771 static void 1772 lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) 1773 { 1774 struct sk_buff *skb; 1775 1776 LCS_DBF_TEXT(5, trace, "getskb"); 1777 if (card->dev == NULL || 1778 card->state != DEV_STATE_UP) 1779 /* The card isn't up. Ignore the packet. 
		 */
		return;

	skb = dev_alloc_skb(skb_len);
	if (skb == NULL) {
		dev_err(&card->dev->dev,
			" Allocating a socket buffer to interface %s failed\n",
			card->dev->name);
		card->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, skb_data, skb_len);
	skb->protocol =	card->lan_type_trans(skb, card->dev);
	card->stats.rx_bytes += skb_len;
	card->stats.rx_packets++;
	/* 802.2 frames carry a per-card sequence number in the skb cb. */
	if (skb->protocol == htons(ETH_P_802_2))
		*((__u32 *)skb->cb) = ++card->pkt_seq;
	netif_rx(skb);
}

/**
 * LCS main routine to get packets and lancmd replies from the buffers
 * Walks the chained frames in a read buffer (each header's offset field
 * points at the next frame; 0 terminates the chain) and dispatches each
 * frame by type.
 */
static void
lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	struct lcs_card *card;
	struct lcs_header *lcs_hdr;
	__u16 offset;

	LCS_DBF_TEXT(5, trace, "lcsgtpkt");
	lcs_hdr = (struct lcs_header *) buffer->data;
	if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
		LCS_DBF_TEXT(4, trace, "-eiogpkt");
		return;
	}
	card = container_of(channel, struct lcs_card, read);
	offset = 0;
	while (lcs_hdr->offset != 0) {
		/* Offsets must be in range and strictly increasing.
		 * (The <= 0 test is redundant for an unsigned field that
		 * the loop guard already proved non-zero.) */
		if (lcs_hdr->offset <= 0 ||
		    lcs_hdr->offset > LCS_IOBUFFERSIZE ||
		    lcs_hdr->offset < offset) {
			/* Offset invalid. */
			card->stats.rx_length_errors++;
			card->stats.rx_errors++;
			return;
		}
		/* What kind of frame is it? */
		if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
			/* Control frame. */
			lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
		else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
			 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
			 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
			/* Normal network packet. */
			lcs_get_skb(card, (char *)(lcs_hdr + 1),
				    lcs_hdr->offset - offset -
				    sizeof(struct lcs_header));
		else
			/* Unknown frame type. */
			; // FIXME: error message ?
		/* Proceed to next frame.
		 */
		offset = lcs_hdr->offset;
		lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
		lcs_hdr = (struct lcs_header *) (buffer->data + offset);
	}
	/* The buffer is now empty. Make it ready again. */
	lcs_ready_buffer(&card->read, buffer);
}

/**
 * get network statistics for ifconfig and other user programs
 */
static struct net_device_stats *
lcs_getstats(struct net_device *dev)
{
	struct lcs_card *card;

	LCS_DBF_TEXT(4, trace, "netstats");
	card = (struct lcs_card *) dev->ml_priv;
	return &card->stats;
}

/**
 * stop lcs device
 * This function will be called by user doing ifconfig xxx down
 */
static int
lcs_stop_device(struct net_device *dev)
{
	struct lcs_card *card;
	int rc;

	LCS_DBF_TEXT(2, trace, "stopdev");
	card   = (struct lcs_card *) dev->ml_priv;
	netif_carrier_off(dev);
	netif_tx_disable(dev);
	dev->flags &= ~IFF_UP;
	/* Wait until the write channel drained before stopping the card. */
	wait_event(card->write.wait_q,
		(card->write.state != LCS_CH_STATE_RUNNING));
	rc = lcs_stopcard(card);
	if (rc)
		dev_err(&card->dev->dev,
			" Shutting down the LCS device failed\n");
	return rc;
}

/**
 * start lcs device and make it runnable
 * This function will be called by user doing ifconfig xxx up
 */
static int
lcs_open_device(struct net_device *dev)
{
	struct lcs_card *card;
	int rc;

	LCS_DBF_TEXT(2, trace, "opendev");
	card = (struct lcs_card *) dev->ml_priv;
	/* initialize statistics */
	rc = lcs_detect(card);
	if (rc) {
		pr_err("Error in opening device!\n");

	} else {
		dev->flags |= IFF_UP;
		netif_carrier_on(dev);
		netif_wake_queue(dev);
		card->state = DEV_STATE_UP;
	}
	return rc;
}

/**
 * show function for portno called by cat or similar things
 */
static ssize_t
lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct lcs_card *card;

	card = dev_get_drvdata(dev);

	if (!card)
		return 0;

	return sprintf(buf, "%d\n", card->portno);
}

/**
 * store the value which is piped to file portno
 */
static ssize_t
lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct lcs_card *card;
	int rc;
	s16 value;

	card = dev_get_drvdata(dev);

	if (!card)
		return 0;

	rc = kstrtos16(buf, 0, &value);
	if (rc)
		return -EINVAL;
	/* TODO: sanity checks */
	card->portno = value;

	return count;

}

static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);

/* Human-readable names indexed by the ccw driver_info channel type. */
static const char *lcs_type[] = {
	"not a channel",
	"2216 parallel",
	"2216 channel",
	"OSA LCS card",
	"unknown channel type",
	"unsupported channel type",
};

static ssize_t
lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccwgroup_device *cgdev;

	cgdev = to_ccwgroupdev(dev);
	if (!cgdev)
		return -ENODEV;

	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
}

static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);

/* show function for the lancmd_timeout sysfs attribute */
static ssize_t
lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct lcs_card *card;

	card = dev_get_drvdata(dev);

	return card ?
sprintf(buf, "%u\n", card->lancmd_timeout) : 0; 1986 } 1987 1988 static ssize_t 1989 lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1990 { 1991 struct lcs_card *card; 1992 unsigned int value; 1993 int rc; 1994 1995 card = dev_get_drvdata(dev); 1996 1997 if (!card) 1998 return 0; 1999 2000 rc = kstrtouint(buf, 0, &value); 2001 if (rc) 2002 return -EINVAL; 2003 /* TODO: sanity checks */ 2004 card->lancmd_timeout = value; 2005 2006 return count; 2007 2008 } 2009 2010 static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); 2011 2012 static ssize_t 2013 lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, 2014 const char *buf, size_t count) 2015 { 2016 struct lcs_card *card = dev_get_drvdata(dev); 2017 char *tmp; 2018 int i; 2019 2020 if (!card) 2021 return -EINVAL; 2022 if (card->state != DEV_STATE_UP) 2023 return -EPERM; 2024 i = simple_strtoul(buf, &tmp, 16); 2025 if (i == 1) 2026 lcs_schedule_recovery(card); 2027 return count; 2028 } 2029 2030 static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store); 2031 2032 static struct attribute * lcs_attrs[] = { 2033 &dev_attr_portno.attr, 2034 &dev_attr_type.attr, 2035 &dev_attr_lancmd_timeout.attr, 2036 &dev_attr_recover.attr, 2037 NULL, 2038 }; 2039 static struct attribute_group lcs_attr_group = { 2040 .attrs = lcs_attrs, 2041 }; 2042 static const struct attribute_group *lcs_attr_groups[] = { 2043 &lcs_attr_group, 2044 NULL, 2045 }; 2046 static const struct device_type lcs_devtype = { 2047 .name = "lcs", 2048 .groups = lcs_attr_groups, 2049 }; 2050 2051 /** 2052 * lcs_probe_device is called on establishing a new ccwgroup_device. 
 */
static int
lcs_probe_device(struct ccwgroup_device *ccwgdev)
{
	struct lcs_card *card;

	if (!get_device(&ccwgdev->dev))
		return -ENODEV;

	LCS_DBF_TEXT(2, setup, "add_dev");
	card = lcs_alloc_card();
	if (!card) {
		LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM);
		put_device(&ccwgdev->dev);
		return -ENOMEM;
	}
	dev_set_drvdata(&ccwgdev->dev, card);
	ccwgdev->cdev[0]->handler = lcs_irq;
	ccwgdev->cdev[1]->handler = lcs_irq;
	card->gdev = ccwgdev;
	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
	card->thread_start_mask = 0;
	card->thread_allowed_mask = 0;
	card->thread_running_mask = 0;
	ccwgdev->dev.type = &lcs_devtype;

	return 0;
}

/* Register the net_device unless it is already registered (recovery). */
static int
lcs_register_netdev(struct ccwgroup_device *ccwgdev)
{
	struct lcs_card *card;

	LCS_DBF_TEXT(2, setup, "regnetdv");
	card = dev_get_drvdata(&ccwgdev->dev);
	if (card->dev->reg_state != NETREG_UNINITIALIZED)
		return 0;
	SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
	return register_netdev(card->dev);
}

/**
 * lcs_new_device will be called by setting the group device online.
 */
static const struct net_device_ops lcs_netdev_ops = {
	.ndo_open		= lcs_open_device,
	.ndo_stop		= lcs_stop_device,
	.ndo_get_stats		= lcs_getstats,
	.ndo_start_xmit		= lcs_start_xmit,
};

/* Variant with multicast support; selected when the card supports it. */
static const struct net_device_ops lcs_mc_netdev_ops = {
	.ndo_open		= lcs_open_device,
	.ndo_stop		= lcs_stop_device,
	.ndo_get_stats		= lcs_getstats,
	.ndo_start_xmit		= lcs_start_xmit,
	.ndo_set_rx_mode	= lcs_set_multicast_list,
};

/*
 * Bring the group device online: set both ccw devices online, detect the
 * card, allocate the net_device on first use and register it.  In the
 * recovery case (card->state == DEV_STATE_RECOVER) the existing
 * net_device is reused and brought back up.
 */
static int
lcs_new_device(struct ccwgroup_device *ccwgdev)
{
	struct lcs_card *card;
	struct net_device *dev=NULL;
	enum lcs_dev_states recover_state;
	int rc;

	card = dev_get_drvdata(&ccwgdev->dev);
	if (!card)
		return -ENODEV;

	LCS_DBF_TEXT(2, setup, "newdev");
	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
	card->read.ccwdev  = ccwgdev->cdev[0];
	card->write.ccwdev = ccwgdev->cdev[1];

	recover_state = card->state;
	rc = ccw_device_set_online(card->read.ccwdev);
	if (rc)
		goto out_err;
	rc = ccw_device_set_online(card->write.ccwdev);
	if (rc)
		goto out_werr;

	LCS_DBF_TEXT(3, setup, "lcsnewdv");

	lcs_setup_card(card);
	rc = lcs_detect(card);
	if (rc) {
		LCS_DBF_TEXT(2, setup, "dtctfail");
		dev_err(&ccwgdev->dev,
			"Detecting a network adapter for LCS devices"
			" failed with rc=%d (0x%x)\n", rc, rc);
		lcs_stopcard(card);
		goto out;
	}
	if (card->dev) {
		/* Recovery path: reuse the existing net_device. */
		LCS_DBF_TEXT(2, setup, "samedev");
		LCS_DBF_HEX(3, setup, &card, sizeof(void*));
		goto netdev_out;
	}
	switch (card->lan_type) {
#ifdef CONFIG_ETHERNET
	case LCS_FRAME_TYPE_ENET:
		card->lan_type_trans = eth_type_trans;
		dev = alloc_etherdev(0);
		break;
#endif
#ifdef CONFIG_FDDI
	case LCS_FRAME_TYPE_FDDI:
		card->lan_type_trans = fddi_type_trans;
		dev = alloc_fddidev(0);
		break;
#endif
	default:
		LCS_DBF_TEXT(3, setup, "errinit");
		pr_err(" Initialization failed\n");
		goto out;
	}
	if (!dev)
		goto out;
	card->dev = dev;
	card->dev->ml_priv = card;
	card->dev->netdev_ops = &lcs_netdev_ops;
	memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
	if (!lcs_check_multicast_support(card))
		card->dev->netdev_ops = &lcs_mc_netdev_ops;
#endif
netdev_out:
	lcs_set_allowed_threads(card,0xffffffff);
	if (recover_state == DEV_STATE_RECOVER) {
		lcs_set_multicast_list(card->dev);
		card->dev->flags |= IFF_UP;
		netif_carrier_on(card->dev);
		netif_wake_queue(card->dev);
		card->state = DEV_STATE_UP;
	} else {
		lcs_stopcard(card);
	}

	if (lcs_register_netdev(ccwgdev) != 0)
		goto out;

	/* Print out supported assists: IPv6 */
	pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
		(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
		"with" : "without");
	/* Print out supported assist: Multicast */
	pr_info("LCS device %s %s Multicast support\n", card->dev->name,
		(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
		"with" : "without");
	return 0;
out:
	/* Error path: take both ccw devices offline again. */
	ccw_device_set_offline(card->write.ccwdev);
out_werr:
	ccw_device_set_offline(card->read.ccwdev);
out_err:
	return -ENODEV;
}

/**
 * lcs_shutdown_device, called when setting the group device offline.
2218 */ 2219 static int 2220 __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode) 2221 { 2222 struct lcs_card *card; 2223 enum lcs_dev_states recover_state; 2224 int ret = 0, ret2 = 0, ret3 = 0; 2225 2226 LCS_DBF_TEXT(3, setup, "shtdndev"); 2227 card = dev_get_drvdata(&ccwgdev->dev); 2228 if (!card) 2229 return -ENODEV; 2230 if (recovery_mode == 0) { 2231 lcs_set_allowed_threads(card, 0); 2232 if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD)) 2233 return -ERESTARTSYS; 2234 } 2235 LCS_DBF_HEX(3, setup, &card, sizeof(void*)); 2236 recover_state = card->state; 2237 2238 ret = lcs_stop_device(card->dev); 2239 ret2 = ccw_device_set_offline(card->read.ccwdev); 2240 ret3 = ccw_device_set_offline(card->write.ccwdev); 2241 if (!ret) 2242 ret = (ret2) ? ret2 : ret3; 2243 if (ret) 2244 LCS_DBF_TEXT_(3, setup, "1err:%d", ret); 2245 if (recover_state == DEV_STATE_UP) { 2246 card->state = DEV_STATE_RECOVER; 2247 } 2248 return 0; 2249 } 2250 2251 static int 2252 lcs_shutdown_device(struct ccwgroup_device *ccwgdev) 2253 { 2254 return __lcs_shutdown_device(ccwgdev, 0); 2255 } 2256 2257 /** 2258 * drive lcs recovery after startup and startlan initiated by Lan Gateway 2259 */ 2260 static int 2261 lcs_recovery(void *ptr) 2262 { 2263 struct lcs_card *card; 2264 struct ccwgroup_device *gdev; 2265 int rc; 2266 2267 card = (struct lcs_card *) ptr; 2268 2269 LCS_DBF_TEXT(4, trace, "recover1"); 2270 if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD)) 2271 return 0; 2272 LCS_DBF_TEXT(4, trace, "recover2"); 2273 gdev = card->gdev; 2274 dev_warn(&gdev->dev, 2275 "A recovery process has been started for the LCS device\n"); 2276 rc = __lcs_shutdown_device(gdev, 1); 2277 rc = lcs_new_device(gdev); 2278 if (!rc) 2279 pr_info("Device %s successfully recovered!\n", 2280 card->dev->name); 2281 else 2282 pr_info("Device %s could not be recovered!\n", 2283 card->dev->name); 2284 lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); 2285 return 0; 2286 } 2287 2288 /** 2289 * 
lcs_remove_device, free buffers and card
 */
static void
lcs_remove_device(struct ccwgroup_device *ccwgdev)
{
	struct lcs_card *card;

	card = dev_get_drvdata(&ccwgdev->dev);
	if (!card)
		return;

	LCS_DBF_TEXT(3, setup, "remdev");
	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
	/* Take the group device offline first if it is still online. */
	if (ccwgdev->state == CCWGROUP_ONLINE) {
		lcs_shutdown_device(ccwgdev);
	}
	/* Teardown order matters: unregister netdev before freeing card. */
	if (card->dev)
		unregister_netdev(card->dev);
	lcs_cleanup_card(card);
	lcs_free_card(card);
	dev_set_drvdata(&ccwgdev->dev, NULL);
	/* NOTE(review): drops a reference presumably taken at probe time
	 * — confirm against lcs_probe_device (not visible in this chunk). */
	put_device(&ccwgdev->dev);
}

/* Power-management suspend: detach netdev, stop threads, shut down card. */
static int lcs_pm_suspend(struct lcs_card *card)
{
	if (card->dev)
		netif_device_detach(card->dev);
	lcs_set_allowed_threads(card, 0);
	lcs_wait_for_threads(card, 0xffffffff);
	/* Recovery-mode shutdown: thread bookkeeping already done above. */
	if (card->state != DEV_STATE_DOWN)
		__lcs_shutdown_device(card->gdev, 1);
	return 0;
}

/* Power-management resume: restart the card if it was up before suspend. */
static int lcs_pm_resume(struct lcs_card *card)
{
	int rc = 0;

	/* DEV_STATE_RECOVER was set by the suspend-time shutdown. */
	if (card->state == DEV_STATE_RECOVER)
		rc = lcs_new_device(card->gdev);
	if (card->dev)
		netif_device_attach(card->dev);
	if (rc) {
		dev_warn(&card->gdev->dev, "The lcs device driver "
			"failed to recover the device\n");
	}
	return rc;
}

/* PM prepare callback: nothing to do for LCS. */
static int lcs_prepare(struct ccwgroup_device *gdev)
{
	return 0;
}

/* PM complete callback: nothing to do for LCS. */
static void lcs_complete(struct ccwgroup_device *gdev)
{
	return;
}

static int lcs_freeze(struct ccwgroup_device *gdev)
{
	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
	return lcs_pm_suspend(card);
}

static int lcs_thaw(struct ccwgroup_device *gdev)
{
	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
	return lcs_pm_resume(card);
}

static int lcs_restore(struct ccwgroup_device *gdev)
{
	struct lcs_card *card = dev_get_drvdata(&gdev->dev);
	return lcs_pm_resume(card);
}

static struct
ccw_device_id lcs_ids[] = {
	/* 3088 CU models handled by this driver */
	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
	{},
};
MODULE_DEVICE_TABLE(ccw, lcs_ids);

static struct ccw_driver lcs_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "lcs",
	},
	.ids = lcs_ids,
	/* individual ccw devices are managed by the ccwgroup layer */
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
	.int_class = IRQIO_LCS,
};

/**
 * LCS ccwgroup driver registration
 */
static struct ccwgroup_driver lcs_group_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "lcs",
	},
	.ccw_driver = &lcs_ccw_driver,
	.setup = lcs_probe_device,
	.remove = lcs_remove_device,
	.set_online = lcs_new_device,
	.set_offline = lcs_shutdown_device,
	.prepare = lcs_prepare,
	.complete = lcs_complete,
	.freeze = lcs_freeze,
	.thaw = lcs_thaw,
	.restore = lcs_restore,
};

/* sysfs "group" attribute: create a ccwgroup device from two bus ids. */
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;
	err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf);
	return err ?
err : count; 2412 } 2413 static DRIVER_ATTR_WO(group); 2414 2415 static struct attribute *lcs_drv_attrs[] = { 2416 &driver_attr_group.attr, 2417 NULL, 2418 }; 2419 static struct attribute_group lcs_drv_attr_group = { 2420 .attrs = lcs_drv_attrs, 2421 }; 2422 static const struct attribute_group *lcs_drv_attr_groups[] = { 2423 &lcs_drv_attr_group, 2424 NULL, 2425 }; 2426 2427 /** 2428 * LCS Module/Kernel initialization function 2429 */ 2430 static int 2431 __init lcs_init_module(void) 2432 { 2433 int rc; 2434 2435 pr_info("Loading %s\n", version); 2436 rc = lcs_register_debug_facility(); 2437 LCS_DBF_TEXT(0, setup, "lcsinit"); 2438 if (rc) 2439 goto out_err; 2440 lcs_root_dev = root_device_register("lcs"); 2441 rc = PTR_ERR_OR_ZERO(lcs_root_dev); 2442 if (rc) 2443 goto register_err; 2444 rc = ccw_driver_register(&lcs_ccw_driver); 2445 if (rc) 2446 goto ccw_err; 2447 lcs_group_driver.driver.groups = lcs_drv_attr_groups; 2448 rc = ccwgroup_driver_register(&lcs_group_driver); 2449 if (rc) 2450 goto ccwgroup_err; 2451 return 0; 2452 2453 ccwgroup_err: 2454 ccw_driver_unregister(&lcs_ccw_driver); 2455 ccw_err: 2456 root_device_unregister(lcs_root_dev); 2457 register_err: 2458 lcs_unregister_debug_facility(); 2459 out_err: 2460 pr_err("Initializing the lcs device driver failed\n"); 2461 return rc; 2462 } 2463 2464 2465 /** 2466 * LCS module cleanup function 2467 */ 2468 static void 2469 __exit lcs_cleanup_module(void) 2470 { 2471 pr_info("Terminating lcs module.\n"); 2472 LCS_DBF_TEXT(0, trace, "cleanup"); 2473 ccwgroup_driver_unregister(&lcs_group_driver); 2474 ccw_driver_unregister(&lcs_ccw_driver); 2475 root_device_unregister(lcs_root_dev); 2476 lcs_unregister_debug_facility(); 2477 } 2478 2479 module_init(lcs_init_module); 2480 module_exit(lcs_cleanup_module); 2481 2482 MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>"); 2483 MODULE_LICENSE("GPL"); 2484 2485