// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <net/iucv/af_iucv.h>
#include <linux/hashtable.h>

#include "qeth_l3.h"


static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_stop(struct net_device *);
static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
		struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
		struct qeth_ipaddr *);

static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%pI4", addr);
}

static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%pI6", addr);
}

void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
			      char *buf)
{
	if (proto == QETH_PROT_IPV4)
		qeth_l3_ipaddr4_to_string(addr, buf);
	else if (proto == QETH_PROT_IPV6)
		qeth_l3_ipaddr6_to_string(addr, buf);
}

static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
{
	struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC);

	if (addr)
		qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot);
	return addr;
}

static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
						   struct qeth_ipaddr *query)
{
	u64 key = qeth_l3_ipaddr_hash(query);
	struct qeth_ipaddr *addr;

	if (query->is_multicast) {
		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
			if (qeth_l3_addr_match_ip(addr, query))
				return addr;
	} else {
		hash_for_each_possible(card->ip_htable, addr, hnode, key)
			if (qeth_l3_addr_match_ip(addr, query))
				return addr;
	}
	return NULL;
}

static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
	int i, j;
	u8 octet;

	for (i = 0; i < len; ++i) {
		octet = addr[i];
		for (j = 7; j >= 0; --j) {
			bits[i*8 + j] = octet & 1;
			octet >>= 1;
		}
	}
}
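
/* IP address takeover (IPATO): check whether @addr falls under one of the
 * configured takeover prefixes. Both the address and each IPATO entry are
 * expanded into bit arrays and compared up to the entry's mask length; the
 * per-protocol invert flags turn the match list into an exclusion list.
 */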
static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
					     struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return 0;
	if (addr->type != QETH_IP_TYPE_NORMAL)
		return 0;

	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				     (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					     (ipatoe->proto == QETH_PROT_IPV4) ?
					     4 : 16);
		if (addr->proto == QETH_PROT_IPV4)
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(32, ipatoe->mask_bits));
		else
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(128, ipatoe->mask_bits));
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}

static int qeth_l3_delete_ip(struct qeth_card *card,
			     struct qeth_ipaddr *tmp_addr)
{
	int rc = 0;
	struct qeth_ipaddr *addr;

	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
		QETH_CARD_TEXT(card, 2, "delrxip");
	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
		QETH_CARD_TEXT(card, 2, "delvipa");
	else
		QETH_CARD_TEXT(card, 2, "delip");

	if (tmp_addr->proto == QETH_PROT_IPV4)
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
	else {
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
	}

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
		return -ENOENT;

	addr->ref_counter--;
	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
		return rc;
	if (addr->in_progress)
		return -EINPROGRESS;

	if (qeth_card_hw_is_reachable(card))
		rc = qeth_l3_deregister_addr_entry(card, addr);

	hash_del(&addr->hnode);
	kfree(addr);

	return rc;
}
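
/* Add @tmp_addr to the card's IP table, or just bump the reference count if
 * a matching entry already exists. Runs under ip_lock; for IPv4 the lock is
 * dropped around the SETIP command (see the comment below).
 */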
static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
	int rc = 0;
	struct qeth_ipaddr *addr;
	char buf[40];

	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
		QETH_CARD_TEXT(card, 2, "addrxip");
	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
		QETH_CARD_TEXT(card, 2, "addvipa");
	else
		QETH_CARD_TEXT(card, 2, "addip");

	if (tmp_addr->proto == QETH_PROT_IPV4)
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
	else {
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
	}

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (addr) {
		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
			return -EADDRINUSE;
		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
			addr->ref_counter++;
			return 0;
		}
		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
					 buf);
		dev_warn(&card->gdev->dev,
			 "Registering IP address %s failed\n", buf);
		return -EADDRINUSE;
	} else {
		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
		if (!addr)
			return -ENOMEM;

		memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
		addr->ref_counter = 1;

		if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
			QETH_CARD_TEXT(card, 2, "tkovaddr");
			addr->ipato = 1;
		}
		hash_add(card->ip_htable, &addr->hnode,
			 qeth_l3_ipaddr_hash(addr));

		if (!qeth_card_hw_is_reachable(card)) {
			addr->disp_flag = QETH_DISP_ADDR_ADD;
			return 0;
		}

		/* qeth_l3_register_addr_entry() can sleep when adding an
		 * IPv4 address, because the SETIP IPA command triggers ARP
		 * processing for IPv4. So drop the spinlock while the
		 * command runs, and use the in_progress flag to mark that
		 * a hardware operation on this IPv4 address is pending.
		 */
		if (addr->proto == QETH_PROT_IPV4) {
			addr->in_progress = 1;
			spin_unlock_bh(&card->ip_lock);
			rc = qeth_l3_register_addr_entry(card, addr);
			spin_lock_bh(&card->ip_lock);
			addr->in_progress = 0;
		} else
			rc = qeth_l3_register_addr_entry(card, addr);

		if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
		    (rc == IPA_RC_LAN_OFFLINE)) {
			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
			if (addr->ref_counter < 1) {
				qeth_l3_deregister_addr_entry(card, addr);
				hash_del(&addr->hnode);
				kfree(addr);
			}
		} else {
			hash_del(&addr->hnode);
			kfree(addr);
		}
	}
	return rc;
}

static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i;

	QETH_CARD_TEXT(card, 4, "clearip");

	if (recover && card->options.sniffer)
		return;

	spin_lock_bh(&card->ip_lock);

	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
		if (!recover) {
			hash_del(&addr->hnode);
			kfree(addr);
			continue;
		}
		addr->disp_flag = QETH_DISP_ADDR_ADD;
	}

	spin_unlock_bh(&card->ip_lock);

	spin_lock_bh(&card->mclock);

	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
		hash_del(&addr->hnode);
		kfree(addr);
	}

	spin_unlock_bh(&card->mclock);
}

static void qeth_l3_recover_ip(struct qeth_card *card)
{
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i;
	int rc;

	QETH_CARD_TEXT(card, 4, "recovrip");

	spin_lock_bh(&card->ip_lock);

	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
			if (addr->proto == QETH_PROT_IPV4) {
				addr->in_progress = 1;
				spin_unlock_bh(&card->ip_lock);
				rc = qeth_l3_register_addr_entry(card, addr);
				spin_lock_bh(&card->ip_lock);
				addr->in_progress = 0;
			} else
				rc = qeth_l3_register_addr_entry(card, addr);

			if (!rc) {
				addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
				if (addr->ref_counter < 1)
					qeth_l3_delete_ip(card, addr);
			} else {
				hash_del(&addr->hnode);
				kfree(addr);
			}
		}
	}

	spin_unlock_bh(&card->ip_lock);
}

static int qeth_l3_send_setdelmc(struct qeth_card *card,
				 struct qeth_ipaddr *addr, int ipacmd)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setdelmc");

	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
	if (addr->proto == QETH_PROT_IPV6)
		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
		       sizeof(struct in6_addr));
	else
		memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);

	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);

	return rc;
}

static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len)
{
	int i, j;

	for (i = 0; i < 16; i++) {
		j = (len) - (i * 8);
		if (j >= 8)
			netmask[i] = 0xff;
		else if (j > 0)
			netmask[i] = (u8)(0xFF00 >> j);
		else
			netmask[i] = 0;
	}
}

static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set)
{
	switch (addr->type) {
	case
QETH_IP_TYPE_RXIP: 389 return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; 390 case QETH_IP_TYPE_VIPA: 391 return (set) ? QETH_IPA_SETIP_VIPA_FLAG : 392 QETH_IPA_DELIP_VIPA_FLAG; 393 default: 394 return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; 395 } 396 } 397 398 static int qeth_l3_send_setdelip(struct qeth_card *card, 399 struct qeth_ipaddr *addr, 400 enum qeth_ipa_cmds ipacmd) 401 { 402 struct qeth_cmd_buffer *iob; 403 struct qeth_ipa_cmd *cmd; 404 __u8 netmask[16]; 405 u32 flags; 406 407 QETH_CARD_TEXT(card, 4, "setdelip"); 408 409 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 410 if (!iob) 411 return -ENOMEM; 412 cmd = __ipa_cmd(iob); 413 414 flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP); 415 QETH_CARD_TEXT_(card, 4, "flags%02X", flags); 416 417 if (addr->proto == QETH_PROT_IPV6) { 418 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, 419 sizeof(struct in6_addr)); 420 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen); 421 memcpy(cmd->data.setdelip6.mask, netmask, 422 sizeof(struct in6_addr)); 423 cmd->data.setdelip6.flags = flags; 424 } else { 425 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4); 426 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4); 427 cmd->data.setdelip4.flags = flags; 428 } 429 430 return qeth_send_ipa_cmd(card, iob, NULL, NULL); 431 } 432 433 static int qeth_l3_send_setrouting(struct qeth_card *card, 434 enum qeth_routing_types type, enum qeth_prot_versions prot) 435 { 436 int rc; 437 struct qeth_ipa_cmd *cmd; 438 struct qeth_cmd_buffer *iob; 439 440 QETH_CARD_TEXT(card, 4, "setroutg"); 441 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); 442 if (!iob) 443 return -ENOMEM; 444 cmd = __ipa_cmd(iob); 445 cmd->data.setrtg.type = (type); 446 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 447 448 return rc; 449 } 450 451 static int qeth_l3_correct_routing_type(struct qeth_card *card, 452 enum qeth_routing_types *type, enum qeth_prot_versions prot) 453 { 454 if (card->info.type == QETH_CARD_TYPE_IQD) { 455 switch (*type) { 456 case NO_ROUTER: 457 case PRIMARY_CONNECTOR: 458 case SECONDARY_CONNECTOR: 459 case MULTICAST_ROUTER: 460 return 0; 461 default: 462 goto out_inval; 463 } 464 } else { 465 switch (*type) { 466 case NO_ROUTER: 467 case PRIMARY_ROUTER: 468 case SECONDARY_ROUTER: 469 return 0; 470 case MULTICAST_ROUTER: 471 if (qeth_is_ipafunc_supported(card, prot, 472 IPA_OSA_MC_ROUTER)) 473 return 0; 474 default: 475 goto out_inval; 476 } 477 } 478 out_inval: 479 *type = NO_ROUTER; 480 return -EINVAL; 481 } 482 483 int qeth_l3_setrouting_v4(struct qeth_card *card) 484 { 485 int rc; 486 487 QETH_CARD_TEXT(card, 3, "setrtg4"); 488 489 rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, 490 QETH_PROT_IPV4); 491 if (rc) 492 return rc; 493 494 rc = qeth_l3_send_setrouting(card, card->options.route4.type, 495 QETH_PROT_IPV4); 496 if (rc) { 497 card->options.route4.type = NO_ROUTER; 498 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" 499 " on %s. 
Type set to 'no router'.\n", rc, 500 QETH_CARD_IFNAME(card)); 501 } 502 return rc; 503 } 504 505 int qeth_l3_setrouting_v6(struct qeth_card *card) 506 { 507 int rc = 0; 508 509 QETH_CARD_TEXT(card, 3, "setrtg6"); 510 511 if (!qeth_is_supported(card, IPA_IPV6)) 512 return 0; 513 rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, 514 QETH_PROT_IPV6); 515 if (rc) 516 return rc; 517 518 rc = qeth_l3_send_setrouting(card, card->options.route6.type, 519 QETH_PROT_IPV6); 520 if (rc) { 521 card->options.route6.type = NO_ROUTER; 522 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" 523 " on %s. Type set to 'no router'.\n", rc, 524 QETH_CARD_IFNAME(card)); 525 } 526 return rc; 527 } 528 529 /* 530 * IP address takeover related functions 531 */ 532 533 /** 534 * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. 535 * 536 * Caller must hold ip_lock. 537 */ 538 void qeth_l3_update_ipato(struct qeth_card *card) 539 { 540 struct qeth_ipaddr *addr; 541 unsigned int i; 542 543 hash_for_each(card->ip_htable, i, addr, hnode) { 544 if (addr->type != QETH_IP_TYPE_NORMAL) 545 continue; 546 addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr); 547 } 548 } 549 550 static void qeth_l3_clear_ipato_list(struct qeth_card *card) 551 { 552 struct qeth_ipato_entry *ipatoe, *tmp; 553 554 spin_lock_bh(&card->ip_lock); 555 556 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 557 list_del(&ipatoe->entry); 558 kfree(ipatoe); 559 } 560 561 qeth_l3_update_ipato(card); 562 spin_unlock_bh(&card->ip_lock); 563 } 564 565 int qeth_l3_add_ipato_entry(struct qeth_card *card, 566 struct qeth_ipato_entry *new) 567 { 568 struct qeth_ipato_entry *ipatoe; 569 int rc = 0; 570 571 QETH_CARD_TEXT(card, 2, "addipato"); 572 573 spin_lock_bh(&card->ip_lock); 574 575 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 576 if (ipatoe->proto != new->proto) 577 continue; 578 if (!memcmp(ipatoe->addr, new->addr, 579 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && 580 (ipatoe->mask_bits == new->mask_bits)) { 581 rc = -EEXIST; 582 break; 583 } 584 } 585 586 if (!rc) { 587 list_add_tail(&new->entry, &card->ipato.entries); 588 qeth_l3_update_ipato(card); 589 } 590 591 spin_unlock_bh(&card->ip_lock); 592 593 return rc; 594 } 595 596 int qeth_l3_del_ipato_entry(struct qeth_card *card, 597 enum qeth_prot_versions proto, u8 *addr, 598 int mask_bits) 599 { 600 struct qeth_ipato_entry *ipatoe, *tmp; 601 int rc = -ENOENT; 602 603 QETH_CARD_TEXT(card, 2, "delipato"); 604 605 spin_lock_bh(&card->ip_lock); 606 607 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 608 if (ipatoe->proto != proto) 609 continue; 610 if (!memcmp(ipatoe->addr, addr, 611 (proto == QETH_PROT_IPV4)? 4:16) && 612 (ipatoe->mask_bits == mask_bits)) { 613 list_del(&ipatoe->entry); 614 qeth_l3_update_ipato(card); 615 kfree(ipatoe); 616 rc = 0; 617 } 618 } 619 620 spin_unlock_bh(&card->ip_lock); 621 return rc; 622 } 623 624 int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, 625 enum qeth_ip_types type, 626 enum qeth_prot_versions proto) 627 { 628 struct qeth_ipaddr addr; 629 int rc; 630 631 qeth_l3_init_ipaddr(&addr, type, proto); 632 if (proto == QETH_PROT_IPV4) 633 memcpy(&addr.u.a4.addr, ip, 4); 634 else 635 memcpy(&addr.u.a6.addr, ip, 16); 636 637 spin_lock_bh(&card->ip_lock); 638 rc = add ? 
qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr); 639 spin_unlock_bh(&card->ip_lock); 640 return rc; 641 } 642 643 int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) 644 { 645 struct qeth_ipaddr addr; 646 int rc, i; 647 648 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); 649 addr.u.a6.addr.s6_addr[0] = 0xfe; 650 addr.u.a6.addr.s6_addr[1] = 0x80; 651 for (i = 0; i < 8; i++) 652 addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i]; 653 654 spin_lock_bh(&card->ip_lock); 655 rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr); 656 spin_unlock_bh(&card->ip_lock); 657 return rc; 658 } 659 660 static int qeth_l3_register_addr_entry(struct qeth_card *card, 661 struct qeth_ipaddr *addr) 662 { 663 char buf[50]; 664 int rc = 0; 665 int cnt = 3; 666 667 668 if (addr->proto == QETH_PROT_IPV4) { 669 QETH_CARD_TEXT(card, 2, "setaddr4"); 670 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 671 } else if (addr->proto == QETH_PROT_IPV6) { 672 QETH_CARD_TEXT(card, 2, "setaddr6"); 673 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); 674 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); 675 } else { 676 QETH_CARD_TEXT(card, 2, "setaddr?"); 677 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); 678 } 679 do { 680 if (addr->is_multicast) 681 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM); 682 else 683 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP); 684 if (rc) 685 QETH_CARD_TEXT(card, 2, "failed"); 686 } while ((--cnt > 0) && rc); 687 if (rc) { 688 QETH_CARD_TEXT(card, 2, "FAILED"); 689 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); 690 dev_warn(&card->gdev->dev, 691 "Registering IP address %s failed\n", buf); 692 } 693 return rc; 694 } 695 696 static int qeth_l3_deregister_addr_entry(struct qeth_card *card, 697 struct qeth_ipaddr *addr) 698 { 699 int rc = 0; 700 701 if (addr->proto == QETH_PROT_IPV4) { 702 QETH_CARD_TEXT(card, 2, "deladdr4"); 703 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 704 } else if (addr->proto == QETH_PROT_IPV6) { 705 QETH_CARD_TEXT(card, 2, "deladdr6"); 706 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); 707 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); 708 } else { 709 QETH_CARD_TEXT(card, 2, "deladdr?"); 710 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); 711 } 712 if (addr->is_multicast) 713 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); 714 else 715 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP); 716 if (rc) 717 QETH_CARD_TEXT(card, 2, "failed"); 718 719 return rc; 720 } 721 722 static int qeth_l3_setadapter_parms(struct qeth_card *card) 723 { 724 int rc = 0; 725 726 QETH_DBF_TEXT(SETUP, 2, "setadprm"); 727 728 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { 729 rc = qeth_setadpparms_change_macaddr(card); 730 if (rc) 731 dev_warn(&card->gdev->dev, "Reading the adapter MAC" 732 " address failed\n"); 733 } 734 735 return rc; 736 } 737 738 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) 739 { 740 int rc; 741 742 QETH_CARD_TEXT(card, 3, "ipaarp"); 743 744 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 745 dev_info(&card->gdev->dev, 746 "ARP processing not supported on %s!\n", 747 QETH_CARD_IFNAME(card)); 748 return 0; 749 } 750 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, 751 IPA_CMD_ASS_START, 0); 752 if (rc) { 753 dev_warn(&card->gdev->dev, 754 "Starting ARP processing support for %s failed\n", 755 QETH_CARD_IFNAME(card)); 756 } 757 return rc; 758 } 759 760 static int 
qeth_l3_start_ipa_source_mac(struct qeth_card *card) 761 { 762 int rc; 763 764 QETH_CARD_TEXT(card, 3, "stsrcmac"); 765 766 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { 767 dev_info(&card->gdev->dev, 768 "Inbound source MAC-address not supported on %s\n", 769 QETH_CARD_IFNAME(card)); 770 return -EOPNOTSUPP; 771 } 772 773 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC, 774 IPA_CMD_ASS_START, 0); 775 if (rc) 776 dev_warn(&card->gdev->dev, 777 "Starting source MAC-address support for %s failed\n", 778 QETH_CARD_IFNAME(card)); 779 return rc; 780 } 781 782 static int qeth_l3_start_ipa_vlan(struct qeth_card *card) 783 { 784 int rc = 0; 785 786 QETH_CARD_TEXT(card, 3, "strtvlan"); 787 788 if (!qeth_is_supported(card, IPA_FULL_VLAN)) { 789 dev_info(&card->gdev->dev, 790 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); 791 return -EOPNOTSUPP; 792 } 793 794 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO, 795 IPA_CMD_ASS_START, 0); 796 if (rc) { 797 dev_warn(&card->gdev->dev, 798 "Starting VLAN support for %s failed\n", 799 QETH_CARD_IFNAME(card)); 800 } else { 801 dev_info(&card->gdev->dev, "VLAN enabled\n"); 802 } 803 return rc; 804 } 805 806 static int qeth_l3_start_ipa_multicast(struct qeth_card *card) 807 { 808 int rc; 809 810 QETH_CARD_TEXT(card, 3, "stmcast"); 811 812 if (!qeth_is_supported(card, IPA_MULTICASTING)) { 813 dev_info(&card->gdev->dev, 814 "Multicast not supported on %s\n", 815 QETH_CARD_IFNAME(card)); 816 return -EOPNOTSUPP; 817 } 818 819 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING, 820 IPA_CMD_ASS_START, 0); 821 if (rc) { 822 dev_warn(&card->gdev->dev, 823 "Starting multicast support for %s failed\n", 824 QETH_CARD_IFNAME(card)); 825 } else { 826 dev_info(&card->gdev->dev, "Multicast enabled\n"); 827 card->dev->flags |= IFF_MULTICAST; 828 } 829 return rc; 830 } 831 832 static int qeth_l3_softsetup_ipv6(struct qeth_card *card) 833 { 834 int rc; 835 836 QETH_CARD_TEXT(card, 3, "softipv6"); 837 838 if (card->info.type == QETH_CARD_TYPE_IQD) 839 goto out; 840 841 rc = qeth_send_simple_setassparms(card, IPA_IPV6, 842 IPA_CMD_ASS_START, 3); 843 if (rc) { 844 dev_err(&card->gdev->dev, 845 "Activating IPv6 support for %s failed\n", 846 QETH_CARD_IFNAME(card)); 847 return rc; 848 } 849 rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, 850 IPA_CMD_ASS_START, 0); 851 if (rc) { 852 dev_err(&card->gdev->dev, 853 "Activating IPv6 support for %s failed\n", 854 QETH_CARD_IFNAME(card)); 855 return rc; 856 } 857 rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU, 858 IPA_CMD_ASS_START, 0); 859 if (rc) { 860 dev_warn(&card->gdev->dev, 861 "Enabling the passthrough mode for %s failed\n", 862 QETH_CARD_IFNAME(card)); 863 return rc; 864 } 865 out: 866 dev_info(&card->gdev->dev, "IPV6 enabled\n"); 867 return 0; 868 } 869 870 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) 871 { 872 QETH_CARD_TEXT(card, 3, "strtipv6"); 873 874 if (!qeth_is_supported(card, IPA_IPV6)) { 875 dev_info(&card->gdev->dev, 876 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); 877 return 0; 878 } 879 return qeth_l3_softsetup_ipv6(card); 880 } 881 882 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) 883 { 884 int rc; 885 886 QETH_CARD_TEXT(card, 3, "stbrdcst"); 887 card->info.broadcast_capable = 0; 888 if (!qeth_is_supported(card, IPA_FILTERING)) { 889 dev_info(&card->gdev->dev, 890 "Broadcast not supported on %s\n", 891 QETH_CARD_IFNAME(card)); 892 rc = -EOPNOTSUPP; 893 goto out; 894 } 895 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 896 
IPA_CMD_ASS_START, 0); 897 if (rc) { 898 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " 899 "%s failed\n", QETH_CARD_IFNAME(card)); 900 goto out; 901 } 902 903 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 904 IPA_CMD_ASS_CONFIGURE, 1); 905 if (rc) { 906 dev_warn(&card->gdev->dev, 907 "Setting up broadcast filtering for %s failed\n", 908 QETH_CARD_IFNAME(card)); 909 goto out; 910 } 911 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; 912 dev_info(&card->gdev->dev, "Broadcast enabled\n"); 913 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 914 IPA_CMD_ASS_ENABLE, 1); 915 if (rc) { 916 dev_warn(&card->gdev->dev, "Setting up broadcast echo " 917 "filtering for %s failed\n", QETH_CARD_IFNAME(card)); 918 goto out; 919 } 920 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; 921 out: 922 if (card->info.broadcast_capable) 923 card->dev->flags |= IFF_BROADCAST; 924 else 925 card->dev->flags &= ~IFF_BROADCAST; 926 return rc; 927 } 928 929 static int qeth_l3_start_ipassists(struct qeth_card *card) 930 { 931 QETH_CARD_TEXT(card, 3, "strtipas"); 932 933 if (qeth_set_access_ctrl_online(card, 0)) 934 return -EIO; 935 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 936 qeth_l3_start_ipa_source_mac(card); /* go on*/ 937 qeth_l3_start_ipa_vlan(card); /* go on*/ 938 qeth_l3_start_ipa_multicast(card); /* go on*/ 939 qeth_l3_start_ipa_ipv6(card); /* go on*/ 940 qeth_l3_start_ipa_broadcast(card); /* go on*/ 941 return 0; 942 } 943 944 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, 945 struct qeth_reply *reply, unsigned long data) 946 { 947 struct qeth_ipa_cmd *cmd; 948 949 cmd = (struct qeth_ipa_cmd *) data; 950 if (cmd->hdr.return_code == 0) 951 ether_addr_copy(card->dev->dev_addr, 952 cmd->data.create_destroy_addr.unique_id); 953 else 954 eth_random_addr(card->dev->dev_addr); 955 956 return 0; 957 } 958 959 static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) 960 { 961 int rc = 0; 962 struct qeth_cmd_buffer *iob; 963 struct qeth_ipa_cmd *cmd; 964 965 QETH_DBF_TEXT(SETUP, 2, "hsrmac"); 966 967 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 968 QETH_PROT_IPV6); 969 if (!iob) 970 return -ENOMEM; 971 cmd = __ipa_cmd(iob); 972 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 973 card->info.unique_id; 974 975 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, 976 NULL); 977 return rc; 978 } 979 980 static int qeth_l3_get_unique_id_cb(struct qeth_card *card, 981 struct qeth_reply *reply, unsigned long data) 982 { 983 struct qeth_ipa_cmd *cmd; 984 985 cmd = (struct qeth_ipa_cmd *) data; 986 if (cmd->hdr.return_code == 0) 987 card->info.unique_id = *((__u16 *) 988 &cmd->data.create_destroy_addr.unique_id[6]); 989 else { 990 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 991 UNIQUE_ID_NOT_BY_CARD; 992 dev_warn(&card->gdev->dev, "The network adapter failed to " 993 "generate a unique ID\n"); 994 } 995 return 0; 996 } 997 998 static int qeth_l3_get_unique_id(struct qeth_card *card) 999 { 1000 int rc = 0; 1001 struct qeth_cmd_buffer *iob; 1002 struct qeth_ipa_cmd *cmd; 1003 1004 QETH_DBF_TEXT(SETUP, 2, "guniqeid"); 1005 1006 if (!qeth_is_supported(card, IPA_IPV6)) { 1007 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1008 UNIQUE_ID_NOT_BY_CARD; 1009 return 0; 1010 } 1011 1012 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1013 QETH_PROT_IPV6); 1014 if (!iob) 1015 return -ENOMEM; 1016 cmd = __ipa_cmd(iob); 1017 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1018 
card->info.unique_id; 1019 1020 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); 1021 return rc; 1022 } 1023 1024 static int 1025 qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, 1026 unsigned long data) 1027 { 1028 struct qeth_ipa_cmd *cmd; 1029 __u16 rc; 1030 1031 QETH_DBF_TEXT(SETUP, 2, "diastrcb"); 1032 1033 cmd = (struct qeth_ipa_cmd *)data; 1034 rc = cmd->hdr.return_code; 1035 if (rc) 1036 QETH_CARD_TEXT_(card, 2, "dxter%x", rc); 1037 switch (cmd->data.diagass.action) { 1038 case QETH_DIAGS_CMD_TRACE_QUERY: 1039 break; 1040 case QETH_DIAGS_CMD_TRACE_DISABLE: 1041 switch (rc) { 1042 case 0: 1043 case IPA_RC_INVALID_SUBCMD: 1044 card->info.promisc_mode = SET_PROMISC_MODE_OFF; 1045 dev_info(&card->gdev->dev, "The HiperSockets network " 1046 "traffic analyzer is deactivated\n"); 1047 break; 1048 default: 1049 break; 1050 } 1051 break; 1052 case QETH_DIAGS_CMD_TRACE_ENABLE: 1053 switch (rc) { 1054 case 0: 1055 card->info.promisc_mode = SET_PROMISC_MODE_ON; 1056 dev_info(&card->gdev->dev, "The HiperSockets network " 1057 "traffic analyzer is activated\n"); 1058 break; 1059 case IPA_RC_HARDWARE_AUTH_ERROR: 1060 dev_warn(&card->gdev->dev, "The device is not " 1061 "authorized to run as a HiperSockets network " 1062 "traffic analyzer\n"); 1063 break; 1064 case IPA_RC_TRACE_ALREADY_ACTIVE: 1065 dev_warn(&card->gdev->dev, "A HiperSockets " 1066 "network traffic analyzer is already " 1067 "active in the HiperSockets LAN\n"); 1068 break; 1069 default: 1070 break; 1071 } 1072 break; 1073 default: 1074 QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n", 1075 cmd->data.diagass.action, QETH_CARD_IFNAME(card)); 1076 } 1077 1078 return 0; 1079 } 1080 1081 static int 1082 qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) 1083 { 1084 struct qeth_cmd_buffer *iob; 1085 struct qeth_ipa_cmd *cmd; 1086 1087 QETH_DBF_TEXT(SETUP, 2, "diagtrac"); 1088 1089 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 1090 if (!iob) 1091 return -ENOMEM; 1092 cmd = __ipa_cmd(iob); 1093 cmd->data.diagass.subcmd_len = 16; 1094 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; 1095 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET; 1096 cmd->data.diagass.action = diags_cmd; 1097 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); 1098 } 1099 1100 static void 1101 qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) 1102 { 1103 struct ip_mc_list *im4; 1104 struct qeth_ipaddr *tmp, *ipm; 1105 1106 QETH_CARD_TEXT(card, 4, "addmc"); 1107 1108 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1109 if (!tmp) 1110 return; 1111 1112 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; 1113 im4 = rcu_dereference(im4->next_rcu)) { 1114 ip_eth_mc_map(im4->multiaddr, tmp->mac); 1115 tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); 1116 tmp->is_multicast = 1; 1117 1118 ipm = qeth_l3_find_addr_by_ip(card, tmp); 1119 if (ipm) { 1120 /* for mcast, by-IP match means full match */ 1121 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 1122 } else { 1123 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1124 if (!ipm) 1125 continue; 1126 ether_addr_copy(ipm->mac, tmp->mac); 1127 ipm->u.a4.addr = be32_to_cpu(im4->multiaddr); 1128 ipm->is_multicast = 1; 1129 ipm->disp_flag = QETH_DISP_ADDR_ADD; 1130 hash_add(card->ip_mc_htable, 1131 &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); 1132 } 1133 } 1134 1135 kfree(tmp); 1136 } 1137 1138 /* called with rcu_read_lock */ 1139 static void qeth_l3_add_vlan_mc(struct qeth_card *card) 1140 { 1141 struct 
in_device *in_dev; 1142 u16 vid; 1143 1144 QETH_CARD_TEXT(card, 4, "addmcvl"); 1145 1146 if (!qeth_is_supported(card, IPA_FULL_VLAN)) 1147 return; 1148 1149 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { 1150 struct net_device *netdev; 1151 1152 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), 1153 vid); 1154 if (netdev == NULL || 1155 !(netdev->flags & IFF_UP)) 1156 continue; 1157 in_dev = __in_dev_get_rcu(netdev); 1158 if (!in_dev) 1159 continue; 1160 qeth_l3_add_mc_to_hash(card, in_dev); 1161 } 1162 } 1163 1164 static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) 1165 { 1166 struct in_device *in4_dev; 1167 1168 QETH_CARD_TEXT(card, 4, "chkmcv4"); 1169 1170 rcu_read_lock(); 1171 in4_dev = __in_dev_get_rcu(card->dev); 1172 if (in4_dev == NULL) 1173 goto unlock; 1174 qeth_l3_add_mc_to_hash(card, in4_dev); 1175 qeth_l3_add_vlan_mc(card); 1176 unlock: 1177 rcu_read_unlock(); 1178 } 1179 1180 static void qeth_l3_add_mc6_to_hash(struct qeth_card *card, 1181 struct inet6_dev *in6_dev) 1182 { 1183 struct qeth_ipaddr *ipm; 1184 struct ifmcaddr6 *im6; 1185 struct qeth_ipaddr *tmp; 1186 1187 QETH_CARD_TEXT(card, 4, "addmc6"); 1188 1189 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1190 if (!tmp) 1191 return; 1192 1193 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { 1194 ipv6_eth_mc_map(&im6->mca_addr, tmp->mac); 1195 memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr, 1196 sizeof(struct in6_addr)); 1197 tmp->is_multicast = 1; 1198 1199 ipm = qeth_l3_find_addr_by_ip(card, tmp); 1200 if (ipm) { 1201 /* for mcast, by-IP match means full match */ 1202 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 1203 continue; 1204 } 1205 1206 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1207 if (!ipm) 1208 continue; 1209 1210 ether_addr_copy(ipm->mac, tmp->mac); 1211 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr, 1212 sizeof(struct in6_addr)); 1213 ipm->is_multicast = 1; 1214 ipm->disp_flag = QETH_DISP_ADDR_ADD; 1215 hash_add(card->ip_mc_htable, 1216 &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); 1217 1218 } 1219 kfree(tmp); 1220 } 1221 1222 /* called with rcu_read_lock */ 1223 static void qeth_l3_add_vlan_mc6(struct qeth_card *card) 1224 { 1225 struct inet6_dev *in_dev; 1226 u16 vid; 1227 1228 QETH_CARD_TEXT(card, 4, "admc6vl"); 1229 1230 if (!qeth_is_supported(card, IPA_FULL_VLAN)) 1231 return; 1232 1233 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { 1234 struct net_device *netdev; 1235 1236 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), 1237 vid); 1238 if (netdev == NULL || 1239 !(netdev->flags & IFF_UP)) 1240 continue; 1241 in_dev = in6_dev_get(netdev); 1242 if (!in_dev) 1243 continue; 1244 read_lock_bh(&in_dev->lock); 1245 qeth_l3_add_mc6_to_hash(card, in_dev); 1246 read_unlock_bh(&in_dev->lock); 1247 in6_dev_put(in_dev); 1248 } 1249 } 1250 1251 static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) 1252 { 1253 struct inet6_dev *in6_dev; 1254 1255 QETH_CARD_TEXT(card, 4, "chkmcv6"); 1256 1257 if (!qeth_is_supported(card, IPA_IPV6)) 1258 return ; 1259 in6_dev = in6_dev_get(card->dev); 1260 if (!in6_dev) 1261 return; 1262 1263 rcu_read_lock(); 1264 read_lock_bh(&in6_dev->lock); 1265 qeth_l3_add_mc6_to_hash(card, in6_dev); 1266 qeth_l3_add_vlan_mc6(card); 1267 read_unlock_bh(&in6_dev->lock); 1268 rcu_read_unlock(); 1269 in6_dev_put(in6_dev); 1270 } 1271 1272 static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, 1273 __be16 proto, u16 vid) 1274 { 1275 struct qeth_card *card = dev->ml_priv; 1276 1277 set_bit(vid, card->active_vlans); 1278 
return 0; 1279 } 1280 1281 static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, 1282 __be16 proto, u16 vid) 1283 { 1284 struct qeth_card *card = dev->ml_priv; 1285 1286 QETH_CARD_TEXT_(card, 4, "kid:%d", vid); 1287 1288 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { 1289 QETH_CARD_TEXT(card, 3, "kidREC"); 1290 return 0; 1291 } 1292 clear_bit(vid, card->active_vlans); 1293 qeth_l3_set_rx_mode(dev); 1294 return 0; 1295 } 1296 1297 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 1298 struct qeth_hdr *hdr) 1299 { 1300 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { 1301 u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 1302 ETH_P_IP; 1303 unsigned char tg_addr[ETH_ALEN]; 1304 1305 skb_reset_network_header(skb); 1306 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { 1307 case QETH_CAST_MULTICAST: 1308 if (prot == ETH_P_IP) 1309 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 1310 else 1311 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 1312 1313 card->stats.multicast++; 1314 break; 1315 case QETH_CAST_BROADCAST: 1316 ether_addr_copy(tg_addr, card->dev->broadcast); 1317 card->stats.multicast++; 1318 break; 1319 default: 1320 if (card->options.sniffer) 1321 skb->pkt_type = PACKET_OTHERHOST; 1322 ether_addr_copy(tg_addr, card->dev->dev_addr); 1323 } 1324 1325 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 1326 card->dev->header_ops->create(skb, card->dev, prot, 1327 tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac, 1328 skb->len); 1329 else 1330 card->dev->header_ops->create(skb, card->dev, prot, 1331 tg_addr, "FAKELL", skb->len); 1332 } 1333 1334 skb->protocol = eth_type_trans(skb, card->dev); 1335 1336 /* copy VLAN tag from hdr into skb */ 1337 if (!card->options.sniffer && 1338 (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 1339 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 1340 u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
1341 hdr->hdr.l3.vlan_id : 1342 hdr->hdr.l3.next_hop.rx.vlan_id; 1343 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 1344 } 1345 1346 qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags); 1347 } 1348 1349 static int qeth_l3_process_inbound_buffer(struct qeth_card *card, 1350 int budget, int *done) 1351 { 1352 int work_done = 0; 1353 struct sk_buff *skb; 1354 struct qeth_hdr *hdr; 1355 unsigned int len; 1356 __u16 magic; 1357 1358 *done = 0; 1359 WARN_ON_ONCE(!budget); 1360 while (budget) { 1361 skb = qeth_core_get_next_skb(card, 1362 &card->qdio.in_q->bufs[card->rx.b_index], 1363 &card->rx.b_element, &card->rx.e_offset, &hdr); 1364 if (!skb) { 1365 *done = 1; 1366 break; 1367 } 1368 switch (hdr->hdr.l3.id) { 1369 case QETH_HEADER_TYPE_LAYER3: 1370 magic = *(__u16 *)skb->data; 1371 if ((card->info.type == QETH_CARD_TYPE_IQD) && 1372 (magic == ETH_P_AF_IUCV)) { 1373 skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); 1374 len = skb->len; 1375 card->dev->header_ops->create(skb, card->dev, 0, 1376 card->dev->dev_addr, "FAKELL", len); 1377 skb_reset_mac_header(skb); 1378 netif_receive_skb(skb); 1379 } else { 1380 qeth_l3_rebuild_skb(card, skb, hdr); 1381 len = skb->len; 1382 napi_gro_receive(&card->napi, skb); 1383 } 1384 break; 1385 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ 1386 skb->protocol = eth_type_trans(skb, skb->dev); 1387 len = skb->len; 1388 netif_receive_skb(skb); 1389 break; 1390 default: 1391 dev_kfree_skb_any(skb); 1392 QETH_CARD_TEXT(card, 3, "inbunkno"); 1393 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 1394 continue; 1395 } 1396 work_done++; 1397 budget--; 1398 card->stats.rx_packets++; 1399 card->stats.rx_bytes += len; 1400 } 1401 return work_done; 1402 } 1403 1404 static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) 1405 { 1406 QETH_DBF_TEXT(SETUP, 2, "stopcard"); 1407 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1408 1409 qeth_set_allowed_threads(card, 0, 1); 1410 if (card->options.sniffer && 1411 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) 1412 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); 1413 if (card->read.state == CH_STATE_UP && 1414 card->write.state == CH_STATE_UP && 1415 (card->state == CARD_STATE_UP)) { 1416 if (recovery_mode) 1417 qeth_l3_stop(card->dev); 1418 else { 1419 rtnl_lock(); 1420 dev_close(card->dev); 1421 rtnl_unlock(); 1422 } 1423 card->state = CARD_STATE_SOFTSETUP; 1424 } 1425 if (card->state == CARD_STATE_SOFTSETUP) { 1426 qeth_l3_clear_ip_htable(card, 1); 1427 qeth_clear_ipacmd_list(card); 1428 card->state = CARD_STATE_HARDSETUP; 1429 } 1430 if (card->state == CARD_STATE_HARDSETUP) { 1431 qeth_qdio_clear_card(card, 0); 1432 qeth_clear_qdio_buffers(card); 1433 qeth_clear_working_pool_list(card); 1434 card->state = CARD_STATE_DOWN; 1435 } 1436 if (card->state == CARD_STATE_DOWN) { 1437 qeth_clear_cmd_buffers(&card->read); 1438 qeth_clear_cmd_buffers(&card->write); 1439 } 1440 } 1441 1442 /* 1443 * test for and Switch promiscuous mode (on or off) 1444 * either for guestlan or HiperSocket Sniffer 1445 */ 1446 static void 1447 qeth_l3_handle_promisc_mode(struct qeth_card *card) 1448 { 1449 struct net_device *dev = card->dev; 1450 1451 if (((dev->flags & IFF_PROMISC) && 1452 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || 1453 (!(dev->flags & IFF_PROMISC) && 1454 (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) 1455 return; 1456 1457 if (card->info.guestlan) { /* Guestlan trace */ 1458 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 1459 qeth_setadp_promisc_mode(card); 1460 } else if 
(card->options.sniffer && /* HiperSockets trace */ 1461 qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 1462 if (dev->flags & IFF_PROMISC) { 1463 QETH_CARD_TEXT(card, 3, "+promisc"); 1464 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); 1465 } else { 1466 QETH_CARD_TEXT(card, 3, "-promisc"); 1467 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); 1468 } 1469 } 1470 } 1471 1472 static void qeth_l3_set_rx_mode(struct net_device *dev) 1473 { 1474 struct qeth_card *card = dev->ml_priv; 1475 struct qeth_ipaddr *addr; 1476 struct hlist_node *tmp; 1477 int i, rc; 1478 1479 QETH_CARD_TEXT(card, 3, "setmulti"); 1480 if (qeth_threads_running(card, QETH_RECOVER_THREAD) && 1481 (card->state != CARD_STATE_UP)) 1482 return; 1483 if (!card->options.sniffer) { 1484 spin_lock_bh(&card->mclock); 1485 1486 qeth_l3_add_multicast_ipv4(card); 1487 qeth_l3_add_multicast_ipv6(card); 1488 1489 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { 1490 switch (addr->disp_flag) { 1491 case QETH_DISP_ADDR_DELETE: 1492 rc = qeth_l3_deregister_addr_entry(card, addr); 1493 if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) { 1494 hash_del(&addr->hnode); 1495 kfree(addr); 1496 } 1497 break; 1498 case QETH_DISP_ADDR_ADD: 1499 rc = qeth_l3_register_addr_entry(card, addr); 1500 if (rc && rc != IPA_RC_LAN_OFFLINE) { 1501 hash_del(&addr->hnode); 1502 kfree(addr); 1503 break; 1504 } 1505 addr->ref_counter = 1; 1506 /* fall through */ 1507 default: 1508 /* for next call to set_rx_mode(): */ 1509 addr->disp_flag = QETH_DISP_ADDR_DELETE; 1510 } 1511 } 1512 1513 spin_unlock_bh(&card->mclock); 1514 1515 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 1516 return; 1517 } 1518 qeth_l3_handle_promisc_mode(card); 1519 } 1520 1521 static const char *qeth_l3_arp_get_error_cause(int *rc) 1522 { 1523 switch (*rc) { 1524 case QETH_IPA_ARP_RC_FAILED: 1525 *rc = -EIO; 1526 return "operation failed"; 1527 case QETH_IPA_ARP_RC_NOTSUPP: 1528 *rc = -EOPNOTSUPP; 1529 return "operation not supported"; 1530 case QETH_IPA_ARP_RC_OUT_OF_RANGE: 1531 *rc = -EINVAL; 1532 return "argument out of range"; 1533 case QETH_IPA_ARP_RC_Q_NOTSUPP: 1534 *rc = -EOPNOTSUPP; 1535 return "query operation not supported"; 1536 case QETH_IPA_ARP_RC_Q_NO_DATA: 1537 *rc = -ENOENT; 1538 return "no query data available"; 1539 default: 1540 return "unknown error"; 1541 } 1542 } 1543 1544 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) 1545 { 1546 int tmp; 1547 int rc; 1548 1549 QETH_CARD_TEXT(card, 3, "arpstnoe"); 1550 1551 /* 1552 * currently GuestLAN only supports the ARP assist function 1553 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; 1554 * thus we say EOPNOTSUPP for this ARP function 1555 */ 1556 if (card->info.guestlan) 1557 return -EOPNOTSUPP; 1558 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1559 return -EOPNOTSUPP; 1560 } 1561 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, 1562 IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 1563 no_entries); 1564 if (rc) { 1565 tmp = rc; 1566 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on " 1567 "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card), 1568 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 1569 } 1570 return rc; 1571 } 1572 1573 static __u32 get_arp_entry_size(struct qeth_card *card, 1574 struct qeth_arp_query_data *qdata, 1575 struct qeth_arp_entrytype *type, __u8 strip_entries) 1576 { 1577 __u32 rc; 1578 __u8 is_hsi; 1579 1580 is_hsi = qdata->reply_bits == 5; 1581 if (type->ip == QETHARP_IP_ADDR_V4) { 1582 QETH_CARD_TEXT(card, 4, 
"arpev4"); 1583 if (strip_entries) { 1584 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) : 1585 sizeof(struct qeth_arp_qi_entry7_short); 1586 } else { 1587 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) : 1588 sizeof(struct qeth_arp_qi_entry7); 1589 } 1590 } else if (type->ip == QETHARP_IP_ADDR_V6) { 1591 QETH_CARD_TEXT(card, 4, "arpev6"); 1592 if (strip_entries) { 1593 rc = is_hsi ? 1594 sizeof(struct qeth_arp_qi_entry5_short_ipv6) : 1595 sizeof(struct qeth_arp_qi_entry7_short_ipv6); 1596 } else { 1597 rc = is_hsi ? 1598 sizeof(struct qeth_arp_qi_entry5_ipv6) : 1599 sizeof(struct qeth_arp_qi_entry7_ipv6); 1600 } 1601 } else { 1602 QETH_CARD_TEXT(card, 4, "arpinv"); 1603 rc = 0; 1604 } 1605 1606 return rc; 1607 } 1608 1609 static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot) 1610 { 1611 return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) || 1612 (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6); 1613 } 1614 1615 static int qeth_l3_arp_query_cb(struct qeth_card *card, 1616 struct qeth_reply *reply, unsigned long data) 1617 { 1618 struct qeth_ipa_cmd *cmd; 1619 struct qeth_arp_query_data *qdata; 1620 struct qeth_arp_query_info *qinfo; 1621 int i; 1622 int e; 1623 int entrybytes_done; 1624 int stripped_bytes; 1625 __u8 do_strip_entries; 1626 1627 QETH_CARD_TEXT(card, 3, "arpquecb"); 1628 1629 qinfo = (struct qeth_arp_query_info *) reply->param; 1630 cmd = (struct qeth_ipa_cmd *) data; 1631 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version); 1632 if (cmd->hdr.return_code) { 1633 QETH_CARD_TEXT(card, 4, "arpcberr"); 1634 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); 1635 return 0; 1636 } 1637 if (cmd->data.setassparms.hdr.return_code) { 1638 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 1639 QETH_CARD_TEXT(card, 4, "setaperr"); 1640 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); 1641 return 0; 1642 } 1643 qdata = &cmd->data.setassparms.data.query_arp; 1644 QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); 1645 1646 do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0; 1647 stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0; 1648 entrybytes_done = 0; 1649 for (e = 0; e < qdata->no_entries; ++e) { 1650 char *cur_entry; 1651 __u32 esize; 1652 struct qeth_arp_entrytype *etype; 1653 1654 cur_entry = &qdata->data + entrybytes_done; 1655 etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type; 1656 if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) { 1657 QETH_CARD_TEXT(card, 4, "pmis"); 1658 QETH_CARD_TEXT_(card, 4, "%i", etype->ip); 1659 break; 1660 } 1661 esize = get_arp_entry_size(card, qdata, etype, 1662 do_strip_entries); 1663 QETH_CARD_TEXT_(card, 5, "esz%i", esize); 1664 if (!esize) 1665 break; 1666 1667 if ((qinfo->udata_len - qinfo->udata_offset) < esize) { 1668 QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM); 1669 cmd->hdr.return_code = IPA_RC_ENOMEM; 1670 goto out_error; 1671 } 1672 1673 memcpy(qinfo->udata + qinfo->udata_offset, 1674 &qdata->data + entrybytes_done + stripped_bytes, 1675 esize); 1676 entrybytes_done += esize + stripped_bytes; 1677 qinfo->udata_offset += esize; 1678 ++qinfo->no_entries; 1679 } 1680 /* check if all replies received ... 
*/ 1681 if (cmd->data.setassparms.hdr.seq_no < 1682 cmd->data.setassparms.hdr.number_of_replies) 1683 return 1; 1684 QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries); 1685 memcpy(qinfo->udata, &qinfo->no_entries, 4); 1686 /* keep STRIP_ENTRIES flag so the user program can distinguish 1687 * stripped entries from normal ones */ 1688 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 1689 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; 1690 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); 1691 QETH_CARD_TEXT_(card, 4, "rc%i", 0); 1692 return 0; 1693 out_error: 1694 i = 0; 1695 memcpy(qinfo->udata, &i, 4); 1696 return 0; 1697 } 1698 1699 static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, 1700 struct qeth_cmd_buffer *iob, int len, 1701 int (*reply_cb)(struct qeth_card *, struct qeth_reply *, 1702 unsigned long), 1703 void *reply_param) 1704 { 1705 QETH_CARD_TEXT(card, 4, "sendarp"); 1706 1707 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 1708 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 1709 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 1710 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, 1711 reply_cb, reply_param); 1712 } 1713 1714 static int qeth_l3_query_arp_cache_info(struct qeth_card *card, 1715 enum qeth_prot_versions prot, 1716 struct qeth_arp_query_info *qinfo) 1717 { 1718 struct qeth_cmd_buffer *iob; 1719 struct qeth_ipa_cmd *cmd; 1720 int tmp; 1721 int rc; 1722 1723 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); 1724 1725 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1726 IPA_CMD_ASS_ARP_QUERY_INFO, 1727 sizeof(struct qeth_arp_query_data) 1728 - sizeof(char), 1729 prot); 1730 if (!iob) 1731 return -ENOMEM; 1732 cmd = __ipa_cmd(iob); 1733 cmd->data.setassparms.data.query_arp.request_bits = 0x000F; 1734 cmd->data.setassparms.data.query_arp.reply_bits = 0; 1735 cmd->data.setassparms.data.query_arp.no_entries = 0; 1736 rc = qeth_l3_send_ipa_arp_cmd(card, iob, 1737 QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN, 1738 qeth_l3_arp_query_cb, (void *)qinfo); 1739 if (rc) { 1740 tmp = rc; 1741 QETH_DBF_MESSAGE(2, 1742 "Error while querying ARP cache on %s: %s " 1743 "(0x%x/%d)\n", QETH_CARD_IFNAME(card), 1744 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 1745 } 1746 1747 return rc; 1748 } 1749 1750 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) 1751 { 1752 struct qeth_arp_query_info qinfo = {0, }; 1753 int rc; 1754 1755 QETH_CARD_TEXT(card, 3, "arpquery"); 1756 1757 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 1758 IPA_ARP_PROCESSING)) { 1759 QETH_CARD_TEXT(card, 3, "arpqnsup"); 1760 rc = -EOPNOTSUPP; 1761 goto out; 1762 } 1763 /* get size of userspace buffer and mask_bits -> 6 bytes */ 1764 if (copy_from_user(&qinfo, udata, 6)) { 1765 rc = -EFAULT; 1766 goto out; 1767 } 1768 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 1769 if (!qinfo.udata) { 1770 rc = -ENOMEM; 1771 goto out; 1772 } 1773 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; 1774 rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo); 1775 if (rc) { 1776 if (copy_to_user(udata, qinfo.udata, 4)) 1777 rc = -EFAULT; 1778 goto free_and_out; 1779 } 1780 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { 1781 /* fails in case of GuestLAN QDIO mode */ 1782 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo); 1783 } 1784 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { 1785 QETH_CARD_TEXT(card, 4, "qactf"); 1786 rc = -EFAULT; 1787 goto free_and_out; 1788 } 1789 QETH_CARD_TEXT(card, 4, "qacts"); 1790 1791 
free_and_out: 1792 kfree(qinfo.udata); 1793 out: 1794 return rc; 1795 } 1796 1797 static int qeth_l3_arp_add_entry(struct qeth_card *card, 1798 struct qeth_arp_cache_entry *entry) 1799 { 1800 struct qeth_cmd_buffer *iob; 1801 char buf[16]; 1802 int tmp; 1803 int rc; 1804 1805 QETH_CARD_TEXT(card, 3, "arpadent"); 1806 1807 /* 1808 * currently GuestLAN only supports the ARP assist function 1809 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; 1810 * thus we say EOPNOTSUPP for this ARP function 1811 */ 1812 if (card->info.guestlan) 1813 return -EOPNOTSUPP; 1814 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1815 return -EOPNOTSUPP; 1816 } 1817 1818 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1819 IPA_CMD_ASS_ARP_ADD_ENTRY, 1820 sizeof(struct qeth_arp_cache_entry), 1821 QETH_PROT_IPV4); 1822 if (!iob) 1823 return -ENOMEM; 1824 rc = qeth_send_setassparms(card, iob, 1825 sizeof(struct qeth_arp_cache_entry), 1826 (unsigned long) entry, 1827 qeth_setassparms_cb, NULL); 1828 if (rc) { 1829 tmp = rc; 1830 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); 1831 QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s " 1832 "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), 1833 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 1834 } 1835 return rc; 1836 } 1837 1838 static int qeth_l3_arp_remove_entry(struct qeth_card *card, 1839 struct qeth_arp_cache_entry *entry) 1840 { 1841 struct qeth_cmd_buffer *iob; 1842 char buf[16] = {0, }; 1843 int tmp; 1844 int rc; 1845 1846 QETH_CARD_TEXT(card, 3, "arprment"); 1847 1848 /* 1849 * currently GuestLAN only supports the ARP assist function 1850 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY; 1851 * thus we say EOPNOTSUPP for this ARP function 1852 */ 1853 if (card->info.guestlan) 1854 return -EOPNOTSUPP; 1855 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1856 return -EOPNOTSUPP; 1857 } 1858 memcpy(buf, entry, 12); 1859 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1860 IPA_CMD_ASS_ARP_REMOVE_ENTRY, 1861 12, 1862 QETH_PROT_IPV4); 1863 if (!iob) 1864 return -ENOMEM; 1865 rc = qeth_send_setassparms(card, iob, 1866 12, (unsigned long)buf, 1867 qeth_setassparms_cb, NULL); 1868 if (rc) { 1869 tmp = rc; 1870 memset(buf, 0, 16); 1871 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); 1872 QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s" 1873 " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), 1874 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 1875 } 1876 return rc; 1877 } 1878 1879 static int qeth_l3_arp_flush_cache(struct qeth_card *card) 1880 { 1881 int rc; 1882 int tmp; 1883 1884 QETH_CARD_TEXT(card, 3, "arpflush"); 1885 1886 /* 1887 * currently GuestLAN only supports the ARP assist function 1888 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; 1889 * thus we say EOPNOTSUPP for this ARP function 1890 */ 1891 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) 1892 return -EOPNOTSUPP; 1893 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1894 return -EOPNOTSUPP; 1895 } 1896 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, 1897 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); 1898 if (rc) { 1899 tmp = rc; 1900 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s " 1901 "(0x%x/%d)\n", QETH_CARD_IFNAME(card), 1902 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 1903 } 1904 return rc; 1905 } 1906 1907 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1908 { 1909 struct qeth_card *card = dev->ml_priv; 1910 struct 
qeth_arp_cache_entry arp_entry; 1911 int rc = 0; 1912 1913 switch (cmd) { 1914 case SIOC_QETH_ARP_SET_NO_ENTRIES: 1915 if (!capable(CAP_NET_ADMIN)) { 1916 rc = -EPERM; 1917 break; 1918 } 1919 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); 1920 break; 1921 case SIOC_QETH_ARP_QUERY_INFO: 1922 if (!capable(CAP_NET_ADMIN)) { 1923 rc = -EPERM; 1924 break; 1925 } 1926 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); 1927 break; 1928 case SIOC_QETH_ARP_ADD_ENTRY: 1929 if (!capable(CAP_NET_ADMIN)) { 1930 rc = -EPERM; 1931 break; 1932 } 1933 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, 1934 sizeof(struct qeth_arp_cache_entry))) 1935 rc = -EFAULT; 1936 else 1937 rc = qeth_l3_arp_add_entry(card, &arp_entry); 1938 break; 1939 case SIOC_QETH_ARP_REMOVE_ENTRY: 1940 if (!capable(CAP_NET_ADMIN)) { 1941 rc = -EPERM; 1942 break; 1943 } 1944 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, 1945 sizeof(struct qeth_arp_cache_entry))) 1946 rc = -EFAULT; 1947 else 1948 rc = qeth_l3_arp_remove_entry(card, &arp_entry); 1949 break; 1950 case SIOC_QETH_ARP_FLUSH_CACHE: 1951 if (!capable(CAP_NET_ADMIN)) { 1952 rc = -EPERM; 1953 break; 1954 } 1955 rc = qeth_l3_arp_flush_cache(card); 1956 break; 1957 default: 1958 rc = -EOPNOTSUPP; 1959 } 1960 return rc; 1961 } 1962 1963 static int qeth_l3_get_cast_type(struct sk_buff *skb) 1964 { 1965 struct neighbour *n = NULL; 1966 struct dst_entry *dst; 1967 1968 rcu_read_lock(); 1969 dst = skb_dst(skb); 1970 if (dst) 1971 n = dst_neigh_lookup_skb(dst, skb); 1972 if (n) { 1973 int cast_type = n->type; 1974 1975 rcu_read_unlock(); 1976 neigh_release(n); 1977 if ((cast_type == RTN_BROADCAST) || 1978 (cast_type == RTN_MULTICAST) || 1979 (cast_type == RTN_ANYCAST)) 1980 return cast_type; 1981 return RTN_UNSPEC; 1982 } 1983 rcu_read_unlock(); 1984 1985 /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ 1986 if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) 1987 return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? 1988 RTN_MULTICAST : RTN_UNSPEC; 1989 else if (be16_to_cpu(skb->protocol) == ETH_P_IP) 1990 return ipv4_is_multicast(ip_hdr(skb)->daddr) ? 1991 RTN_MULTICAST : RTN_UNSPEC; 1992 1993 /* ... 
and MAC address */
	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
		return RTN_BROADCAST;
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return RTN_MULTICAST;

	/* default to unicast */
	return RTN_UNSPEC;
}

static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
				     struct qeth_hdr *hdr, struct sk_buff *skb)
{
	char daddr[16];
	struct af_iucv_trans_hdr *iucv_hdr;

	memset(hdr, 0, sizeof(struct qeth_hdr));
	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
	hdr->hdr.l3.ext_flags = 0;
	hdr->hdr.l3.length = skb->len - ETH_HLEN;
	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;

	iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
	memset(daddr, 0, sizeof(daddr));
	daddr[0] = 0xfe;
	daddr[1] = 0x80;
	memcpy(&daddr[8], iucv_hdr->destUserID, 8);
	memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
}

static u8 qeth_l3_cast_type_to_flag(int cast_type)
{
	if (cast_type == RTN_MULTICAST)
		return QETH_CAST_MULTICAST;
	if (cast_type == RTN_ANYCAST)
		return QETH_CAST_ANYCAST;
	if (cast_type == RTN_BROADCAST)
		return QETH_CAST_BROADCAST;
	return QETH_CAST_UNICAST;
}
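
/* Build the layer-3 qeth header for an outbound frame: record length and
 * cast-type flags, copy a present VLAN tag, and fill in the next-hop address
 * (taken from the attached route if there is one, otherwise from the packet's
 * destination address).
 */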
2072 rt_nexthop(rt, ip_hdr(skb)->daddr) :
2073 ip_hdr(skb)->daddr;
2074 } else {
2075 /* IPv6 */
2076 const struct rt6_info *rt = skb_rt6_info(skb);
2077 const struct in6_addr *next_hop;
2078
2079 if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
2080 next_hop = &rt->rt6i_gateway;
2081 else
2082 next_hop = &ipv6_hdr(skb)->daddr;
2083 memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
2084
2085 hdr->hdr.l3.flags |= QETH_HDR_IPV6;
2086 if (card->info.type != QETH_CARD_TYPE_IQD)
2087 hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
2088 }
2089 rcu_read_unlock();
2090 }
2091
2092 static void qeth_tso_fill_header(struct qeth_card *card,
2093 struct qeth_hdr *qhdr, struct sk_buff *skb)
2094 {
2095 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
2096 struct tcphdr *tcph = tcp_hdr(skb);
2097 struct iphdr *iph = ip_hdr(skb);
2098 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2099
2100 /* fix header to TSO values ... */
2101 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
2102 /* set values which are fixed for the first approach ... */
2103 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
2104 hdr->ext.imb_hdr_no = 1;
2105 hdr->ext.hdr_type = 1;
2106 hdr->ext.hdr_version = 1;
2107 hdr->ext.hdr_len = 28;
2108 /* insert non-fixed values */
2109 hdr->ext.mss = skb_shinfo(skb)->gso_size;
2110 hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
2111 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
2112 sizeof(struct qeth_hdr_tso));
2113 tcph->check = 0;
2114 if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
2115 ip6h->payload_len = 0;
2116 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
2117 0, IPPROTO_TCP, 0);
2118 } else {
2119 /* OSA wants us to set these values ... */
2120 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2121 0, IPPROTO_TCP, 0);
2122 iph->tot_len = 0;
2123 iph->check = 0;
2124 }
2125 }
2126
2127 /**
2128 * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
2129 * @card: qeth card structure, to check max. elems.
2130 * @skb: SKB address
2131 * @extra_elems: extra elems needed, to check against max.
2132 *
2133 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
2134 * skb data, including linear part and fragments, but excluding TCP header.
2135 * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
2136 * Checks if the result plus extra_elems fits under the limit for the card.
2137 * Returns 0 if it does not.
2138 * Note: extra_elems is not included in the returned result.
2139 */
2140 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
2141 struct sk_buff *skb, int extra_elems)
2142 {
2143 addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
2144 addr_t end = (addr_t)skb->data + skb_headlen(skb);
2145 int elements = qeth_get_elements_for_frags(skb);
2146
2147 if (start != end)
2148 elements += qeth_get_elements_for_range(start, end);
2149
2150 if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
2151 QETH_DBF_MESSAGE(2,
2152 "Invalid size of TSO IP packet (Number=%d / Length=%d).
Discarded.\n", 2153 elements + extra_elems, skb->len); 2154 return 0; 2155 } 2156 return elements; 2157 } 2158 2159 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, 2160 struct net_device *dev) 2161 { 2162 int rc; 2163 __be16 *tag; 2164 struct qeth_hdr *hdr = NULL; 2165 int hdr_elements = 0; 2166 int elements; 2167 struct qeth_card *card = dev->ml_priv; 2168 struct sk_buff *new_skb = NULL; 2169 int ipv = qeth_get_ip_version(skb); 2170 int cast_type = qeth_l3_get_cast_type(skb); 2171 struct qeth_qdio_out_q *queue = 2172 card->qdio.out_qs[card->qdio.do_prio_queueing 2173 || (cast_type && card->info.is_multicast_different) ? 2174 qeth_get_priority_queue(card, skb, ipv, cast_type) : 2175 card->qdio.default_out_queue]; 2176 int tx_bytes = skb->len; 2177 unsigned int hd_len = 0; 2178 bool use_tso; 2179 int data_offset = -1; 2180 unsigned int nr_frags; 2181 2182 if (((card->info.type == QETH_CARD_TYPE_IQD) && 2183 (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || 2184 ((card->options.cq == QETH_CQ_ENABLED) && 2185 (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) || 2186 card->options.sniffer) 2187 goto tx_drop; 2188 2189 if ((card->state != CARD_STATE_UP) || !card->lan_online) { 2190 card->stats.tx_carrier_errors++; 2191 goto tx_drop; 2192 } 2193 2194 if ((cast_type == RTN_BROADCAST) && 2195 (card->info.broadcast_capable == 0)) 2196 goto tx_drop; 2197 2198 if (card->options.performance_stats) { 2199 card->perf_stats.outbound_cnt++; 2200 card->perf_stats.outbound_start_time = qeth_get_micros(); 2201 } 2202 2203 /* Ignore segment size from skb_is_gso(), 1 page is always used. */ 2204 use_tso = skb_is_gso(skb) && 2205 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); 2206 2207 if (card->info.type == QETH_CARD_TYPE_IQD) { 2208 new_skb = skb; 2209 data_offset = ETH_HLEN; 2210 hd_len = sizeof(*hdr); 2211 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 2212 if (!hdr) 2213 goto tx_drop; 2214 hdr_elements++; 2215 } else { 2216 /* create a clone with writeable headroom */ 2217 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) 2218 + VLAN_HLEN); 2219 if (!new_skb) 2220 goto tx_drop; 2221 2222 if (ipv == 4) { 2223 skb_pull(new_skb, ETH_HLEN); 2224 } 2225 2226 if (ipv != 4 && skb_vlan_tag_present(new_skb)) { 2227 skb_push(new_skb, VLAN_HLEN); 2228 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); 2229 skb_copy_to_linear_data_offset(new_skb, 4, 2230 new_skb->data + 8, 4); 2231 skb_copy_to_linear_data_offset(new_skb, 8, 2232 new_skb->data + 12, 4); 2233 tag = (__be16 *)(new_skb->data + 12); 2234 *tag = cpu_to_be16(ETH_P_8021Q); 2235 *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); 2236 } 2237 } 2238 2239 netif_stop_queue(dev); 2240 2241 /* fix hardware limitation: as long as we do not have sbal 2242 * chaining we can not send long frag lists 2243 */ 2244 if ((card->info.type != QETH_CARD_TYPE_IQD) && 2245 ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || 2246 (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) { 2247 int lin_rc = skb_linearize(new_skb); 2248 2249 if (card->options.performance_stats) { 2250 if (lin_rc) 2251 card->perf_stats.tx_linfail++; 2252 else 2253 card->perf_stats.tx_lin++; 2254 } 2255 if (lin_rc) 2256 goto tx_drop; 2257 } 2258 nr_frags = skb_shinfo(new_skb)->nr_frags; 2259 2260 if (use_tso) { 2261 hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso)); 2262 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 2263 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, 2264 new_skb->len - sizeof(struct qeth_hdr_tso)); 2265 
qeth_tso_fill_header(card, hdr, new_skb); 2266 hdr_elements++; 2267 } else { 2268 if (data_offset < 0) { 2269 hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); 2270 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, 2271 new_skb->len - 2272 sizeof(struct qeth_hdr)); 2273 } else { 2274 if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV) 2275 qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb); 2276 else { 2277 qeth_l3_fill_header(card, hdr, new_skb, ipv, 2278 cast_type, 2279 new_skb->len - data_offset); 2280 } 2281 } 2282 2283 if (new_skb->ip_summed == CHECKSUM_PARTIAL) { 2284 qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv); 2285 if (card->options.performance_stats) 2286 card->perf_stats.tx_csum++; 2287 } 2288 } 2289 2290 elements = use_tso ? 2291 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : 2292 qeth_get_elements_no(card, new_skb, hdr_elements, 2293 (data_offset > 0) ? data_offset : 0); 2294 if (!elements) { 2295 if (data_offset >= 0) 2296 kmem_cache_free(qeth_core_header_cache, hdr); 2297 goto tx_drop; 2298 } 2299 elements += hdr_elements; 2300 2301 if (card->info.type != QETH_CARD_TYPE_IQD) { 2302 int len; 2303 if (use_tso) { 2304 hd_len = sizeof(struct qeth_hdr_tso) + 2305 ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); 2306 len = hd_len; 2307 } else { 2308 len = sizeof(struct qeth_hdr_layer3); 2309 } 2310 2311 if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) 2312 goto tx_drop; 2313 rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, 2314 hd_len, elements); 2315 } else 2316 rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset, 2317 hd_len); 2318 2319 if (!rc) { 2320 card->stats.tx_packets++; 2321 card->stats.tx_bytes += tx_bytes; 2322 if (new_skb != skb) 2323 dev_kfree_skb_any(skb); 2324 if (card->options.performance_stats) { 2325 if (use_tso) { 2326 card->perf_stats.large_send_bytes += tx_bytes; 2327 card->perf_stats.large_send_cnt++; 2328 } 2329 if (nr_frags) { 2330 card->perf_stats.sg_skbs_sent++; 2331 /* nr_frags + skb->data */ 2332 card->perf_stats.sg_frags_sent += nr_frags + 1; 2333 } 2334 } 2335 rc = NETDEV_TX_OK; 2336 } else { 2337 if (data_offset >= 0) 2338 kmem_cache_free(qeth_core_header_cache, hdr); 2339 2340 if (rc == -EBUSY) { 2341 if (new_skb != skb) 2342 dev_kfree_skb_any(new_skb); 2343 return NETDEV_TX_BUSY; 2344 } else 2345 goto tx_drop; 2346 } 2347 2348 netif_wake_queue(dev); 2349 if (card->options.performance_stats) 2350 card->perf_stats.outbound_time += qeth_get_micros() - 2351 card->perf_stats.outbound_start_time; 2352 return rc; 2353 2354 tx_drop: 2355 card->stats.tx_dropped++; 2356 card->stats.tx_errors++; 2357 if ((new_skb != skb) && new_skb) 2358 dev_kfree_skb_any(new_skb); 2359 dev_kfree_skb_any(skb); 2360 netif_wake_queue(dev); 2361 return NETDEV_TX_OK; 2362 } 2363 2364 static int __qeth_l3_open(struct net_device *dev) 2365 { 2366 struct qeth_card *card = dev->ml_priv; 2367 int rc = 0; 2368 2369 QETH_CARD_TEXT(card, 4, "qethopen"); 2370 if (card->state == CARD_STATE_UP) 2371 return rc; 2372 if (card->state != CARD_STATE_SOFTSETUP) 2373 return -ENODEV; 2374 card->data.state = CH_STATE_UP; 2375 card->state = CARD_STATE_UP; 2376 netif_start_queue(dev); 2377 2378 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { 2379 napi_enable(&card->napi); 2380 napi_schedule(&card->napi); 2381 } else 2382 rc = -EIO; 2383 return rc; 2384 } 2385 2386 static int qeth_l3_open(struct net_device *dev) 2387 { 2388 struct qeth_card *card = dev->ml_priv; 2389 2390 QETH_CARD_TEXT(card, 5, "qethope_"); 2391 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { 
2392 QETH_CARD_TEXT(card, 3, "openREC");
2393 return -ERESTARTSYS;
2394 }
2395 return __qeth_l3_open(dev);
2396 }
2397
2398 static int qeth_l3_stop(struct net_device *dev)
2399 {
2400 struct qeth_card *card = dev->ml_priv;
2401
2402 QETH_CARD_TEXT(card, 4, "qethstop");
2403 netif_tx_disable(dev);
2404 if (card->state == CARD_STATE_UP) {
2405 card->state = CARD_STATE_SOFTSETUP;
2406 napi_disable(&card->napi);
2407 }
2408 return 0;
2409 }
2410
2411 static const struct ethtool_ops qeth_l3_ethtool_ops = {
2412 .get_link = ethtool_op_get_link,
2413 .get_strings = qeth_core_get_strings,
2414 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2415 .get_sset_count = qeth_core_get_sset_count,
2416 .get_drvinfo = qeth_core_get_drvinfo,
2417 .get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
2418 };
2419
2420 /*
2421 * We need NOARP for IPv4, but we still want neighbor solicitation for IPv6.
2422 * Setting IFF_NOARP on the netdevice is not an option, because that would
2423 * also turn off neighbor solicitation. Instead, for IPv4 we install a
2424 * neighbor_setup function: we do not want ARP resolution, but we do want the
2425 * hard header, so that packet sockets (e.g. tcpdump) keep working.
2426 */
2427 static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
2428 {
2429 n->nud_state = NUD_NOARP;
2430 memcpy(n->ha, "FAKELL", 6);
2431 n->output = n->ops->connected_output;
2432 return 0;
2433 }
2434
2435 static int
2436 qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
2437 {
2438 if (np->tbl->family == AF_INET)
2439 np->neigh_setup = qeth_l3_neigh_setup_noarp;
2440
2441 return 0;
2442 }
2443
2444 static const struct net_device_ops qeth_l3_netdev_ops = {
2445 .ndo_open = qeth_l3_open,
2446 .ndo_stop = qeth_l3_stop,
2447 .ndo_get_stats = qeth_get_stats,
2448 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2449 .ndo_validate_addr = eth_validate_addr,
2450 .ndo_set_rx_mode = qeth_l3_set_rx_mode,
2451 .ndo_do_ioctl = qeth_do_ioctl,
2452 .ndo_change_mtu = qeth_change_mtu,
2453 .ndo_fix_features = qeth_fix_features,
2454 .ndo_set_features = qeth_set_features,
2455 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2456 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2457 .ndo_tx_timeout = qeth_tx_timeout,
2458 };
2459
2460 static const struct net_device_ops qeth_l3_osa_netdev_ops = {
2461 .ndo_open = qeth_l3_open,
2462 .ndo_stop = qeth_l3_stop,
2463 .ndo_get_stats = qeth_get_stats,
2464 .ndo_start_xmit = qeth_l3_hard_start_xmit,
2465 .ndo_features_check = qeth_features_check,
2466 .ndo_validate_addr = eth_validate_addr,
2467 .ndo_set_rx_mode = qeth_l3_set_rx_mode,
2468 .ndo_do_ioctl = qeth_do_ioctl,
2469 .ndo_change_mtu = qeth_change_mtu,
2470 .ndo_fix_features = qeth_fix_features,
2471 .ndo_set_features = qeth_set_features,
2472 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
2473 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
2474 .ndo_tx_timeout = qeth_tx_timeout,
2475 .ndo_neigh_setup = qeth_l3_neigh_setup,
2476 };
2477
2478 static int qeth_l3_setup_netdev(struct qeth_card *card)
2479 {
2480 int rc;
2481
2482 if (card->info.type == QETH_CARD_TYPE_OSD ||
2483 card->info.type == QETH_CARD_TYPE_OSX) {
2484 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
2485 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
2486 pr_info("qeth_l3: ignoring TR device\n");
2487 return -ENODEV;
2488 }
2489
2490 card->dev = alloc_etherdev(0);
2491 if (!card->dev)
2492 return -ENODEV;
2493 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
2494
2495 /* IPv6 address autoconfiguration stuff */
2496 qeth_l3_get_unique_id(card);
2497 if
(!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 2498 card->dev->dev_id = card->info.unique_id & 0xffff; 2499 2500 card->dev->hw_features |= NETIF_F_SG; 2501 card->dev->vlan_features |= NETIF_F_SG; 2502 2503 if (!card->info.guestlan) { 2504 card->dev->features |= NETIF_F_SG; 2505 card->dev->hw_features |= NETIF_F_TSO | 2506 NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2507 card->dev->vlan_features |= NETIF_F_TSO | 2508 NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2509 } 2510 2511 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { 2512 card->dev->hw_features |= NETIF_F_IPV6_CSUM; 2513 card->dev->vlan_features |= NETIF_F_IPV6_CSUM; 2514 } 2515 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 2516 card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, 2517 ether_setup); 2518 if (!card->dev) 2519 return -ENODEV; 2520 card->dev->flags |= IFF_NOARP; 2521 card->dev->netdev_ops = &qeth_l3_netdev_ops; 2522 rc = qeth_l3_iqd_read_initial_mac(card); 2523 if (rc) 2524 return rc; 2525 if (card->options.hsuid[0]) 2526 memcpy(card->dev->perm_addr, card->options.hsuid, 9); 2527 } else 2528 return -ENODEV; 2529 2530 card->dev->ml_priv = card; 2531 card->dev->watchdog_timeo = QETH_TX_TIMEOUT; 2532 card->dev->mtu = card->info.initial_mtu; 2533 card->dev->min_mtu = 64; 2534 card->dev->max_mtu = ETH_MAX_MTU; 2535 card->dev->dev_port = card->info.portno; 2536 card->dev->ethtool_ops = &qeth_l3_ethtool_ops; 2537 card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | 2538 NETIF_F_HW_VLAN_CTAG_RX | 2539 NETIF_F_HW_VLAN_CTAG_FILTER; 2540 netif_keep_dst(card->dev); 2541 netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * 2542 PAGE_SIZE); 2543 2544 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 2545 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); 2546 netif_carrier_off(card->dev); 2547 return register_netdev(card->dev); 2548 } 2549 2550 static const struct device_type qeth_l3_devtype = { 2551 .name = "qeth_layer3", 2552 .groups = qeth_l3_attr_groups, 2553 }; 2554 2555 static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 2556 { 2557 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2558 int rc; 2559 2560 if (gdev->dev.type == &qeth_generic_devtype) { 2561 rc = qeth_l3_create_device_attributes(&gdev->dev); 2562 if (rc) 2563 return rc; 2564 } 2565 hash_init(card->ip_htable); 2566 hash_init(card->ip_mc_htable); 2567 card->options.layer2 = 0; 2568 card->info.hwtrap = 0; 2569 return 0; 2570 } 2571 2572 static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) 2573 { 2574 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 2575 2576 if (cgdev->dev.type == &qeth_generic_devtype) 2577 qeth_l3_remove_device_attributes(&cgdev->dev); 2578 2579 qeth_set_allowed_threads(card, 0, 1); 2580 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 2581 2582 if (cgdev->state == CCWGROUP_ONLINE) 2583 qeth_l3_set_offline(cgdev); 2584 2585 if (card->dev) { 2586 unregister_netdev(card->dev); 2587 free_netdev(card->dev); 2588 card->dev = NULL; 2589 } 2590 2591 qeth_l3_clear_ip_htable(card, 0); 2592 qeth_l3_clear_ipato_list(card); 2593 return; 2594 } 2595 2596 static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) 2597 { 2598 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2599 int rc = 0; 2600 enum qeth_card_states recover_flag; 2601 2602 mutex_lock(&card->discipline_mutex); 2603 mutex_lock(&card->conf_mutex); 2604 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 2605 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 2606 2607 recover_flag = card->state; 2608 rc = 
qeth_core_hardsetup_card(card); 2609 if (rc) { 2610 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); 2611 rc = -ENODEV; 2612 goto out_remove; 2613 } 2614 2615 if (!card->dev && qeth_l3_setup_netdev(card)) { 2616 rc = -ENODEV; 2617 goto out_remove; 2618 } 2619 2620 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { 2621 if (card->info.hwtrap && 2622 qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)) 2623 card->info.hwtrap = 0; 2624 } else 2625 card->info.hwtrap = 0; 2626 2627 card->state = CARD_STATE_HARDSETUP; 2628 qeth_print_status_message(card); 2629 2630 /* softsetup */ 2631 QETH_DBF_TEXT(SETUP, 2, "softsetp"); 2632 2633 rc = qeth_l3_setadapter_parms(card); 2634 if (rc) 2635 QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); 2636 if (!card->options.sniffer) { 2637 rc = qeth_l3_start_ipassists(card); 2638 if (rc) { 2639 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 2640 goto out_remove; 2641 } 2642 rc = qeth_l3_setrouting_v4(card); 2643 if (rc) 2644 QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc); 2645 rc = qeth_l3_setrouting_v6(card); 2646 if (rc) 2647 QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc); 2648 } 2649 netif_tx_disable(card->dev); 2650 2651 rc = qeth_init_qdio_queues(card); 2652 if (rc) { 2653 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 2654 rc = -ENODEV; 2655 goto out_remove; 2656 } 2657 card->state = CARD_STATE_SOFTSETUP; 2658 2659 qeth_set_allowed_threads(card, 0xffffffff, 0); 2660 qeth_l3_recover_ip(card); 2661 if (card->lan_online) 2662 netif_carrier_on(card->dev); 2663 else 2664 netif_carrier_off(card->dev); 2665 if (recover_flag == CARD_STATE_RECOVER) { 2666 rtnl_lock(); 2667 if (recovery_mode) 2668 __qeth_l3_open(card->dev); 2669 else 2670 dev_open(card->dev); 2671 qeth_l3_set_rx_mode(card->dev); 2672 qeth_recover_features(card->dev); 2673 rtnl_unlock(); 2674 } 2675 qeth_trace_features(card); 2676 /* let user_space know that device is online */ 2677 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 2678 mutex_unlock(&card->conf_mutex); 2679 mutex_unlock(&card->discipline_mutex); 2680 return 0; 2681 out_remove: 2682 qeth_l3_stop_card(card, 0); 2683 ccw_device_set_offline(CARD_DDEV(card)); 2684 ccw_device_set_offline(CARD_WDEV(card)); 2685 ccw_device_set_offline(CARD_RDEV(card)); 2686 qdio_free(CARD_DDEV(card)); 2687 if (recover_flag == CARD_STATE_RECOVER) 2688 card->state = CARD_STATE_RECOVER; 2689 else 2690 card->state = CARD_STATE_DOWN; 2691 mutex_unlock(&card->conf_mutex); 2692 mutex_unlock(&card->discipline_mutex); 2693 return rc; 2694 } 2695 2696 static int qeth_l3_set_online(struct ccwgroup_device *gdev) 2697 { 2698 return __qeth_l3_set_online(gdev, 0); 2699 } 2700 2701 static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, 2702 int recovery_mode) 2703 { 2704 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 2705 int rc = 0, rc2 = 0, rc3 = 0; 2706 enum qeth_card_states recover_flag; 2707 2708 mutex_lock(&card->discipline_mutex); 2709 mutex_lock(&card->conf_mutex); 2710 QETH_DBF_TEXT(SETUP, 3, "setoffl"); 2711 QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); 2712 2713 if (card->dev && netif_carrier_ok(card->dev)) 2714 netif_carrier_off(card->dev); 2715 recover_flag = card->state; 2716 if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { 2717 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 2718 card->info.hwtrap = 1; 2719 } 2720 qeth_l3_stop_card(card, recovery_mode); 2721 if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) { 2722 rtnl_lock(); 2723 call_netdevice_notifiers(NETDEV_REBOOT, card->dev); 2724 rtnl_unlock(); 2725 } 2726 rc = ccw_device_set_offline(CARD_DDEV(card)); 2727 
rc2 = ccw_device_set_offline(CARD_WDEV(card)); 2728 rc3 = ccw_device_set_offline(CARD_RDEV(card)); 2729 if (!rc) 2730 rc = (rc2) ? rc2 : rc3; 2731 if (rc) 2732 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2733 qdio_free(CARD_DDEV(card)); 2734 if (recover_flag == CARD_STATE_UP) 2735 card->state = CARD_STATE_RECOVER; 2736 /* let user_space know that device is offline */ 2737 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); 2738 mutex_unlock(&card->conf_mutex); 2739 mutex_unlock(&card->discipline_mutex); 2740 return 0; 2741 } 2742 2743 static int qeth_l3_set_offline(struct ccwgroup_device *cgdev) 2744 { 2745 return __qeth_l3_set_offline(cgdev, 0); 2746 } 2747 2748 static int qeth_l3_recover(void *ptr) 2749 { 2750 struct qeth_card *card; 2751 int rc = 0; 2752 2753 card = (struct qeth_card *) ptr; 2754 QETH_CARD_TEXT(card, 2, "recover1"); 2755 QETH_CARD_HEX(card, 2, &card, sizeof(void *)); 2756 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 2757 return 0; 2758 QETH_CARD_TEXT(card, 2, "recover2"); 2759 dev_warn(&card->gdev->dev, 2760 "A recovery process has been started for the device\n"); 2761 qeth_set_recovery_task(card); 2762 __qeth_l3_set_offline(card->gdev, 1); 2763 rc = __qeth_l3_set_online(card->gdev, 1); 2764 if (!rc) 2765 dev_info(&card->gdev->dev, 2766 "Device successfully recovered!\n"); 2767 else { 2768 qeth_close_dev(card); 2769 dev_warn(&card->gdev->dev, "The qeth device driver " 2770 "failed to recover an error on the device\n"); 2771 } 2772 qeth_clear_recovery_task(card); 2773 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 2774 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 2775 return 0; 2776 } 2777 2778 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) 2779 { 2780 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2781 2782 if (card->dev) 2783 netif_device_detach(card->dev); 2784 qeth_set_allowed_threads(card, 0, 1); 2785 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 2786 if (gdev->state == CCWGROUP_OFFLINE) 2787 return 0; 2788 if (card->state == CARD_STATE_UP) { 2789 if (card->info.hwtrap) 2790 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 2791 __qeth_l3_set_offline(card->gdev, 1); 2792 } else 2793 __qeth_l3_set_offline(card->gdev, 0); 2794 return 0; 2795 } 2796 2797 static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) 2798 { 2799 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2800 int rc = 0; 2801 2802 if (gdev->state == CCWGROUP_OFFLINE) 2803 goto out; 2804 2805 if (card->state == CARD_STATE_RECOVER) { 2806 rc = __qeth_l3_set_online(card->gdev, 1); 2807 if (rc) { 2808 rtnl_lock(); 2809 dev_close(card->dev); 2810 rtnl_unlock(); 2811 } 2812 } else 2813 rc = __qeth_l3_set_online(card->gdev, 0); 2814 out: 2815 qeth_set_allowed_threads(card, 0xffffffff, 0); 2816 if (card->dev) 2817 netif_device_attach(card->dev); 2818 if (rc) 2819 dev_warn(&card->gdev->dev, "The qeth device driver " 2820 "failed to recover an error on the device\n"); 2821 return rc; 2822 } 2823 2824 /* Returns zero if the command is successfully "consumed" */ 2825 static int qeth_l3_control_event(struct qeth_card *card, 2826 struct qeth_ipa_cmd *cmd) 2827 { 2828 return 1; 2829 } 2830 2831 struct qeth_discipline qeth_l3_discipline = { 2832 .devtype = &qeth_l3_devtype, 2833 .process_rx_buffer = qeth_l3_process_inbound_buffer, 2834 .recover = qeth_l3_recover, 2835 .setup = qeth_l3_probe_device, 2836 .remove = qeth_l3_remove_device, 2837 .set_online = qeth_l3_set_online, 2838 .set_offline = qeth_l3_set_offline, 2839 .freeze = qeth_l3_pm_suspend, 2840 
.thaw = qeth_l3_pm_resume, 2841 .restore = qeth_l3_pm_resume, 2842 .do_ioctl = qeth_l3_do_ioctl, 2843 .control_event_handler = qeth_l3_control_event, 2844 }; 2845 EXPORT_SYMBOL_GPL(qeth_l3_discipline); 2846 2847 static int qeth_l3_handle_ip_event(struct qeth_card *card, 2848 struct qeth_ipaddr *addr, 2849 unsigned long event) 2850 { 2851 switch (event) { 2852 case NETDEV_UP: 2853 spin_lock_bh(&card->ip_lock); 2854 qeth_l3_add_ip(card, addr); 2855 spin_unlock_bh(&card->ip_lock); 2856 return NOTIFY_OK; 2857 case NETDEV_DOWN: 2858 spin_lock_bh(&card->ip_lock); 2859 qeth_l3_delete_ip(card, addr); 2860 spin_unlock_bh(&card->ip_lock); 2861 return NOTIFY_OK; 2862 default: 2863 return NOTIFY_DONE; 2864 } 2865 } 2866 2867 static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) 2868 { 2869 if (is_vlan_dev(dev)) 2870 dev = vlan_dev_real_dev(dev); 2871 if (dev->netdev_ops == &qeth_l3_osa_netdev_ops || 2872 dev->netdev_ops == &qeth_l3_netdev_ops) 2873 return (struct qeth_card *) dev->ml_priv; 2874 return NULL; 2875 } 2876 2877 static int qeth_l3_ip_event(struct notifier_block *this, 2878 unsigned long event, void *ptr) 2879 { 2880 2881 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 2882 struct net_device *dev = ifa->ifa_dev->dev; 2883 struct qeth_ipaddr addr; 2884 struct qeth_card *card; 2885 2886 if (dev_net(dev) != &init_net) 2887 return NOTIFY_DONE; 2888 2889 card = qeth_l3_get_card_from_dev(dev); 2890 if (!card) 2891 return NOTIFY_DONE; 2892 QETH_CARD_TEXT(card, 3, "ipevent"); 2893 2894 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4); 2895 addr.u.a4.addr = be32_to_cpu(ifa->ifa_address); 2896 addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask); 2897 2898 return qeth_l3_handle_ip_event(card, &addr, event); 2899 } 2900 2901 static struct notifier_block qeth_l3_ip_notifier = { 2902 qeth_l3_ip_event, 2903 NULL, 2904 }; 2905 2906 static int qeth_l3_ip6_event(struct notifier_block *this, 2907 unsigned long event, void *ptr) 2908 { 2909 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 2910 struct net_device *dev = ifa->idev->dev; 2911 struct qeth_ipaddr addr; 2912 struct qeth_card *card; 2913 2914 card = qeth_l3_get_card_from_dev(dev); 2915 if (!card) 2916 return NOTIFY_DONE; 2917 QETH_CARD_TEXT(card, 3, "ip6event"); 2918 if (!qeth_is_supported(card, IPA_IPV6)) 2919 return NOTIFY_DONE; 2920 2921 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); 2922 addr.u.a6.addr = ifa->addr; 2923 addr.u.a6.pfxlen = ifa->prefix_len; 2924 2925 return qeth_l3_handle_ip_event(card, &addr, event); 2926 } 2927 2928 static struct notifier_block qeth_l3_ip6_notifier = { 2929 qeth_l3_ip6_event, 2930 NULL, 2931 }; 2932 2933 static int qeth_l3_register_notifiers(void) 2934 { 2935 int rc; 2936 2937 QETH_DBF_TEXT(SETUP, 5, "regnotif"); 2938 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); 2939 if (rc) 2940 return rc; 2941 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier); 2942 if (rc) { 2943 unregister_inetaddr_notifier(&qeth_l3_ip_notifier); 2944 return rc; 2945 } 2946 return 0; 2947 } 2948 2949 static void qeth_l3_unregister_notifiers(void) 2950 { 2951 QETH_DBF_TEXT(SETUP, 5, "unregnot"); 2952 WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); 2953 WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); 2954 } 2955 2956 static int __init qeth_l3_init(void) 2957 { 2958 pr_info("register layer 3 discipline\n"); 2959 return qeth_l3_register_notifiers(); 2960 } 2961 2962 static void __exit qeth_l3_exit(void) 2963 { 2964 qeth_l3_unregister_notifiers(); 2965 
pr_info("unregister layer 3 discipline\n"); 2966 } 2967 2968 module_init(qeth_l3_init); 2969 module_exit(qeth_l3_exit); 2970 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); 2971 MODULE_DESCRIPTION("qeth layer 3 discipline"); 2972 MODULE_LICENSE("GPL"); 2973