1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright IBM Corp. 2007, 2009 4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Thomas Spatzier <tspat@de.ibm.com>, 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 */ 9 10 #define KMSG_COMPONENT "qeth" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/module.h> 14 #include <linux/moduleparam.h> 15 #include <linux/bitops.h> 16 #include <linux/string.h> 17 #include <linux/errno.h> 18 #include <linux/kernel.h> 19 #include <linux/etherdevice.h> 20 #include <linux/ip.h> 21 #include <linux/in.h> 22 #include <linux/ipv6.h> 23 #include <linux/inetdevice.h> 24 #include <linux/igmp.h> 25 #include <linux/slab.h> 26 #include <linux/if_ether.h> 27 #include <linux/if_vlan.h> 28 #include <linux/skbuff.h> 29 30 #include <net/ip.h> 31 #include <net/arp.h> 32 #include <net/route.h> 33 #include <net/ipv6.h> 34 #include <net/ip6_route.h> 35 #include <net/iucv/af_iucv.h> 36 #include <linux/hashtable.h> 37 38 #include "qeth_l3.h" 39 40 41 static int qeth_l3_set_offline(struct ccwgroup_device *); 42 static int qeth_l3_register_addr_entry(struct qeth_card *, 43 struct qeth_ipaddr *); 44 static int qeth_l3_deregister_addr_entry(struct qeth_card *, 45 struct qeth_ipaddr *); 46 47 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) 48 { 49 sprintf(buf, "%pI4", addr); 50 } 51 52 static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) 53 { 54 sprintf(buf, "%pI6", addr); 55 } 56 57 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, 58 char *buf) 59 { 60 if (proto == QETH_PROT_IPV4) 61 qeth_l3_ipaddr4_to_string(addr, buf); 62 else if (proto == QETH_PROT_IPV6) 63 qeth_l3_ipaddr6_to_string(addr, buf); 64 } 65 66 static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, 67 struct qeth_ipaddr *query) 68 { 69 u32 key = qeth_l3_ipaddr_hash(query); 70 struct qeth_ipaddr *addr; 71 72 if (query->is_multicast) { 73 hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) 74 if (qeth_l3_addr_match_ip(addr, query)) 75 return addr; 76 } else { 77 hash_for_each_possible(card->ip_htable, addr, hnode, key) 78 if (qeth_l3_addr_match_ip(addr, query)) 79 return addr; 80 } 81 return NULL; 82 } 83 84 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 85 { 86 int i, j; 87 u8 octet; 88 89 for (i = 0; i < len; ++i) { 90 octet = addr[i]; 91 for (j = 7; j >= 0; --j) { 92 bits[i*8 + j] = octet & 1; 93 octet >>= 1; 94 } 95 } 96 } 97 98 static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, 99 struct qeth_ipaddr *addr) 100 { 101 struct qeth_ipato_entry *ipatoe; 102 u8 addr_bits[128] = {0, }; 103 u8 ipatoe_bits[128] = {0, }; 104 int rc = 0; 105 106 if (!card->ipato.enabled) 107 return false; 108 if (addr->type != QETH_IP_TYPE_NORMAL) 109 return false; 110 111 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, 112 (addr->proto == QETH_PROT_IPV4)? 4:16); 113 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 114 if (addr->proto != ipatoe->proto) 115 continue; 116 qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits, 117 (ipatoe->proto == QETH_PROT_IPV4) ? 118 4 : 16); 119 if (addr->proto == QETH_PROT_IPV4) 120 rc = !memcmp(addr_bits, ipatoe_bits, 121 min(32, ipatoe->mask_bits)); 122 else 123 rc = !memcmp(addr_bits, ipatoe_bits, 124 min(128, ipatoe->mask_bits)); 125 if (rc) 126 break; 127 } 128 /* invert? 
 */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}

static int qeth_l3_delete_ip(struct qeth_card *card,
			     struct qeth_ipaddr *tmp_addr)
{
	int rc = 0;
	struct qeth_ipaddr *addr;

	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
		QETH_CARD_TEXT(card, 2, "delrxip");
	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
		QETH_CARD_TEXT(card, 2, "delvipa");
	else
		QETH_CARD_TEXT(card, 2, "delip");

	if (tmp_addr->proto == QETH_PROT_IPV4)
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
	else {
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
	}

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
		return -ENOENT;

	addr->ref_counter--;
	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
		return rc;
	if (addr->in_progress)
		return -EINPROGRESS;

	if (qeth_card_hw_is_reachable(card))
		rc = qeth_l3_deregister_addr_entry(card, addr);

	hash_del(&addr->hnode);
	kfree(addr);

	return rc;
}

static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
	int rc = 0;
	struct qeth_ipaddr *addr;
	char buf[40];

	if (tmp_addr->type == QETH_IP_TYPE_RXIP)
		QETH_CARD_TEXT(card, 2, "addrxip");
	else if (tmp_addr->type == QETH_IP_TYPE_VIPA)
		QETH_CARD_TEXT(card, 2, "addvipa");
	else
		QETH_CARD_TEXT(card, 2, "addip");

	if (tmp_addr->proto == QETH_PROT_IPV4)
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
	else {
		QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
	}

	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
	if (addr) {
		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
			return -EADDRINUSE;
		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
			addr->ref_counter++;
			return 0;
		}
		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
					 buf);
		dev_warn(&card->gdev->dev,
			 "Registering IP address %s failed\n", buf);
		return -EADDRINUSE;
	} else {
		addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL);
		if (!addr)
			return -ENOMEM;

		if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
			QETH_CARD_TEXT(card, 2, "tkovaddr");
			addr->ipato = 1;
		}
		hash_add(card->ip_htable, &addr->hnode,
			 qeth_l3_ipaddr_hash(addr));

		if (!qeth_card_hw_is_reachable(card)) {
			addr->disp_flag = QETH_DISP_ADDR_ADD;
			return 0;
		}

		/* qeth_l3_register_addr_entry can sleep when it adds an IPv4
		 * address, because the SETIP IPA command starts ARP-related
		 * processing for IPv4. So drop the ip_lock while the command
		 * runs, and use the in_progress flag to indicate that a
		 * hardware operation for this IPv4 address is pending.
		 */
		if (addr->proto == QETH_PROT_IPV4) {
			addr->in_progress = 1;
			mutex_unlock(&card->ip_lock);
			rc = qeth_l3_register_addr_entry(card, addr);
			mutex_lock(&card->ip_lock);
			addr->in_progress = 0;
		} else
			rc = qeth_l3_register_addr_entry(card, addr);

		if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
			if (addr->ref_counter < 1) {
				qeth_l3_deregister_addr_entry(card, addr);
				hash_del(&addr->hnode);
				kfree(addr);
			}
		} else {
			hash_del(&addr->hnode);
			kfree(addr);
		}
	}
	return rc;
}

static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr,
			     bool add)
{
	int rc;

	mutex_lock(&card->ip_lock);
	rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr);
	mutex_unlock(&card->ip_lock);

	return rc;
}

static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card)
{
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
		hash_del(&addr->hnode);
		kfree(addr);
	}
}

static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i;

	QETH_CARD_TEXT(card, 4, "clearip");

	mutex_lock(&card->ip_lock);

	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
		if (!recover) {
			hash_del(&addr->hnode);
			kfree(addr);
			continue;
		}
		addr->disp_flag = QETH_DISP_ADDR_ADD;
	}

	mutex_unlock(&card->ip_lock);
}

static void qeth_l3_recover_ip(struct qeth_card *card)
{
	struct qeth_ipaddr *addr;
	struct hlist_node *tmp;
	int i;
	int rc;

	QETH_CARD_TEXT(card, 4, "recovrip");

	mutex_lock(&card->ip_lock);

	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
			if (addr->proto == QETH_PROT_IPV4) {
				addr->in_progress = 1;
				mutex_unlock(&card->ip_lock);
				rc = qeth_l3_register_addr_entry(card, addr);
				mutex_lock(&card->ip_lock);
				addr->in_progress = 0;
			} else
				rc = qeth_l3_register_addr_entry(card, addr);

			if (!rc) {
				addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
				if (addr->ref_counter < 1)
					qeth_l3_delete_ip(card, addr);
			} else {
				hash_del(&addr->hnode);
				kfree(addr);
			}
		}
	}

	mutex_unlock(&card->ip_lock);
}

static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
			       unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		return 0;
	case IPA_RC_DUPLICATE_IP_ADDRESS:
		return -EADDRINUSE;
	case IPA_RC_MC_ADDR_NOT_FOUND:
		return -ENOENT;
	case IPA_RC_LAN_OFFLINE:
		return -ENETDOWN;
	default:
		return -EIO;
	}
}

static int qeth_l3_send_setdelmc(struct qeth_card *card,
				 struct qeth_ipaddr *addr, int ipacmd)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setdelmc");

	iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto,
				 IPA_DATA_SIZEOF(setdelipm));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	if 
(addr->proto == QETH_PROT_IPV6) { 372 cmd->data.setdelipm.ip = addr->u.a6.addr; 373 ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac); 374 } else { 375 cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr; 376 ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac); 377 } 378 379 return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); 380 } 381 382 static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) 383 { 384 int i, j; 385 for (i = 0; i < 16; i++) { 386 j = (len) - (i * 8); 387 if (j >= 8) 388 netmask[i] = 0xff; 389 else if (j > 0) 390 netmask[i] = (u8)(0xFF00 >> j); 391 else 392 netmask[i] = 0; 393 } 394 } 395 396 static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set) 397 { 398 switch (addr->type) { 399 case QETH_IP_TYPE_RXIP: 400 return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; 401 case QETH_IP_TYPE_VIPA: 402 return (set) ? QETH_IPA_SETIP_VIPA_FLAG : 403 QETH_IPA_DELIP_VIPA_FLAG; 404 default: 405 return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; 406 } 407 } 408 409 static int qeth_l3_send_setdelip(struct qeth_card *card, 410 struct qeth_ipaddr *addr, 411 enum qeth_ipa_cmds ipacmd) 412 { 413 struct qeth_cmd_buffer *iob; 414 struct qeth_ipa_cmd *cmd; 415 __u8 netmask[16]; 416 u32 flags; 417 418 QETH_CARD_TEXT(card, 4, "setdelip"); 419 420 iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto, 421 IPA_DATA_SIZEOF(setdelip6)); 422 if (!iob) 423 return -ENOMEM; 424 cmd = __ipa_cmd(iob); 425 426 flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP); 427 QETH_CARD_TEXT_(card, 4, "flags%02X", flags); 428 429 if (addr->proto == QETH_PROT_IPV6) { 430 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, 431 sizeof(struct in6_addr)); 432 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen); 433 memcpy(cmd->data.setdelip6.mask, netmask, 434 sizeof(struct in6_addr)); 435 cmd->data.setdelip6.flags = flags; 436 } else { 437 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4); 438 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4); 439 cmd->data.setdelip4.flags = flags; 440 } 441 442 return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); 443 } 444 445 static int qeth_l3_send_setrouting(struct qeth_card *card, 446 enum qeth_routing_types type, enum qeth_prot_versions prot) 447 { 448 int rc; 449 struct qeth_ipa_cmd *cmd; 450 struct qeth_cmd_buffer *iob; 451 452 QETH_CARD_TEXT(card, 4, "setroutg"); 453 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETRTG, prot, 454 IPA_DATA_SIZEOF(setrtg)); 455 if (!iob) 456 return -ENOMEM; 457 cmd = __ipa_cmd(iob); 458 cmd->data.setrtg.type = (type); 459 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 460 461 return rc; 462 } 463 464 static int qeth_l3_correct_routing_type(struct qeth_card *card, 465 enum qeth_routing_types *type, enum qeth_prot_versions prot) 466 { 467 if (IS_IQD(card)) { 468 switch (*type) { 469 case NO_ROUTER: 470 case PRIMARY_CONNECTOR: 471 case SECONDARY_CONNECTOR: 472 case MULTICAST_ROUTER: 473 return 0; 474 default: 475 goto out_inval; 476 } 477 } else { 478 switch (*type) { 479 case NO_ROUTER: 480 case PRIMARY_ROUTER: 481 case SECONDARY_ROUTER: 482 return 0; 483 case MULTICAST_ROUTER: 484 if (qeth_is_ipafunc_supported(card, prot, 485 IPA_OSA_MC_ROUTER)) 486 return 0; 487 default: 488 goto out_inval; 489 } 490 } 491 out_inval: 492 *type = NO_ROUTER; 493 return -EINVAL; 494 } 495 496 int qeth_l3_setrouting_v4(struct qeth_card *card) 497 { 498 int rc; 499 500 QETH_CARD_TEXT(card, 3, "setrtg4"); 501 502 rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, 503 
QETH_PROT_IPV4); 504 if (rc) 505 return rc; 506 507 rc = qeth_l3_send_setrouting(card, card->options.route4.type, 508 QETH_PROT_IPV4); 509 if (rc) { 510 card->options.route4.type = NO_ROUTER; 511 QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", 512 rc, CARD_DEVID(card)); 513 } 514 return rc; 515 } 516 517 int qeth_l3_setrouting_v6(struct qeth_card *card) 518 { 519 int rc = 0; 520 521 QETH_CARD_TEXT(card, 3, "setrtg6"); 522 523 if (!qeth_is_supported(card, IPA_IPV6)) 524 return 0; 525 rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, 526 QETH_PROT_IPV6); 527 if (rc) 528 return rc; 529 530 rc = qeth_l3_send_setrouting(card, card->options.route6.type, 531 QETH_PROT_IPV6); 532 if (rc) { 533 card->options.route6.type = NO_ROUTER; 534 QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", 535 rc, CARD_DEVID(card)); 536 } 537 return rc; 538 } 539 540 /* 541 * IP address takeover related functions 542 */ 543 544 /** 545 * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. 546 * 547 * Caller must hold ip_lock. 548 */ 549 void qeth_l3_update_ipato(struct qeth_card *card) 550 { 551 struct qeth_ipaddr *addr; 552 unsigned int i; 553 554 hash_for_each(card->ip_htable, i, addr, hnode) { 555 if (addr->type != QETH_IP_TYPE_NORMAL) 556 continue; 557 addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr); 558 } 559 } 560 561 static void qeth_l3_clear_ipato_list(struct qeth_card *card) 562 { 563 struct qeth_ipato_entry *ipatoe, *tmp; 564 565 mutex_lock(&card->ip_lock); 566 567 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 568 list_del(&ipatoe->entry); 569 kfree(ipatoe); 570 } 571 572 qeth_l3_update_ipato(card); 573 mutex_unlock(&card->ip_lock); 574 } 575 576 int qeth_l3_add_ipato_entry(struct qeth_card *card, 577 struct qeth_ipato_entry *new) 578 { 579 struct qeth_ipato_entry *ipatoe; 580 int rc = 0; 581 582 QETH_CARD_TEXT(card, 2, "addipato"); 583 584 mutex_lock(&card->ip_lock); 585 586 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 587 if (ipatoe->proto != new->proto) 588 continue; 589 if (!memcmp(ipatoe->addr, new->addr, 590 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && 591 (ipatoe->mask_bits == new->mask_bits)) { 592 rc = -EEXIST; 593 break; 594 } 595 } 596 597 if (!rc) { 598 list_add_tail(&new->entry, &card->ipato.entries); 599 qeth_l3_update_ipato(card); 600 } 601 602 mutex_unlock(&card->ip_lock); 603 604 return rc; 605 } 606 607 int qeth_l3_del_ipato_entry(struct qeth_card *card, 608 enum qeth_prot_versions proto, u8 *addr, 609 int mask_bits) 610 { 611 struct qeth_ipato_entry *ipatoe, *tmp; 612 int rc = -ENOENT; 613 614 QETH_CARD_TEXT(card, 2, "delipato"); 615 616 mutex_lock(&card->ip_lock); 617 618 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 619 if (ipatoe->proto != proto) 620 continue; 621 if (!memcmp(ipatoe->addr, addr, 622 (proto == QETH_PROT_IPV4)? 
4:16) && 623 (ipatoe->mask_bits == mask_bits)) { 624 list_del(&ipatoe->entry); 625 qeth_l3_update_ipato(card); 626 kfree(ipatoe); 627 rc = 0; 628 } 629 } 630 631 mutex_unlock(&card->ip_lock); 632 return rc; 633 } 634 635 int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, 636 enum qeth_ip_types type, 637 enum qeth_prot_versions proto) 638 { 639 struct qeth_ipaddr addr; 640 641 qeth_l3_init_ipaddr(&addr, type, proto); 642 if (proto == QETH_PROT_IPV4) 643 memcpy(&addr.u.a4.addr, ip, 4); 644 else 645 memcpy(&addr.u.a6.addr, ip, 16); 646 647 return qeth_l3_modify_ip(card, &addr, add); 648 } 649 650 int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) 651 { 652 struct qeth_ipaddr addr; 653 unsigned int i; 654 655 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); 656 addr.u.a6.addr.s6_addr[0] = 0xfe; 657 addr.u.a6.addr.s6_addr[1] = 0x80; 658 for (i = 0; i < 8; i++) 659 addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i]; 660 661 return qeth_l3_modify_ip(card, &addr, add); 662 } 663 664 static int qeth_l3_register_addr_entry(struct qeth_card *card, 665 struct qeth_ipaddr *addr) 666 { 667 char buf[50]; 668 int rc = 0; 669 int cnt = 3; 670 671 if (card->options.sniffer) 672 return 0; 673 674 if (addr->proto == QETH_PROT_IPV4) { 675 QETH_CARD_TEXT(card, 2, "setaddr4"); 676 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 677 } else if (addr->proto == QETH_PROT_IPV6) { 678 QETH_CARD_TEXT(card, 2, "setaddr6"); 679 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); 680 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); 681 } else { 682 QETH_CARD_TEXT(card, 2, "setaddr?"); 683 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); 684 } 685 do { 686 if (addr->is_multicast) 687 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM); 688 else 689 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP); 690 if (rc) 691 QETH_CARD_TEXT(card, 2, "failed"); 692 } while ((--cnt > 0) && rc); 693 if (rc) { 694 QETH_CARD_TEXT(card, 2, "FAILED"); 695 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); 696 dev_warn(&card->gdev->dev, 697 "Registering IP address %s failed\n", buf); 698 } 699 return rc; 700 } 701 702 static int qeth_l3_deregister_addr_entry(struct qeth_card *card, 703 struct qeth_ipaddr *addr) 704 { 705 int rc = 0; 706 707 if (card->options.sniffer) 708 return 0; 709 710 if (addr->proto == QETH_PROT_IPV4) { 711 QETH_CARD_TEXT(card, 2, "deladdr4"); 712 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 713 } else if (addr->proto == QETH_PROT_IPV6) { 714 QETH_CARD_TEXT(card, 2, "deladdr6"); 715 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); 716 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); 717 } else { 718 QETH_CARD_TEXT(card, 2, "deladdr?"); 719 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); 720 } 721 if (addr->is_multicast) 722 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); 723 else 724 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP); 725 if (rc) 726 QETH_CARD_TEXT(card, 2, "failed"); 727 728 return rc; 729 } 730 731 static int qeth_l3_setadapter_parms(struct qeth_card *card) 732 { 733 int rc = 0; 734 735 QETH_CARD_TEXT(card, 2, "setadprm"); 736 737 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { 738 rc = qeth_setadpparms_change_macaddr(card); 739 if (rc) 740 dev_warn(&card->gdev->dev, "Reading the adapter MAC" 741 " address failed\n"); 742 } 743 744 return rc; 745 } 746 747 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) 748 { 749 int rc; 750 751 
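	/* Descriptive note (added): start the ARP-processing IPA assist.
	 * If the assist is missing or fails to start, this is only reported;
	 * the caller treats it as non-fatal and continues bringing the
	 * interface up.
	 */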
QETH_CARD_TEXT(card, 3, "ipaarp"); 752 753 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 754 dev_info(&card->gdev->dev, 755 "ARP processing not supported on %s!\n", 756 QETH_CARD_IFNAME(card)); 757 return 0; 758 } 759 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, 760 IPA_CMD_ASS_START, NULL); 761 if (rc) { 762 dev_warn(&card->gdev->dev, 763 "Starting ARP processing support for %s failed\n", 764 QETH_CARD_IFNAME(card)); 765 } 766 return rc; 767 } 768 769 static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) 770 { 771 int rc; 772 773 QETH_CARD_TEXT(card, 3, "stsrcmac"); 774 775 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { 776 dev_info(&card->gdev->dev, 777 "Inbound source MAC-address not supported on %s\n", 778 QETH_CARD_IFNAME(card)); 779 return -EOPNOTSUPP; 780 } 781 782 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC, 783 IPA_CMD_ASS_START, NULL); 784 if (rc) 785 dev_warn(&card->gdev->dev, 786 "Starting source MAC-address support for %s failed\n", 787 QETH_CARD_IFNAME(card)); 788 return rc; 789 } 790 791 static int qeth_l3_start_ipa_vlan(struct qeth_card *card) 792 { 793 int rc = 0; 794 795 QETH_CARD_TEXT(card, 3, "strtvlan"); 796 797 if (!qeth_is_supported(card, IPA_FULL_VLAN)) { 798 dev_info(&card->gdev->dev, 799 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); 800 return -EOPNOTSUPP; 801 } 802 803 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO, 804 IPA_CMD_ASS_START, NULL); 805 if (rc) { 806 dev_warn(&card->gdev->dev, 807 "Starting VLAN support for %s failed\n", 808 QETH_CARD_IFNAME(card)); 809 } else { 810 dev_info(&card->gdev->dev, "VLAN enabled\n"); 811 } 812 return rc; 813 } 814 815 static int qeth_l3_start_ipa_multicast(struct qeth_card *card) 816 { 817 int rc; 818 819 QETH_CARD_TEXT(card, 3, "stmcast"); 820 821 if (!qeth_is_supported(card, IPA_MULTICASTING)) { 822 dev_info(&card->gdev->dev, 823 "Multicast not supported on %s\n", 824 QETH_CARD_IFNAME(card)); 825 return -EOPNOTSUPP; 826 } 827 828 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING, 829 IPA_CMD_ASS_START, NULL); 830 if (rc) { 831 dev_warn(&card->gdev->dev, 832 "Starting multicast support for %s failed\n", 833 QETH_CARD_IFNAME(card)); 834 } else { 835 dev_info(&card->gdev->dev, "Multicast enabled\n"); 836 card->dev->flags |= IFF_MULTICAST; 837 } 838 return rc; 839 } 840 841 static int qeth_l3_softsetup_ipv6(struct qeth_card *card) 842 { 843 u32 ipv6_data = 3; 844 int rc; 845 846 QETH_CARD_TEXT(card, 3, "softipv6"); 847 848 if (IS_IQD(card)) 849 goto out; 850 851 rc = qeth_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START, 852 &ipv6_data); 853 if (rc) { 854 dev_err(&card->gdev->dev, 855 "Activating IPv6 support for %s failed\n", 856 QETH_CARD_IFNAME(card)); 857 return rc; 858 } 859 rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START, 860 NULL); 861 if (rc) { 862 dev_err(&card->gdev->dev, 863 "Activating IPv6 support for %s failed\n", 864 QETH_CARD_IFNAME(card)); 865 return rc; 866 } 867 rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU, 868 IPA_CMD_ASS_START, NULL); 869 if (rc) { 870 dev_warn(&card->gdev->dev, 871 "Enabling the passthrough mode for %s failed\n", 872 QETH_CARD_IFNAME(card)); 873 return rc; 874 } 875 out: 876 dev_info(&card->gdev->dev, "IPV6 enabled\n"); 877 return 0; 878 } 879 880 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) 881 { 882 QETH_CARD_TEXT(card, 3, "strtipv6"); 883 884 if (!qeth_is_supported(card, IPA_IPV6)) { 885 dev_info(&card->gdev->dev, 886 "IPv6 not supported on %s\n", 
QETH_CARD_IFNAME(card)); 887 return 0; 888 } 889 return qeth_l3_softsetup_ipv6(card); 890 } 891 892 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) 893 { 894 u32 filter_data = 1; 895 int rc; 896 897 QETH_CARD_TEXT(card, 3, "stbrdcst"); 898 card->info.broadcast_capable = 0; 899 if (!qeth_is_supported(card, IPA_FILTERING)) { 900 dev_info(&card->gdev->dev, 901 "Broadcast not supported on %s\n", 902 QETH_CARD_IFNAME(card)); 903 rc = -EOPNOTSUPP; 904 goto out; 905 } 906 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 907 IPA_CMD_ASS_START, NULL); 908 if (rc) { 909 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " 910 "%s failed\n", QETH_CARD_IFNAME(card)); 911 goto out; 912 } 913 914 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 915 IPA_CMD_ASS_CONFIGURE, &filter_data); 916 if (rc) { 917 dev_warn(&card->gdev->dev, 918 "Setting up broadcast filtering for %s failed\n", 919 QETH_CARD_IFNAME(card)); 920 goto out; 921 } 922 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; 923 dev_info(&card->gdev->dev, "Broadcast enabled\n"); 924 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 925 IPA_CMD_ASS_ENABLE, &filter_data); 926 if (rc) { 927 dev_warn(&card->gdev->dev, "Setting up broadcast echo " 928 "filtering for %s failed\n", QETH_CARD_IFNAME(card)); 929 goto out; 930 } 931 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; 932 out: 933 if (card->info.broadcast_capable) 934 card->dev->flags |= IFF_BROADCAST; 935 else 936 card->dev->flags &= ~IFF_BROADCAST; 937 return rc; 938 } 939 940 static int qeth_l3_start_ipassists(struct qeth_card *card) 941 { 942 QETH_CARD_TEXT(card, 3, "strtipas"); 943 944 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 945 qeth_l3_start_ipa_source_mac(card); /* go on*/ 946 qeth_l3_start_ipa_vlan(card); /* go on*/ 947 qeth_l3_start_ipa_multicast(card); /* go on*/ 948 qeth_l3_start_ipa_ipv6(card); /* go on*/ 949 qeth_l3_start_ipa_broadcast(card); /* go on*/ 950 return 0; 951 } 952 953 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, 954 struct qeth_reply *reply, unsigned long data) 955 { 956 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 957 958 if (cmd->hdr.return_code) 959 return -EIO; 960 961 ether_addr_copy(card->dev->dev_addr, 962 cmd->data.create_destroy_addr.unique_id); 963 return 0; 964 } 965 966 static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) 967 { 968 int rc = 0; 969 struct qeth_cmd_buffer *iob; 970 struct qeth_ipa_cmd *cmd; 971 972 QETH_CARD_TEXT(card, 2, "hsrmac"); 973 974 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6, 975 IPA_DATA_SIZEOF(create_destroy_addr)); 976 if (!iob) 977 return -ENOMEM; 978 cmd = __ipa_cmd(iob); 979 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 980 card->info.unique_id; 981 982 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, 983 NULL); 984 return rc; 985 } 986 987 static int qeth_l3_get_unique_id_cb(struct qeth_card *card, 988 struct qeth_reply *reply, unsigned long data) 989 { 990 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 991 992 if (cmd->hdr.return_code == 0) { 993 card->info.unique_id = *((__u16 *) 994 &cmd->data.create_destroy_addr.unique_id[6]); 995 return 0; 996 } 997 998 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 999 UNIQUE_ID_NOT_BY_CARD; 1000 dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n"); 1001 return -EIO; 1002 } 1003 1004 static int qeth_l3_get_unique_id(struct qeth_card *card) 1005 { 1006 int rc = 0; 1007 
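	/* Descriptive note (added): ask the card to generate a unique ID via
	 * IPA_CMD_CREATE_ADDR; it is later used for the netdev dev_id and
	 * IPv6 address autoconfiguration. Without IPv6 support, or if the
	 * command fails, a locally generated fallback value is kept instead.
	 */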
struct qeth_cmd_buffer *iob; 1008 struct qeth_ipa_cmd *cmd; 1009 1010 QETH_CARD_TEXT(card, 2, "guniqeid"); 1011 1012 if (!qeth_is_supported(card, IPA_IPV6)) { 1013 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1014 UNIQUE_ID_NOT_BY_CARD; 1015 return 0; 1016 } 1017 1018 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6, 1019 IPA_DATA_SIZEOF(create_destroy_addr)); 1020 if (!iob) 1021 return -ENOMEM; 1022 cmd = __ipa_cmd(iob); 1023 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1024 card->info.unique_id; 1025 1026 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); 1027 return rc; 1028 } 1029 1030 static int 1031 qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, 1032 unsigned long data) 1033 { 1034 struct qeth_ipa_cmd *cmd; 1035 __u16 rc; 1036 1037 QETH_CARD_TEXT(card, 2, "diastrcb"); 1038 1039 cmd = (struct qeth_ipa_cmd *)data; 1040 rc = cmd->hdr.return_code; 1041 if (rc) 1042 QETH_CARD_TEXT_(card, 2, "dxter%x", rc); 1043 switch (cmd->data.diagass.action) { 1044 case QETH_DIAGS_CMD_TRACE_QUERY: 1045 break; 1046 case QETH_DIAGS_CMD_TRACE_DISABLE: 1047 switch (rc) { 1048 case 0: 1049 case IPA_RC_INVALID_SUBCMD: 1050 card->info.promisc_mode = SET_PROMISC_MODE_OFF; 1051 dev_info(&card->gdev->dev, "The HiperSockets network " 1052 "traffic analyzer is deactivated\n"); 1053 break; 1054 default: 1055 break; 1056 } 1057 break; 1058 case QETH_DIAGS_CMD_TRACE_ENABLE: 1059 switch (rc) { 1060 case 0: 1061 card->info.promisc_mode = SET_PROMISC_MODE_ON; 1062 dev_info(&card->gdev->dev, "The HiperSockets network " 1063 "traffic analyzer is activated\n"); 1064 break; 1065 case IPA_RC_HARDWARE_AUTH_ERROR: 1066 dev_warn(&card->gdev->dev, "The device is not " 1067 "authorized to run as a HiperSockets network " 1068 "traffic analyzer\n"); 1069 break; 1070 case IPA_RC_TRACE_ALREADY_ACTIVE: 1071 dev_warn(&card->gdev->dev, "A HiperSockets " 1072 "network traffic analyzer is already " 1073 "active in the HiperSockets LAN\n"); 1074 break; 1075 default: 1076 break; 1077 } 1078 break; 1079 default: 1080 QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n", 1081 cmd->data.diagass.action, CARD_DEVID(card)); 1082 } 1083 1084 return rc ? 
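	       /* added note: any remaining IPA error is reported as -EIO */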
-EIO : 0; 1085 } 1086 1087 static int 1088 qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) 1089 { 1090 struct qeth_cmd_buffer *iob; 1091 struct qeth_ipa_cmd *cmd; 1092 1093 QETH_CARD_TEXT(card, 2, "diagtrac"); 1094 1095 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRACE, 0); 1096 if (!iob) 1097 return -ENOMEM; 1098 cmd = __ipa_cmd(iob); 1099 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET; 1100 cmd->data.diagass.action = diags_cmd; 1101 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); 1102 } 1103 1104 static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg) 1105 { 1106 struct qeth_card *card = arg; 1107 struct inet6_dev *in6_dev; 1108 struct in_device *in4_dev; 1109 struct qeth_ipaddr *ipm; 1110 struct qeth_ipaddr tmp; 1111 struct ip_mc_list *im4; 1112 struct ifmcaddr6 *im6; 1113 1114 QETH_CARD_TEXT(card, 4, "addmc"); 1115 1116 if (!dev || !(dev->flags & IFF_UP)) 1117 goto out; 1118 1119 in4_dev = __in_dev_get_rtnl(dev); 1120 if (!in4_dev) 1121 goto walk_ipv6; 1122 1123 qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4); 1124 tmp.disp_flag = QETH_DISP_ADDR_ADD; 1125 tmp.is_multicast = 1; 1126 1127 for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL; 1128 im4 = rtnl_dereference(im4->next_rcu)) { 1129 tmp.u.a4.addr = im4->multiaddr; 1130 1131 ipm = qeth_l3_find_addr_by_ip(card, &tmp); 1132 if (ipm) { 1133 /* for mcast, by-IP match means full match */ 1134 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 1135 continue; 1136 } 1137 1138 ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL); 1139 if (!ipm) 1140 continue; 1141 1142 hash_add(card->ip_mc_htable, &ipm->hnode, 1143 qeth_l3_ipaddr_hash(ipm)); 1144 } 1145 1146 walk_ipv6: 1147 if (!qeth_is_supported(card, IPA_IPV6)) 1148 goto out; 1149 1150 in6_dev = __in6_dev_get(dev); 1151 if (!in6_dev) 1152 goto out; 1153 1154 qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); 1155 tmp.disp_flag = QETH_DISP_ADDR_ADD; 1156 tmp.is_multicast = 1; 1157 1158 read_lock_bh(&in6_dev->lock); 1159 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { 1160 tmp.u.a6.addr = im6->mca_addr; 1161 1162 ipm = qeth_l3_find_addr_by_ip(card, &tmp); 1163 if (ipm) { 1164 /* for mcast, by-IP match means full match */ 1165 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 1166 continue; 1167 } 1168 1169 ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC); 1170 if (!ipm) 1171 continue; 1172 1173 hash_add(card->ip_mc_htable, 1174 &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); 1175 1176 } 1177 read_unlock_bh(&in6_dev->lock); 1178 1179 out: 1180 return 0; 1181 } 1182 1183 static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, 1184 __be16 proto, u16 vid) 1185 { 1186 struct qeth_card *card = dev->ml_priv; 1187 1188 QETH_CARD_TEXT_(card, 4, "aid:%d", vid); 1189 return 0; 1190 } 1191 1192 static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, 1193 __be16 proto, u16 vid) 1194 { 1195 struct qeth_card *card = dev->ml_priv; 1196 1197 QETH_CARD_TEXT_(card, 4, "kid:%d", vid); 1198 return 0; 1199 } 1200 1201 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 1202 struct qeth_hdr *hdr) 1203 { 1204 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 1205 struct net_device *dev = skb->dev; 1206 1207 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 1208 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 1209 "FAKELL", skb->len); 1210 return; 1211 } 1212 1213 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { 1214 u16 prot = (hdr->hdr.l3.flags & 
QETH_HDR_IPV6) ? ETH_P_IPV6 : 1215 ETH_P_IP; 1216 unsigned char tg_addr[ETH_ALEN]; 1217 1218 skb_reset_network_header(skb); 1219 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { 1220 case QETH_CAST_MULTICAST: 1221 if (prot == ETH_P_IP) 1222 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 1223 else 1224 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 1225 QETH_CARD_STAT_INC(card, rx_multicast); 1226 break; 1227 case QETH_CAST_BROADCAST: 1228 ether_addr_copy(tg_addr, card->dev->broadcast); 1229 QETH_CARD_STAT_INC(card, rx_multicast); 1230 break; 1231 default: 1232 if (card->options.sniffer) 1233 skb->pkt_type = PACKET_OTHERHOST; 1234 ether_addr_copy(tg_addr, card->dev->dev_addr); 1235 } 1236 1237 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 1238 card->dev->header_ops->create(skb, card->dev, prot, 1239 tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac, 1240 skb->len); 1241 else 1242 card->dev->header_ops->create(skb, card->dev, prot, 1243 tg_addr, "FAKELL", skb->len); 1244 } 1245 1246 /* copy VLAN tag from hdr into skb */ 1247 if (!card->options.sniffer && 1248 (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 1249 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 1250 u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 1251 hdr->hdr.l3.vlan_id : 1252 hdr->hdr.l3.next_hop.rx.vlan_id; 1253 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 1254 } 1255 1256 qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags); 1257 } 1258 1259 static int qeth_l3_process_inbound_buffer(struct qeth_card *card, 1260 int budget, int *done) 1261 { 1262 int work_done = 0; 1263 struct sk_buff *skb; 1264 struct qeth_hdr *hdr; 1265 1266 *done = 0; 1267 WARN_ON_ONCE(!budget); 1268 while (budget) { 1269 skb = qeth_core_get_next_skb(card, 1270 &card->qdio.in_q->bufs[card->rx.b_index], 1271 &card->rx.b_element, &card->rx.e_offset, &hdr); 1272 if (!skb) { 1273 *done = 1; 1274 break; 1275 } 1276 1277 if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) 1278 qeth_l3_rebuild_skb(card, skb, hdr); 1279 1280 skb->protocol = eth_type_trans(skb, skb->dev); 1281 QETH_CARD_STAT_INC(card, rx_packets); 1282 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 1283 1284 napi_gro_receive(&card->napi, skb); 1285 work_done++; 1286 budget--; 1287 } 1288 return work_done; 1289 } 1290 1291 static void qeth_l3_stop_card(struct qeth_card *card) 1292 { 1293 QETH_CARD_TEXT(card, 2, "stopcard"); 1294 1295 qeth_set_allowed_threads(card, 0, 1); 1296 1297 cancel_work_sync(&card->rx_mode_work); 1298 qeth_l3_drain_rx_mode_cache(card); 1299 1300 if (card->options.sniffer && 1301 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) 1302 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); 1303 1304 if (card->state == CARD_STATE_SOFTSETUP) { 1305 qeth_l3_clear_ip_htable(card, 1); 1306 qeth_clear_ipacmd_list(card); 1307 card->state = CARD_STATE_HARDSETUP; 1308 } 1309 if (card->state == CARD_STATE_HARDSETUP) { 1310 qeth_qdio_clear_card(card, 0); 1311 qeth_drain_output_queues(card); 1312 qeth_clear_working_pool_list(card); 1313 card->state = CARD_STATE_DOWN; 1314 } 1315 1316 flush_workqueue(card->event_wq); 1317 } 1318 1319 static void qeth_l3_set_promisc_mode(struct qeth_card *card) 1320 { 1321 bool enable = card->dev->flags & IFF_PROMISC; 1322 1323 if (card->info.promisc_mode == enable) 1324 return; 1325 1326 if (IS_VM_NIC(card)) { /* Guestlan trace */ 1327 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 1328 qeth_setadp_promisc_mode(card, enable); 1329 } else if (card->options.sniffer && /* HiperSockets trace */ 1330 qeth_adp_supported(card, 
IPA_SETADP_SET_DIAG_ASSIST)) { 1331 if (enable) { 1332 QETH_CARD_TEXT(card, 3, "+promisc"); 1333 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); 1334 } else { 1335 QETH_CARD_TEXT(card, 3, "-promisc"); 1336 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); 1337 } 1338 } 1339 } 1340 1341 static void qeth_l3_rx_mode_work(struct work_struct *work) 1342 { 1343 struct qeth_card *card = container_of(work, struct qeth_card, 1344 rx_mode_work); 1345 struct qeth_ipaddr *addr; 1346 struct hlist_node *tmp; 1347 int i, rc; 1348 1349 QETH_CARD_TEXT(card, 3, "setmulti"); 1350 1351 if (!card->options.sniffer) { 1352 rtnl_lock(); 1353 qeth_l3_add_mcast_rtnl(card->dev, 0, card); 1354 if (qeth_is_supported(card, IPA_FULL_VLAN)) 1355 vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card); 1356 rtnl_unlock(); 1357 1358 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { 1359 switch (addr->disp_flag) { 1360 case QETH_DISP_ADDR_DELETE: 1361 rc = qeth_l3_deregister_addr_entry(card, addr); 1362 if (!rc || rc == -ENOENT) { 1363 hash_del(&addr->hnode); 1364 kfree(addr); 1365 } 1366 break; 1367 case QETH_DISP_ADDR_ADD: 1368 rc = qeth_l3_register_addr_entry(card, addr); 1369 if (rc && rc != -ENETDOWN) { 1370 hash_del(&addr->hnode); 1371 kfree(addr); 1372 break; 1373 } 1374 addr->ref_counter = 1; 1375 /* fall through */ 1376 default: 1377 /* for next call to set_rx_mode(): */ 1378 addr->disp_flag = QETH_DISP_ADDR_DELETE; 1379 } 1380 } 1381 } 1382 1383 qeth_l3_set_promisc_mode(card); 1384 } 1385 1386 static int qeth_l3_arp_makerc(u16 rc) 1387 { 1388 switch (rc) { 1389 case IPA_RC_SUCCESS: 1390 return 0; 1391 case QETH_IPA_ARP_RC_NOTSUPP: 1392 case QETH_IPA_ARP_RC_Q_NOTSUPP: 1393 return -EOPNOTSUPP; 1394 case QETH_IPA_ARP_RC_OUT_OF_RANGE: 1395 return -EINVAL; 1396 case QETH_IPA_ARP_RC_Q_NO_DATA: 1397 return -ENOENT; 1398 default: 1399 return -EIO; 1400 } 1401 } 1402 1403 static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply, 1404 unsigned long data) 1405 { 1406 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 1407 1408 qeth_setassparms_cb(card, reply, data); 1409 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1410 } 1411 1412 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) 1413 { 1414 struct qeth_cmd_buffer *iob; 1415 int rc; 1416 1417 QETH_CARD_TEXT(card, 3, "arpstnoe"); 1418 1419 /* 1420 * currently GuestLAN only supports the ARP assist function 1421 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; 1422 * thus we say EOPNOTSUPP for this ARP function 1423 */ 1424 if (IS_VM_NIC(card)) 1425 return -EOPNOTSUPP; 1426 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1427 return -EOPNOTSUPP; 1428 } 1429 1430 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1431 IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 1432 SETASS_DATA_SIZEOF(flags_32bit), 1433 QETH_PROT_IPV4); 1434 if (!iob) 1435 return -ENOMEM; 1436 1437 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries; 1438 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1439 if (rc) 1440 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n", 1441 CARD_DEVID(card), rc); 1442 return rc; 1443 } 1444 1445 static __u32 get_arp_entry_size(struct qeth_card *card, 1446 struct qeth_arp_query_data *qdata, 1447 struct qeth_arp_entrytype *type, __u8 strip_entries) 1448 { 1449 __u32 rc; 1450 __u8 is_hsi; 1451 1452 is_hsi = qdata->reply_bits == 5; 1453 if (type->ip == QETHARP_IP_ADDR_V4) { 1454 QETH_CARD_TEXT(card, 4, "arpev4"); 1455 if 
(strip_entries) { 1456 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) : 1457 sizeof(struct qeth_arp_qi_entry7_short); 1458 } else { 1459 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) : 1460 sizeof(struct qeth_arp_qi_entry7); 1461 } 1462 } else if (type->ip == QETHARP_IP_ADDR_V6) { 1463 QETH_CARD_TEXT(card, 4, "arpev6"); 1464 if (strip_entries) { 1465 rc = is_hsi ? 1466 sizeof(struct qeth_arp_qi_entry5_short_ipv6) : 1467 sizeof(struct qeth_arp_qi_entry7_short_ipv6); 1468 } else { 1469 rc = is_hsi ? 1470 sizeof(struct qeth_arp_qi_entry5_ipv6) : 1471 sizeof(struct qeth_arp_qi_entry7_ipv6); 1472 } 1473 } else { 1474 QETH_CARD_TEXT(card, 4, "arpinv"); 1475 rc = 0; 1476 } 1477 1478 return rc; 1479 } 1480 1481 static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot) 1482 { 1483 return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) || 1484 (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6); 1485 } 1486 1487 static int qeth_l3_arp_query_cb(struct qeth_card *card, 1488 struct qeth_reply *reply, unsigned long data) 1489 { 1490 struct qeth_ipa_cmd *cmd; 1491 struct qeth_arp_query_data *qdata; 1492 struct qeth_arp_query_info *qinfo; 1493 int e; 1494 int entrybytes_done; 1495 int stripped_bytes; 1496 __u8 do_strip_entries; 1497 1498 QETH_CARD_TEXT(card, 3, "arpquecb"); 1499 1500 qinfo = (struct qeth_arp_query_info *) reply->param; 1501 cmd = (struct qeth_ipa_cmd *) data; 1502 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version); 1503 if (cmd->hdr.return_code) { 1504 QETH_CARD_TEXT(card, 4, "arpcberr"); 1505 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); 1506 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1507 } 1508 if (cmd->data.setassparms.hdr.return_code) { 1509 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 1510 QETH_CARD_TEXT(card, 4, "setaperr"); 1511 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); 1512 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1513 } 1514 qdata = &cmd->data.setassparms.data.query_arp; 1515 QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); 1516 1517 do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0; 1518 stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0; 1519 entrybytes_done = 0; 1520 for (e = 0; e < qdata->no_entries; ++e) { 1521 char *cur_entry; 1522 __u32 esize; 1523 struct qeth_arp_entrytype *etype; 1524 1525 cur_entry = &qdata->data + entrybytes_done; 1526 etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type; 1527 if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) { 1528 QETH_CARD_TEXT(card, 4, "pmis"); 1529 QETH_CARD_TEXT_(card, 4, "%i", etype->ip); 1530 break; 1531 } 1532 esize = get_arp_entry_size(card, qdata, etype, 1533 do_strip_entries); 1534 QETH_CARD_TEXT_(card, 5, "esz%i", esize); 1535 if (!esize) 1536 break; 1537 1538 if ((qinfo->udata_len - qinfo->udata_offset) < esize) { 1539 QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC); 1540 memset(qinfo->udata, 0, 4); 1541 return -ENOSPC; 1542 } 1543 1544 memcpy(qinfo->udata + qinfo->udata_offset, 1545 &qdata->data + entrybytes_done + stripped_bytes, 1546 esize); 1547 entrybytes_done += esize + stripped_bytes; 1548 qinfo->udata_offset += esize; 1549 ++qinfo->no_entries; 1550 } 1551 /* check if all replies received ... 
*/ 1552 if (cmd->data.setassparms.hdr.seq_no < 1553 cmd->data.setassparms.hdr.number_of_replies) 1554 return 1; 1555 QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries); 1556 memcpy(qinfo->udata, &qinfo->no_entries, 4); 1557 /* keep STRIP_ENTRIES flag so the user program can distinguish 1558 * stripped entries from normal ones */ 1559 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 1560 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; 1561 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); 1562 QETH_CARD_TEXT_(card, 4, "rc%i", 0); 1563 return 0; 1564 } 1565 1566 static int qeth_l3_query_arp_cache_info(struct qeth_card *card, 1567 enum qeth_prot_versions prot, 1568 struct qeth_arp_query_info *qinfo) 1569 { 1570 struct qeth_cmd_buffer *iob; 1571 struct qeth_ipa_cmd *cmd; 1572 int rc; 1573 1574 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); 1575 1576 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1577 IPA_CMD_ASS_ARP_QUERY_INFO, 1578 SETASS_DATA_SIZEOF(query_arp), prot); 1579 if (!iob) 1580 return -ENOMEM; 1581 cmd = __ipa_cmd(iob); 1582 cmd->data.setassparms.data.query_arp.request_bits = 0x000F; 1583 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo); 1584 if (rc) 1585 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n", 1586 CARD_DEVID(card), rc); 1587 return rc; 1588 } 1589 1590 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) 1591 { 1592 struct qeth_arp_query_info qinfo = {0, }; 1593 int rc; 1594 1595 QETH_CARD_TEXT(card, 3, "arpquery"); 1596 1597 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 1598 IPA_ARP_PROCESSING)) { 1599 QETH_CARD_TEXT(card, 3, "arpqnsup"); 1600 rc = -EOPNOTSUPP; 1601 goto out; 1602 } 1603 /* get size of userspace buffer and mask_bits -> 6 bytes */ 1604 if (copy_from_user(&qinfo, udata, 6)) { 1605 rc = -EFAULT; 1606 goto out; 1607 } 1608 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 1609 if (!qinfo.udata) { 1610 rc = -ENOMEM; 1611 goto out; 1612 } 1613 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; 1614 rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo); 1615 if (rc) { 1616 if (copy_to_user(udata, qinfo.udata, 4)) 1617 rc = -EFAULT; 1618 goto free_and_out; 1619 } 1620 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { 1621 /* fails in case of GuestLAN QDIO mode */ 1622 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo); 1623 } 1624 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { 1625 QETH_CARD_TEXT(card, 4, "qactf"); 1626 rc = -EFAULT; 1627 goto free_and_out; 1628 } 1629 QETH_CARD_TEXT(card, 4, "qacts"); 1630 1631 free_and_out: 1632 kfree(qinfo.udata); 1633 out: 1634 return rc; 1635 } 1636 1637 static int qeth_l3_arp_modify_entry(struct qeth_card *card, 1638 struct qeth_arp_cache_entry *entry, 1639 enum qeth_arp_process_subcmds arp_cmd) 1640 { 1641 struct qeth_arp_cache_entry *cmd_entry; 1642 struct qeth_cmd_buffer *iob; 1643 int rc; 1644 1645 if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY) 1646 QETH_CARD_TEXT(card, 3, "arpadd"); 1647 else 1648 QETH_CARD_TEXT(card, 3, "arpdel"); 1649 1650 /* 1651 * currently GuestLAN only supports the ARP assist function 1652 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; 1653 * thus we say EOPNOTSUPP for this ARP function 1654 */ 1655 if (IS_VM_NIC(card)) 1656 return -EOPNOTSUPP; 1657 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1658 return -EOPNOTSUPP; 1659 } 1660 1661 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd, 1662 SETASS_DATA_SIZEOF(arp_entry), 1663 QETH_PROT_IPV4); 
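	/* Descriptive note (added): the add/remove entry command carries a
	 * MAC address plus a 4-byte IP address, so this path only handles
	 * IPv4 ARP entries.
	 */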
1664 if (!iob) 1665 return -ENOMEM; 1666 1667 cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry; 1668 ether_addr_copy(cmd_entry->macaddr, entry->macaddr); 1669 memcpy(cmd_entry->ipaddr, entry->ipaddr, 4); 1670 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1671 if (rc) 1672 QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n", 1673 arp_cmd, CARD_DEVID(card), rc); 1674 return rc; 1675 } 1676 1677 static int qeth_l3_arp_flush_cache(struct qeth_card *card) 1678 { 1679 struct qeth_cmd_buffer *iob; 1680 int rc; 1681 1682 QETH_CARD_TEXT(card, 3, "arpflush"); 1683 1684 /* 1685 * currently GuestLAN only supports the ARP assist function 1686 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; 1687 * thus we say EOPNOTSUPP for this ARP function 1688 */ 1689 if (IS_VM_NIC(card) || IS_IQD(card)) 1690 return -EOPNOTSUPP; 1691 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1692 return -EOPNOTSUPP; 1693 } 1694 1695 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1696 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0, 1697 QETH_PROT_IPV4); 1698 if (!iob) 1699 return -ENOMEM; 1700 1701 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1702 if (rc) 1703 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n", 1704 CARD_DEVID(card), rc); 1705 return rc; 1706 } 1707 1708 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1709 { 1710 struct qeth_card *card = dev->ml_priv; 1711 struct qeth_arp_cache_entry arp_entry; 1712 enum qeth_arp_process_subcmds arp_cmd; 1713 int rc = 0; 1714 1715 switch (cmd) { 1716 case SIOC_QETH_ARP_SET_NO_ENTRIES: 1717 if (!capable(CAP_NET_ADMIN)) { 1718 rc = -EPERM; 1719 break; 1720 } 1721 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); 1722 break; 1723 case SIOC_QETH_ARP_QUERY_INFO: 1724 if (!capable(CAP_NET_ADMIN)) { 1725 rc = -EPERM; 1726 break; 1727 } 1728 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); 1729 break; 1730 case SIOC_QETH_ARP_ADD_ENTRY: 1731 case SIOC_QETH_ARP_REMOVE_ENTRY: 1732 if (!capable(CAP_NET_ADMIN)) 1733 return -EPERM; 1734 if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry))) 1735 return -EFAULT; 1736 1737 arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ? 1738 IPA_CMD_ASS_ARP_ADD_ENTRY : 1739 IPA_CMD_ASS_ARP_REMOVE_ENTRY; 1740 return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd); 1741 case SIOC_QETH_ARP_FLUSH_CACHE: 1742 if (!capable(CAP_NET_ADMIN)) { 1743 rc = -EPERM; 1744 break; 1745 } 1746 rc = qeth_l3_arp_flush_cache(card); 1747 break; 1748 default: 1749 rc = -EOPNOTSUPP; 1750 } 1751 return rc; 1752 } 1753 1754 static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst, 1755 int ipv) 1756 { 1757 struct neighbour *n = NULL; 1758 1759 if (dst) 1760 n = dst_neigh_lookup_skb(dst, skb); 1761 1762 if (n) { 1763 int cast_type = n->type; 1764 1765 neigh_release(n); 1766 if ((cast_type == RTN_BROADCAST) || 1767 (cast_type == RTN_MULTICAST) || 1768 (cast_type == RTN_ANYCAST)) 1769 return cast_type; 1770 return RTN_UNICAST; 1771 } 1772 1773 /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ 1774 switch (ipv) { 1775 case 4: 1776 if (ipv4_is_lbcast(ip_hdr(skb)->daddr)) 1777 return RTN_BROADCAST; 1778 return ipv4_is_multicast(ip_hdr(skb)->daddr) ? 1779 RTN_MULTICAST : RTN_UNICAST; 1780 case 6: 1781 return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? 1782 RTN_MULTICAST : RTN_UNICAST; 1783 default: 1784 /* ... 
and MAC address */ 1785 return qeth_get_ether_cast_type(skb); 1786 } 1787 } 1788 1789 static int qeth_l3_get_cast_type(struct sk_buff *skb) 1790 { 1791 int ipv = qeth_get_ip_version(skb); 1792 struct dst_entry *dst; 1793 int cast_type; 1794 1795 rcu_read_lock(); 1796 dst = qeth_dst_check_rcu(skb, ipv); 1797 cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv); 1798 rcu_read_unlock(); 1799 1800 return cast_type; 1801 } 1802 1803 static u8 qeth_l3_cast_type_to_flag(int cast_type) 1804 { 1805 if (cast_type == RTN_MULTICAST) 1806 return QETH_CAST_MULTICAST; 1807 if (cast_type == RTN_ANYCAST) 1808 return QETH_CAST_ANYCAST; 1809 if (cast_type == RTN_BROADCAST) 1810 return QETH_CAST_BROADCAST; 1811 return QETH_CAST_UNICAST; 1812 } 1813 1814 static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, 1815 struct qeth_hdr *hdr, struct sk_buff *skb, 1816 int ipv, unsigned int data_len) 1817 { 1818 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 1819 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 1820 struct qeth_card *card = queue->card; 1821 struct dst_entry *dst; 1822 int cast_type; 1823 1824 hdr->hdr.l3.length = data_len; 1825 1826 if (skb_is_gso(skb)) { 1827 hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO; 1828 } else { 1829 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 1830 1831 if (skb->protocol == htons(ETH_P_AF_IUCV)) { 1832 l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; 1833 l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80); 1834 memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2], 1835 iucv_trans_hdr(skb)->destUserID, 8); 1836 return; 1837 } 1838 1839 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1840 qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); 1841 /* some HW requires combined L3+L4 csum offload: */ 1842 if (ipv == 4) 1843 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; 1844 } 1845 } 1846 1847 if (ipv == 4 || IS_IQD(card)) { 1848 /* NETIF_F_HW_VLAN_CTAG_TX */ 1849 if (skb_vlan_tag_present(skb)) { 1850 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME; 1851 hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); 1852 } 1853 } else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) { 1854 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG; 1855 hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI); 1856 } 1857 1858 rcu_read_lock(); 1859 dst = qeth_dst_check_rcu(skb, ipv); 1860 1861 if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ) 1862 cast_type = RTN_UNICAST; 1863 else 1864 cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv); 1865 l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type); 1866 1867 if (ipv == 4) { 1868 struct rtable *rt = (struct rtable *) dst; 1869 1870 *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ? 
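		/* added note: prefer the route's next hop; otherwise fall
		 * back to the packet's destination address
		 */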
		rt_nexthop(rt, ip_hdr(skb)->daddr) :
		ip_hdr(skb)->daddr;
	} else if (ipv == 6) {
		struct rt6_info *rt = (struct rt6_info *) dst;

		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
			l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
		else
			l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;

		hdr->hdr.l3.flags |= QETH_HDR_IPV6;
		if (!IS_IQD(card))
			hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
	} else {
		/* OSA only: */
		l3_hdr->flags |= QETH_HDR_PASSTHRU;
	}
	rcu_read_unlock();
}

static void qeth_l3_fixup_headers(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	/* this is safe, IPv6 traffic takes a different path */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		iph->check = 0;
	if (skb_is_gso(skb)) {
		iph->tot_len = 0;
		tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr,
						    iph->daddr, 0);
	}
}

static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
			struct qeth_qdio_out_q *queue, int ipv)
{
	unsigned int hw_hdr_len;
	int rc;

	/* re-use the L2 header area for the HW header: */
	hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) :
				       sizeof(struct qeth_hdr);
	rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
	if (rc)
		return rc;
	skb_pull(skb, ETH_HLEN);

	qeth_l3_fixup_headers(skb);
	return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
}

static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	u16 txq = skb_get_queue_mapping(skb);
	int ipv = qeth_get_ip_version(skb);
	struct qeth_qdio_out_q *queue;
	int rc;

	if (!skb_is_gso(skb))
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	if (IS_IQD(card)) {
		queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];

		if (card->options.sniffer)
			goto tx_drop;
		if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
		    (card->options.cq == QETH_CQ_ENABLED &&
		     skb->protocol != htons(ETH_P_AF_IUCV)))
			goto tx_drop;
	} else {
		queue = card->qdio.out_qs[txq];
	}

	if (!(dev->flags & IFF_BROADCAST) &&
	    qeth_l3_get_cast_type(skb) == RTN_BROADCAST)
		goto tx_drop;

	if (ipv == 4 || IS_IQD(card))
		rc = qeth_l3_xmit(card, skb, queue, ipv);
	else
		rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);

	if (!rc)
		return NETDEV_TX_OK;

tx_drop:
	QETH_TXQ_STAT_INC(queue, tx_dropped);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void qeth_l3_set_rx_mode(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	schedule_work(&card->rx_mode_work);
}

/*
 * We need NOARP for IPv4, but we want neighbor solicitation for IPv6. Setting
 * NOARP on the netdevice is not an option, because it also turns off neighbor
 * solicitation. For IPv4 we install a neighbor_setup function instead. We
 * don't want ARP resolution, but we do want the hard header, so that packet
 * sockets keep working (e.g. 
tcpdump) 1978 */ 1979 static int qeth_l3_neigh_setup_noarp(struct neighbour *n) 1980 { 1981 n->nud_state = NUD_NOARP; 1982 memcpy(n->ha, "FAKELL", 6); 1983 n->output = n->ops->connected_output; 1984 return 0; 1985 } 1986 1987 static int 1988 qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np) 1989 { 1990 if (np->tbl->family == AF_INET) 1991 np->neigh_setup = qeth_l3_neigh_setup_noarp; 1992 1993 return 0; 1994 } 1995 1996 static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, 1997 struct net_device *dev, 1998 netdev_features_t features) 1999 { 2000 if (qeth_get_ip_version(skb) != 4) 2001 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 2002 return qeth_features_check(skb, dev, features); 2003 } 2004 2005 static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, 2006 struct net_device *sb_dev) 2007 { 2008 return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb), 2009 sb_dev); 2010 } 2011 2012 static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb, 2013 struct net_device *sb_dev) 2014 { 2015 struct qeth_card *card = dev->ml_priv; 2016 2017 return qeth_get_priority_queue(card, skb); 2018 } 2019 2020 static const struct net_device_ops qeth_l3_netdev_ops = { 2021 .ndo_open = qeth_open, 2022 .ndo_stop = qeth_stop, 2023 .ndo_get_stats64 = qeth_get_stats64, 2024 .ndo_start_xmit = qeth_l3_hard_start_xmit, 2025 .ndo_select_queue = qeth_l3_iqd_select_queue, 2026 .ndo_validate_addr = eth_validate_addr, 2027 .ndo_set_rx_mode = qeth_l3_set_rx_mode, 2028 .ndo_do_ioctl = qeth_do_ioctl, 2029 .ndo_fix_features = qeth_fix_features, 2030 .ndo_set_features = qeth_set_features, 2031 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 2032 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 2033 .ndo_tx_timeout = qeth_tx_timeout, 2034 }; 2035 2036 static const struct net_device_ops qeth_l3_osa_netdev_ops = { 2037 .ndo_open = qeth_open, 2038 .ndo_stop = qeth_stop, 2039 .ndo_get_stats64 = qeth_get_stats64, 2040 .ndo_start_xmit = qeth_l3_hard_start_xmit, 2041 .ndo_features_check = qeth_l3_osa_features_check, 2042 .ndo_select_queue = qeth_l3_osa_select_queue, 2043 .ndo_validate_addr = eth_validate_addr, 2044 .ndo_set_rx_mode = qeth_l3_set_rx_mode, 2045 .ndo_do_ioctl = qeth_do_ioctl, 2046 .ndo_fix_features = qeth_fix_features, 2047 .ndo_set_features = qeth_set_features, 2048 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 2049 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 2050 .ndo_tx_timeout = qeth_tx_timeout, 2051 .ndo_neigh_setup = qeth_l3_neigh_setup, 2052 }; 2053 2054 static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) 2055 { 2056 unsigned int headroom; 2057 int rc; 2058 2059 if (IS_OSD(card) || IS_OSX(card)) { 2060 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 2061 (card->info.link_type == QETH_LINK_TYPE_HSTR)) { 2062 pr_info("qeth_l3: ignoring TR device\n"); 2063 return -ENODEV; 2064 } 2065 2066 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; 2067 2068 /*IPv6 address autoconfiguration stuff*/ 2069 qeth_l3_get_unique_id(card); 2070 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 2071 card->dev->dev_id = card->info.unique_id & 0xffff; 2072 2073 if (!IS_VM_NIC(card)) { 2074 card->dev->features |= NETIF_F_SG; 2075 card->dev->hw_features |= NETIF_F_TSO | 2076 NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2077 card->dev->vlan_features |= NETIF_F_TSO | 2078 NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2079 } 2080 2081 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { 2082 card->dev->hw_features |= NETIF_F_IPV6_CSUM; 
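			/* added note: mirror the offload capability into
			 * vlan_features so that VLAN devices stacked on top
			 * of this interface can use it as well
			 */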

static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
	unsigned int headroom;
	int rc;

	if (IS_OSD(card) || IS_OSX(card)) {
		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
			pr_info("qeth_l3: ignoring TR device\n");
			return -ENODEV;
		}

		card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;

		/* IPv6 address autoconfiguration */
		qeth_l3_get_unique_id(card);
		if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
			card->dev->dev_id = card->info.unique_id & 0xffff;

		if (!IS_VM_NIC(card)) {
			card->dev->features |= NETIF_F_SG;
			card->dev->hw_features |= NETIF_F_TSO |
				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
			card->dev->vlan_features |= NETIF_F_TSO |
				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		}

		if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
			card->dev->hw_features |= NETIF_F_IPV6_CSUM;
			card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
		}
		if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
			card->dev->hw_features |= NETIF_F_TSO6;
			card->dev->vlan_features |= NETIF_F_TSO6;
		}

		/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
		if (card->dev->hw_features & NETIF_F_TSO6)
			headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
		else if (card->dev->hw_features & NETIF_F_TSO)
			headroom = sizeof(struct qeth_hdr_tso);
		else
			headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
	} else if (IS_IQD(card)) {
		card->dev->flags |= IFF_NOARP;
		card->dev->netdev_ops = &qeth_l3_netdev_ops;
		headroom = sizeof(struct qeth_hdr) - ETH_HLEN;

		rc = qeth_l3_iqd_read_initial_mac(card);
		if (rc)
			goto out;
	} else {
		return -ENODEV;
	}

	card->dev->needed_headroom = headroom;
	card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_CTAG_FILTER;

	netif_keep_dst(card->dev);
	if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
		netif_set_gso_max_size(card->dev,
				       PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));

	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
	rc = register_netdev(card->dev);
	if (!rc && carrier_ok)
		netif_carrier_on(card->dev);

out:
	if (rc)
		card->dev->netdev_ops = NULL;
	return rc;
}

static const struct device_type qeth_l3_devtype = {
	.name = "qeth_layer3",
	.groups = qeth_l3_attr_groups,
};

static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	hash_init(card->ip_htable);
	mutex_init(&card->ip_lock);
	card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0,
					       dev_name(&gdev->dev));
	if (!card->cmd_wq)
		return -ENOMEM;

	if (gdev->dev.type == &qeth_generic_devtype) {
		rc = qeth_l3_create_device_attributes(&gdev->dev);
		if (rc) {
			destroy_workqueue(card->cmd_wq);
			return rc;
		}
	}

	hash_init(card->ip_mc_htable);
	INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
	return 0;
}

static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	if (cgdev->dev.type == &qeth_generic_devtype)
		qeth_l3_remove_device_attributes(&cgdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE)
		qeth_l3_set_offline(cgdev);

	cancel_work_sync(&card->close_dev_work);
	if (qeth_netdev_is_registered(card->dev))
		unregister_netdev(card->dev);

	flush_workqueue(card->cmd_wq);
	destroy_workqueue(card->cmd_wq);
	qeth_l3_clear_ip_htable(card, 0);
	qeth_l3_clear_ipato_list(card);
}
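
/*
 * Bringing the device online walks through the following stages (see the
 * function below): hardware setup via qeth_core_hardsetup_card(), adapter
 * parameters and IP assists, IPv4/IPv6 routing setup, QDIO queue
 * initialization, re-registration of previously known IP addresses, and
 * finally netdev registration (first time) or re-attach (recovery).
 */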

static int qeth_l3_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	struct net_device *dev = card->dev;
	int rc = 0;
	bool carrier_ok;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_core_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto out_remove;
	}

	card->state = CARD_STATE_HARDSETUP;
	qeth_print_status_message(card);

	/* softsetup */
	QETH_CARD_TEXT(card, 2, "softsetp");

	rc = qeth_l3_setadapter_parms(card);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
	if (!card->options.sniffer) {
		rc = qeth_l3_start_ipassists(card);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out_remove;
		}
		rc = qeth_l3_setrouting_v4(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
		rc = qeth_l3_setrouting_v6(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
	}

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		rc = -ENODEV;
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	qeth_l3_recover_ip(card);

	if (!qeth_netdev_is_registered(dev)) {
		rc = qeth_l3_setup_netdev(card, carrier_ok);
		if (rc)
			goto out_remove;
	} else {
		rtnl_lock();
		if (carrier_ok)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		netif_device_attach(dev);
		qeth_enable_hw_features(dev);

		if (card->info.open_when_online) {
			card->info.open_when_online = 0;
			dev_open(dev, NULL);
		}
		rtnl_unlock();
	}
	qeth_trace_features(card);
	/* let user space know that the device is online */
	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
out_remove:
	qeth_l3_stop_card(card);
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}
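
/*
 * The offline path below mirrors qeth_l3_set_online(): disarm the HW trap,
 * close and detach the netdevice, stop the card and its channels, and free
 * the QDIO resources. It is normally reached from user space through the
 * ccwgroup "online" sysfs attribute, e.g. (the bus-ID is only an example):
 *
 *	echo 0 > /sys/bus/ccwgroup/devices/0.0.f500/online
 */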

static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
				 int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	qeth_l3_stop_card(card);
	if (card->options.cq == QETH_CQ_ENABLED) {
		rtnl_lock();
		call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
		rtnl_unlock();
	}

	rc = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user space know that the device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
}

static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l3_set_offline(cgdev, 0);
}

static int qeth_l3_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_CARD_TEXT(card, 2, "recover1");
	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");
	__qeth_l3_set_offline(card->gdev, 1);
	rc = qeth_l3_set_online(card->gdev);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		ccwgroup_set_offline(card->gdev);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

/* Returns zero if the command is successfully "consumed" */
static int qeth_l3_control_event(struct qeth_card *card,
				 struct qeth_ipa_cmd *cmd)
{
	return 1;
}

struct qeth_discipline qeth_l3_discipline = {
	.devtype = &qeth_l3_devtype,
	.process_rx_buffer = qeth_l3_process_inbound_buffer,
	.recover = qeth_l3_recover,
	.setup = qeth_l3_probe_device,
	.remove = qeth_l3_remove_device,
	.set_online = qeth_l3_set_online,
	.set_offline = qeth_l3_set_offline,
	.do_ioctl = qeth_l3_do_ioctl,
	.control_event_handler = qeth_l3_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l3_discipline);

static int qeth_l3_handle_ip_event(struct qeth_card *card,
				   struct qeth_ipaddr *addr,
				   unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		qeth_l3_modify_ip(card, addr, true);
		return NOTIFY_OK;
	case NETDEV_DOWN:
		qeth_l3_modify_ip(card, addr, false);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

struct qeth_l3_ip_event_work {
	struct work_struct work;
	struct qeth_card *card;
	struct qeth_ipaddr addr;
};

#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)

static void qeth_l3_add_ip_worker(struct work_struct *work)
{
	struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);

	qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true);
	kfree(work);
}

static void qeth_l3_delete_ip_worker(struct work_struct *work)
{
	struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);

	qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false);
	kfree(work);
}

static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
{
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	if (dev->netdev_ops == &qeth_l3_osa_netdev_ops ||
	    dev->netdev_ops == &qeth_l3_netdev_ops)
		return (struct qeth_card *) dev->ml_priv;
	return NULL;
}
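
/*
 * Address notifier glue: IPv4 (inetaddr) events are translated into a
 * qeth_ipaddr and handled directly via qeth_l3_handle_ip_event(), while
 * IPv6 (inet6addr) events further down are packed into a
 * qeth_l3_ip_event_work item and deferred to the card's ordered cmd
 * workqueue.
 */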

static int qeth_l3_ip_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct qeth_ipaddr addr;
	struct qeth_card *card;

	if (dev_net(dev) != &init_net)
		return NOTIFY_DONE;

	card = qeth_l3_get_card_from_dev(dev);
	if (!card)
		return NOTIFY_DONE;
	QETH_CARD_TEXT(card, 3, "ipevent");

	qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
	addr.u.a4.addr = ifa->ifa_address;
	addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);

	return qeth_l3_handle_ip_event(card, &addr, event);
}

static struct notifier_block qeth_l3_ip_notifier = {
	.notifier_call = qeth_l3_ip_event,
};

static int qeth_l3_ip6_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = ifa->idev->dev;
	struct qeth_l3_ip_event_work *ip_work;
	struct qeth_card *card;

	if (event != NETDEV_UP && event != NETDEV_DOWN)
		return NOTIFY_DONE;

	card = qeth_l3_get_card_from_dev(dev);
	if (!card)
		return NOTIFY_DONE;
	QETH_CARD_TEXT(card, 3, "ip6event");
	if (!qeth_is_supported(card, IPA_IPV6))
		return NOTIFY_DONE;

	ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC);
	if (!ip_work)
		return NOTIFY_DONE;

	if (event == NETDEV_UP)
		INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
	else
		INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);

	ip_work->card = card;
	qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL,
			    QETH_PROT_IPV6);
	ip_work->addr.u.a6.addr = ifa->addr;
	ip_work->addr.u.a6.pfxlen = ifa->prefix_len;

	queue_work(card->cmd_wq, &ip_work->work);
	return NOTIFY_OK;
}

static struct notifier_block qeth_l3_ip6_notifier = {
	.notifier_call = qeth_l3_ip6_event,
};

static int qeth_l3_register_notifiers(void)
{
	int rc;

	QETH_DBF_TEXT(SETUP, 5, "regnotif");
	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
	if (rc)
		return rc;
	rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
	if (rc) {
		unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
		return rc;
	}
	return 0;
}

static void qeth_l3_unregister_notifiers(void)
{
	QETH_DBF_TEXT(SETUP, 5, "unregnot");
	WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
	WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
}

static int __init qeth_l3_init(void)
{
	pr_info("register layer 3 discipline\n");
	return qeth_l3_register_notifiers();
}

static void __exit qeth_l3_exit(void)
{
	qeth_l3_unregister_notifiers();
	pr_info("unregister layer 3 discipline\n");
}

module_init(qeth_l3_init);
module_exit(qeth_l3_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 3 discipline");
MODULE_LICENSE("GPL");