/*
 * drivers/s390/net/qeth_l3_main.c
 *
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>

#include <net/ip.h>
#include <net/arp.h>
#include <net/ip6_checksum.h>

#include "qeth_l3.h"

static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_recover(void *);
static int qeth_l3_stop(struct net_device *);
static void qeth_l3_set_multicast_list(struct net_device *);
static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
static int qeth_l3_register_addr_entry(struct qeth_card *,
		struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
		struct qeth_ipaddr *);
static int __qeth_l3_set_online(struct ccwgroup_device *, int);
static int __qeth_l3_set_offline(struct ccwgroup_device *, int);

int qeth_l3_set_large_send(struct qeth_card *card,
		enum qeth_large_send_types type)
{
	int rc = 0;

	card->options.large_send = type;
	if (card->dev == NULL)
		return 0;

	if (card->options.large_send == QETH_LARGE_SEND_TSO) {
		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
						NETIF_F_HW_CSUM;
		} else {
			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
						NETIF_F_HW_CSUM);
			card->options.large_send = QETH_LARGE_SEND_NO;
			rc = -EOPNOTSUPP;
		}
	} else {
		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
					NETIF_F_HW_CSUM);
		card->options.large_send = QETH_LARGE_SEND_NO;
	}
	return rc;
}

static int qeth_l3_isxdigit(char *buf)
{
	while (*buf) {
		if (!isxdigit(*buf++))
			return 0;
	}
	return 1;
}

void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
}

int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
{
	int count = 0, rc = 0;
	int in[4];
	char c;

	rc = sscanf(buf, "%u.%u.%u.%u%c",
		    &in[0], &in[1], &in[2], &in[3], &c);
	if (rc != 4 && (rc != 5 || c != '\n'))
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
		addr[count] = in[count];
	}
	return 0;
}

void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
		     ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
		     addr[0], addr[1], addr[2], addr[3],
		     addr[4], addr[5], addr[6], addr[7],
		     addr[8], addr[9], addr[10], addr[11],
		     addr[12], addr[13], addr[14], addr[15]);
}

int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
{
	const char *end, *end_tmp, *start;
	__u16 *in;
	char num[5];
	int num2, cnt, out, found, save_cnt;
	unsigned short in_tmp[8] = {0, };

	cnt = out = found = save_cnt = num2 = 0;
	end = start = buf;
	in = (__u16 *) addr;
	memset(in, 0, 16);
	while (*end) {
		end = strchr(start, ':');
		if (end == NULL) {
			end = buf + strlen(buf);
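			/* no more ':' separators: this is the last group;
			 * trim a trailing newline and leave the loop once it
			 * has been parsed */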
			end_tmp = strchr(start, '\n');
			if (end_tmp != NULL)
				end = end_tmp;
			out = 1;
		}
		if ((end - start)) {
			memset(num, 0, 5);
			if ((end - start) > 4)
				return -EINVAL;
			memcpy(num, start, end - start);
			if (!qeth_l3_isxdigit(num))
				return -EINVAL;
			sscanf(start, "%x", &num2);
			if (found)
				in_tmp[save_cnt++] = num2;
			else
				in[cnt++] = num2;
			if (out)
				break;
		} else {
			if (found)
				return -EINVAL;
			found = 1;
		}
		start = ++end;
	}
	if (cnt + save_cnt > 8)
		return -EINVAL;
	cnt = 7;
	while (save_cnt)
		in[cnt--] = in_tmp[--save_cnt];
	return 0;
}

void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
			      char *buf)
{
	if (proto == QETH_PROT_IPV4)
		qeth_l3_ipaddr4_to_string(addr, buf);
	else if (proto == QETH_PROT_IPV6)
		qeth_l3_ipaddr6_to_string(addr, buf);
}

int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
			     __u8 *addr)
{
	if (proto == QETH_PROT_IPV4)
		return qeth_l3_string_to_ipaddr4(buf, addr);
	else if (proto == QETH_PROT_IPV6)
		return qeth_l3_string_to_ipaddr6(buf, addr);
	else
		return -EINVAL;
}

static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
	int i, j;
	u8 octet;

	for (i = 0; i < len; ++i) {
		octet = addr[i];
		for (j = 7; j >= 0; --j) {
			bits[i*8 + j] = octet & 1;
			octet >>= 1;
		}
	}
}

static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
		struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return 0;

	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				     (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					     (ipatoe->proto == QETH_PROT_IPV4) ?
					     4 : 16);
		if (addr->proto == QETH_PROT_IPV4)
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(32, ipatoe->mask_bits));
		else
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(128, ipatoe->mask_bits));
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}

/*
 * Add an IP address to the todo list. If there is already an "add todo"
 * for this address in the list, we just increment its reference count.
 * Returns 0 if we only adjusted the reference count of an existing entry,
 * 1 if a new todo entry was queued.
 */
static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
		struct qeth_ipaddr *addr, int add)
{
	struct qeth_ipaddr *tmp, *t;
	int found = 0;

	list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
		if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
		    (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
			return 0;
		if ((tmp->proto == QETH_PROT_IPV4) &&
		    (addr->proto == QETH_PROT_IPV4) &&
		    (tmp->type == addr->type) &&
		    (tmp->is_multicast == addr->is_multicast) &&
		    (tmp->u.a4.addr == addr->u.a4.addr) &&
		    (tmp->u.a4.mask == addr->u.a4.mask)) {
			found = 1;
			break;
		}
		if ((tmp->proto == QETH_PROT_IPV6) &&
		    (addr->proto == QETH_PROT_IPV6) &&
		    (tmp->type == addr->type) &&
		    (tmp->is_multicast == addr->is_multicast) &&
		    (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			found = 1;
			break;
		}
	}
	if (found) {
		if (addr->users != 0)
			tmp->users += addr->users;
		else
			tmp->users += add ? 1 : -1;
		if (tmp->users == 0) {
			list_del(&tmp->entry);
			kfree(tmp);
		}
		return 0;
	} else {
		if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
			list_add(&addr->entry, card->ip_tbd_list);
		else {
			if (addr->users == 0)
				addr->users += add ? 1 : -1;
			if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
			    qeth_l3_is_addr_covered_by_ipato(card, addr)) {
				QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
				addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
			}
			list_add_tail(&addr->entry, card->ip_tbd_list);
		}
		return 1;
	}
}

static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 4, "delip");

	if (addr->proto == QETH_PROT_IPV4)
		QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
	else {
		QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
		QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
	}
	spin_lock_irqsave(&card->ip_lock, flags);
	rc = __qeth_l3_insert_ip_todo(card, addr, 0);
	spin_unlock_irqrestore(&card->ip_lock, flags);
	return rc;
}

static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 4, "addip");
	if (addr->proto == QETH_PROT_IPV4)
		QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
	else {
		QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
		QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
	}
	spin_lock_irqsave(&card->ip_lock, flags);
	rc = __qeth_l3_insert_ip_todo(card, addr, 1);
	spin_unlock_irqrestore(&card->ip_lock, flags);
	return rc;
}


static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
		enum qeth_prot_versions prot)
{
	struct qeth_ipaddr *addr;

	addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
	if (addr == NULL)
		return NULL;
	addr->type = QETH_IP_TYPE_NORMAL;
	addr->proto = prot;
	return addr;
}

static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
{
	struct qeth_ipaddr *iptodo;
	unsigned long flags;

	QETH_DBF_TEXT(TRACE, 4, "delmc");
	iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
	if (!iptodo) {
		QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
		return;
	}
	iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
	spin_lock_irqsave(&card->ip_lock, flags);
	if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
		kfree(iptodo);
	spin_unlock_irqrestore(&card->ip_lock, flags);
}

/*
 * Add/remove an address to/from the card's ip list, i.e. try to add or
 * remove a reference to/from an IP address that is already registered
 * on the card.
 * Returns:
 *	0  address was on card and its reference count has been adjusted,
 *	   but is still > 0, so nothing has to be done
 *	   also returns 0 if the address was not on the card and the todo
 *	   was to delete the address -> there is also nothing to be done
 *	1  address was not on card and the todo is to add it to the card's
 *	   ip list
 *	-1 address was on card and its reference count has been decremented
 *	   to <= 0 by the todo -> address must be removed from card
 */
static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
		struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
{
	struct qeth_ipaddr *addr;
	int found = 0;

	list_for_each_entry(addr, &card->ip_list, entry) {
		if ((addr->proto == QETH_PROT_IPV4) &&
		    (todo->proto == QETH_PROT_IPV4) &&
		    (addr->type == todo->type) &&
		    (addr->u.a4.addr == todo->u.a4.addr) &&
		    (addr->u.a4.mask == todo->u.a4.mask)) {
			found = 1;
			break;
		}
		if ((addr->proto == QETH_PROT_IPV6) &&
		    (todo->proto == QETH_PROT_IPV6) &&
		    (addr->type == todo->type) &&
		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			found = 1;
			break;
		}
	}
	if (found) {
		addr->users += todo->users;
		if (addr->users <= 0) {
			*__addr = addr;
			return -1;
		} else {
			/* for VIPA and RXIP limit refcount to 1 */
			if (addr->type != QETH_IP_TYPE_NORMAL)
				addr->users = 1;
			return 0;
		}
	}
	if (todo->users > 0) {
		/* for VIPA and RXIP limit refcount to 1 */
		if (todo->type != QETH_IP_TYPE_NORMAL)
			todo->users = 1;
		return 1;
	} else
		return 0;
}

static void __qeth_l3_delete_all_mc(struct qeth_card *card,
		unsigned long *flags)
{
	struct list_head fail_list;
	struct qeth_ipaddr *addr, *tmp;
	int rc;

	INIT_LIST_HEAD(&fail_list);
again:
	list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
		if (addr->is_multicast) {
			list_del(&addr->entry);
			spin_unlock_irqrestore(&card->ip_lock, *flags);
			rc = qeth_l3_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, *flags);
			if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
				kfree(addr);
			else
				list_add_tail(&addr->entry, &fail_list);
			/* the list may have changed while the lock was
			 * dropped, so restart the scan */
			goto again;
		}
	}
	list_splice(&fail_list, &card->ip_list);
}

static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
{
	struct list_head *tbd_list;
	struct qeth_ipaddr *todo, *addr;
	unsigned long flags;
	int rc;

	QETH_DBF_TEXT(TRACE, 2, "sdiplist");
	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));

	spin_lock_irqsave(&card->ip_lock, flags);
	/* detach the pending todo list and install a fresh, empty one; the
	 * detached entries are worked off below, dropping ip_lock around the
	 * actual register/deregister calls to the card */
	tbd_list = card->ip_tbd_list;
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(TRACE, 0, "silnomem");
		card->ip_tbd_list = tbd_list;
		spin_unlock_irqrestore(&card->ip_lock, flags);
		return;
	} else
		INIT_LIST_HEAD(card->ip_tbd_list);

	while (!list_empty(tbd_list)) {
		todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
		list_del(&todo->entry);
		if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
			__qeth_l3_delete_all_mc(card, &flags);
			kfree(todo);
			continue;
		}
		rc =
__qeth_l3_ref_ip_on_card(card, todo, &addr); 480 if (rc == 0) { 481 /* nothing to be done; only adjusted refcount */ 482 kfree(todo); 483 } else if (rc == 1) { 484 /* new entry to be added to on-card list */ 485 spin_unlock_irqrestore(&card->ip_lock, flags); 486 rc = qeth_l3_register_addr_entry(card, todo); 487 spin_lock_irqsave(&card->ip_lock, flags); 488 if (!rc || (rc == IPA_RC_LAN_OFFLINE)) 489 list_add_tail(&todo->entry, &card->ip_list); 490 else 491 kfree(todo); 492 } else if (rc == -1) { 493 /* on-card entry to be removed */ 494 list_del_init(&addr->entry); 495 spin_unlock_irqrestore(&card->ip_lock, flags); 496 rc = qeth_l3_deregister_addr_entry(card, addr); 497 spin_lock_irqsave(&card->ip_lock, flags); 498 if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED)) 499 kfree(addr); 500 else 501 list_add_tail(&addr->entry, &card->ip_list); 502 kfree(todo); 503 } 504 } 505 spin_unlock_irqrestore(&card->ip_lock, flags); 506 kfree(tbd_list); 507 } 508 509 static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, 510 int recover) 511 { 512 struct qeth_ipaddr *addr, *tmp; 513 unsigned long flags; 514 515 QETH_DBF_TEXT(TRACE, 4, "clearip"); 516 spin_lock_irqsave(&card->ip_lock, flags); 517 /* clear todo list */ 518 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) { 519 list_del(&addr->entry); 520 kfree(addr); 521 } 522 523 while (!list_empty(&card->ip_list)) { 524 addr = list_entry(card->ip_list.next, 525 struct qeth_ipaddr, entry); 526 list_del_init(&addr->entry); 527 if (clean) { 528 spin_unlock_irqrestore(&card->ip_lock, flags); 529 qeth_l3_deregister_addr_entry(card, addr); 530 spin_lock_irqsave(&card->ip_lock, flags); 531 } 532 if (!recover || addr->is_multicast) { 533 kfree(addr); 534 continue; 535 } 536 list_add_tail(&addr->entry, card->ip_tbd_list); 537 } 538 spin_unlock_irqrestore(&card->ip_lock, flags); 539 } 540 541 static int qeth_l3_address_exists_in_list(struct list_head *list, 542 struct qeth_ipaddr *addr, int same_type) 543 { 544 struct qeth_ipaddr *tmp; 545 546 list_for_each_entry(tmp, list, entry) { 547 if ((tmp->proto == QETH_PROT_IPV4) && 548 (addr->proto == QETH_PROT_IPV4) && 549 ((same_type && (tmp->type == addr->type)) || 550 (!same_type && (tmp->type != addr->type))) && 551 (tmp->u.a4.addr == addr->u.a4.addr)) 552 return 1; 553 554 if ((tmp->proto == QETH_PROT_IPV6) && 555 (addr->proto == QETH_PROT_IPV6) && 556 ((same_type && (tmp->type == addr->type)) || 557 (!same_type && (tmp->type != addr->type))) && 558 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr, 559 sizeof(struct in6_addr)) == 0)) 560 return 1; 561 562 } 563 return 0; 564 } 565 566 static int qeth_l3_send_setdelmc(struct qeth_card *card, 567 struct qeth_ipaddr *addr, int ipacmd) 568 { 569 int rc; 570 struct qeth_cmd_buffer *iob; 571 struct qeth_ipa_cmd *cmd; 572 573 QETH_DBF_TEXT(TRACE, 4, "setdelmc"); 574 575 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 576 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 577 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); 578 if (addr->proto == QETH_PROT_IPV6) 579 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr, 580 sizeof(struct in6_addr)); 581 else 582 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4); 583 584 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 585 586 return rc; 587 } 588 589 static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) 590 { 591 int i, j; 592 for (i = 0; i < 16; i++) { 593 j = (len) - (i * 8); 594 if (j >= 8) 595 netmask[i] = 0xff; 596 else if (j > 0) 597 netmask[i] = (u8)(0xFF00 >> 
j); 598 else 599 netmask[i] = 0; 600 } 601 } 602 603 static int qeth_l3_send_setdelip(struct qeth_card *card, 604 struct qeth_ipaddr *addr, int ipacmd, unsigned int flags) 605 { 606 int rc; 607 struct qeth_cmd_buffer *iob; 608 struct qeth_ipa_cmd *cmd; 609 __u8 netmask[16]; 610 611 QETH_DBF_TEXT(TRACE, 4, "setdelip"); 612 QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags); 613 614 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 615 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 616 if (addr->proto == QETH_PROT_IPV6) { 617 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, 618 sizeof(struct in6_addr)); 619 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen); 620 memcpy(cmd->data.setdelip6.mask, netmask, 621 sizeof(struct in6_addr)); 622 cmd->data.setdelip6.flags = flags; 623 } else { 624 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4); 625 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4); 626 cmd->data.setdelip4.flags = flags; 627 } 628 629 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 630 631 return rc; 632 } 633 634 static int qeth_l3_send_setrouting(struct qeth_card *card, 635 enum qeth_routing_types type, enum qeth_prot_versions prot) 636 { 637 int rc; 638 struct qeth_ipa_cmd *cmd; 639 struct qeth_cmd_buffer *iob; 640 641 QETH_DBF_TEXT(TRACE, 4, "setroutg"); 642 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); 643 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 644 cmd->data.setrtg.type = (type); 645 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 646 647 return rc; 648 } 649 650 static void qeth_l3_correct_routing_type(struct qeth_card *card, 651 enum qeth_routing_types *type, enum qeth_prot_versions prot) 652 { 653 if (card->info.type == QETH_CARD_TYPE_IQD) { 654 switch (*type) { 655 case NO_ROUTER: 656 case PRIMARY_CONNECTOR: 657 case SECONDARY_CONNECTOR: 658 case MULTICAST_ROUTER: 659 return; 660 default: 661 goto out_inval; 662 } 663 } else { 664 switch (*type) { 665 case NO_ROUTER: 666 case PRIMARY_ROUTER: 667 case SECONDARY_ROUTER: 668 return; 669 case MULTICAST_ROUTER: 670 if (qeth_is_ipafunc_supported(card, prot, 671 IPA_OSA_MC_ROUTER)) 672 return; 673 default: 674 goto out_inval; 675 } 676 } 677 out_inval: 678 *type = NO_ROUTER; 679 } 680 681 int qeth_l3_setrouting_v4(struct qeth_card *card) 682 { 683 int rc; 684 685 QETH_DBF_TEXT(TRACE, 3, "setrtg4"); 686 687 qeth_l3_correct_routing_type(card, &card->options.route4.type, 688 QETH_PROT_IPV4); 689 690 rc = qeth_l3_send_setrouting(card, card->options.route4.type, 691 QETH_PROT_IPV4); 692 if (rc) { 693 card->options.route4.type = NO_ROUTER; 694 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" 695 " on %s. Type set to 'no router'.\n", rc, 696 QETH_CARD_IFNAME(card)); 697 } 698 return rc; 699 } 700 701 int qeth_l3_setrouting_v6(struct qeth_card *card) 702 { 703 int rc = 0; 704 705 QETH_DBF_TEXT(TRACE, 3, "setrtg6"); 706 #ifdef CONFIG_QETH_IPV6 707 708 if (!qeth_is_supported(card, IPA_IPV6)) 709 return 0; 710 qeth_l3_correct_routing_type(card, &card->options.route6.type, 711 QETH_PROT_IPV6); 712 713 rc = qeth_l3_send_setrouting(card, card->options.route6.type, 714 QETH_PROT_IPV6); 715 if (rc) { 716 card->options.route6.type = NO_ROUTER; 717 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" 718 " on %s. 
Type set to 'no router'.\n", rc, 719 QETH_CARD_IFNAME(card)); 720 } 721 #endif 722 return rc; 723 } 724 725 /* 726 * IP address takeover related functions 727 */ 728 static void qeth_l3_clear_ipato_list(struct qeth_card *card) 729 { 730 731 struct qeth_ipato_entry *ipatoe, *tmp; 732 unsigned long flags; 733 734 spin_lock_irqsave(&card->ip_lock, flags); 735 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 736 list_del(&ipatoe->entry); 737 kfree(ipatoe); 738 } 739 spin_unlock_irqrestore(&card->ip_lock, flags); 740 } 741 742 int qeth_l3_add_ipato_entry(struct qeth_card *card, 743 struct qeth_ipato_entry *new) 744 { 745 struct qeth_ipato_entry *ipatoe; 746 unsigned long flags; 747 int rc = 0; 748 749 QETH_DBF_TEXT(TRACE, 2, "addipato"); 750 spin_lock_irqsave(&card->ip_lock, flags); 751 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 752 if (ipatoe->proto != new->proto) 753 continue; 754 if (!memcmp(ipatoe->addr, new->addr, 755 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && 756 (ipatoe->mask_bits == new->mask_bits)) { 757 rc = -EEXIST; 758 break; 759 } 760 } 761 if (!rc) 762 list_add_tail(&new->entry, &card->ipato.entries); 763 764 spin_unlock_irqrestore(&card->ip_lock, flags); 765 return rc; 766 } 767 768 void qeth_l3_del_ipato_entry(struct qeth_card *card, 769 enum qeth_prot_versions proto, u8 *addr, int mask_bits) 770 { 771 struct qeth_ipato_entry *ipatoe, *tmp; 772 unsigned long flags; 773 774 QETH_DBF_TEXT(TRACE, 2, "delipato"); 775 spin_lock_irqsave(&card->ip_lock, flags); 776 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 777 if (ipatoe->proto != proto) 778 continue; 779 if (!memcmp(ipatoe->addr, addr, 780 (proto == QETH_PROT_IPV4)? 4:16) && 781 (ipatoe->mask_bits == mask_bits)) { 782 list_del(&ipatoe->entry); 783 kfree(ipatoe); 784 } 785 } 786 spin_unlock_irqrestore(&card->ip_lock, flags); 787 } 788 789 /* 790 * VIPA related functions 791 */ 792 int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, 793 const u8 *addr) 794 { 795 struct qeth_ipaddr *ipaddr; 796 unsigned long flags; 797 int rc = 0; 798 799 ipaddr = qeth_l3_get_addr_buffer(proto); 800 if (ipaddr) { 801 if (proto == QETH_PROT_IPV4) { 802 QETH_DBF_TEXT(TRACE, 2, "addvipa4"); 803 memcpy(&ipaddr->u.a4.addr, addr, 4); 804 ipaddr->u.a4.mask = 0; 805 } else if (proto == QETH_PROT_IPV6) { 806 QETH_DBF_TEXT(TRACE, 2, "addvipa6"); 807 memcpy(&ipaddr->u.a6.addr, addr, 16); 808 ipaddr->u.a6.pfxlen = 0; 809 } 810 ipaddr->type = QETH_IP_TYPE_VIPA; 811 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG; 812 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG; 813 } else 814 return -ENOMEM; 815 spin_lock_irqsave(&card->ip_lock, flags); 816 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) || 817 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0)) 818 rc = -EEXIST; 819 spin_unlock_irqrestore(&card->ip_lock, flags); 820 if (rc) { 821 return rc; 822 } 823 if (!qeth_l3_add_ip(card, ipaddr)) 824 kfree(ipaddr); 825 qeth_l3_set_ip_addr_list(card); 826 return rc; 827 } 828 829 void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, 830 const u8 *addr) 831 { 832 struct qeth_ipaddr *ipaddr; 833 834 ipaddr = qeth_l3_get_addr_buffer(proto); 835 if (ipaddr) { 836 if (proto == QETH_PROT_IPV4) { 837 QETH_DBF_TEXT(TRACE, 2, "delvipa4"); 838 memcpy(&ipaddr->u.a4.addr, addr, 4); 839 ipaddr->u.a4.mask = 0; 840 } else if (proto == QETH_PROT_IPV6) { 841 QETH_DBF_TEXT(TRACE, 2, "delvipa6"); 842 memcpy(&ipaddr->u.a6.addr, addr, 16); 843 ipaddr->u.a6.pfxlen = 0; 
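			/* mirror qeth_l3_add_vipa(): mask/prefix stay zero so
			 * the lookup in the card's ip list finds the entry
			 * added there */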
844 } 845 ipaddr->type = QETH_IP_TYPE_VIPA; 846 } else 847 return; 848 if (!qeth_l3_delete_ip(card, ipaddr)) 849 kfree(ipaddr); 850 qeth_l3_set_ip_addr_list(card); 851 } 852 853 /* 854 * proxy ARP related functions 855 */ 856 int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, 857 const u8 *addr) 858 { 859 struct qeth_ipaddr *ipaddr; 860 unsigned long flags; 861 int rc = 0; 862 863 ipaddr = qeth_l3_get_addr_buffer(proto); 864 if (ipaddr) { 865 if (proto == QETH_PROT_IPV4) { 866 QETH_DBF_TEXT(TRACE, 2, "addrxip4"); 867 memcpy(&ipaddr->u.a4.addr, addr, 4); 868 ipaddr->u.a4.mask = 0; 869 } else if (proto == QETH_PROT_IPV6) { 870 QETH_DBF_TEXT(TRACE, 2, "addrxip6"); 871 memcpy(&ipaddr->u.a6.addr, addr, 16); 872 ipaddr->u.a6.pfxlen = 0; 873 } 874 ipaddr->type = QETH_IP_TYPE_RXIP; 875 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG; 876 ipaddr->del_flags = 0; 877 } else 878 return -ENOMEM; 879 spin_lock_irqsave(&card->ip_lock, flags); 880 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) || 881 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0)) 882 rc = -EEXIST; 883 spin_unlock_irqrestore(&card->ip_lock, flags); 884 if (rc) { 885 return rc; 886 } 887 if (!qeth_l3_add_ip(card, ipaddr)) 888 kfree(ipaddr); 889 qeth_l3_set_ip_addr_list(card); 890 return 0; 891 } 892 893 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, 894 const u8 *addr) 895 { 896 struct qeth_ipaddr *ipaddr; 897 898 ipaddr = qeth_l3_get_addr_buffer(proto); 899 if (ipaddr) { 900 if (proto == QETH_PROT_IPV4) { 901 QETH_DBF_TEXT(TRACE, 2, "addrxip4"); 902 memcpy(&ipaddr->u.a4.addr, addr, 4); 903 ipaddr->u.a4.mask = 0; 904 } else if (proto == QETH_PROT_IPV6) { 905 QETH_DBF_TEXT(TRACE, 2, "addrxip6"); 906 memcpy(&ipaddr->u.a6.addr, addr, 16); 907 ipaddr->u.a6.pfxlen = 0; 908 } 909 ipaddr->type = QETH_IP_TYPE_RXIP; 910 } else 911 return; 912 if (!qeth_l3_delete_ip(card, ipaddr)) 913 kfree(ipaddr); 914 qeth_l3_set_ip_addr_list(card); 915 } 916 917 static int qeth_l3_register_addr_entry(struct qeth_card *card, 918 struct qeth_ipaddr *addr) 919 { 920 char buf[50]; 921 int rc = 0; 922 int cnt = 3; 923 924 if (addr->proto == QETH_PROT_IPV4) { 925 QETH_DBF_TEXT(TRACE, 2, "setaddr4"); 926 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); 927 } else if (addr->proto == QETH_PROT_IPV6) { 928 QETH_DBF_TEXT(TRACE, 2, "setaddr6"); 929 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); 930 QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); 931 } else { 932 QETH_DBF_TEXT(TRACE, 2, "setaddr?"); 933 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); 934 } 935 do { 936 if (addr->is_multicast) 937 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM); 938 else 939 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP, 940 addr->set_flags); 941 if (rc) 942 QETH_DBF_TEXT(TRACE, 2, "failed"); 943 } while ((--cnt > 0) && rc); 944 if (rc) { 945 QETH_DBF_TEXT(TRACE, 2, "FAILED"); 946 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); 947 dev_warn(&card->gdev->dev, 948 "Registering IP address %s failed\n", buf); 949 } 950 return rc; 951 } 952 953 static int qeth_l3_deregister_addr_entry(struct qeth_card *card, 954 struct qeth_ipaddr *addr) 955 { 956 int rc = 0; 957 958 if (addr->proto == QETH_PROT_IPV4) { 959 QETH_DBF_TEXT(TRACE, 2, "deladdr4"); 960 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); 961 } else if (addr->proto == QETH_PROT_IPV6) { 962 QETH_DBF_TEXT(TRACE, 2, "deladdr6"); 963 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); 964 QETH_DBF_HEX(TRACE, 3, 
((char *)&addr->u.a6.addr) + 8, 8); 965 } else { 966 QETH_DBF_TEXT(TRACE, 2, "deladdr?"); 967 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); 968 } 969 if (addr->is_multicast) 970 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); 971 else 972 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, 973 addr->del_flags); 974 if (rc) 975 QETH_DBF_TEXT(TRACE, 2, "failed"); 976 977 return rc; 978 } 979 980 static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) 981 { 982 if (cast_type == RTN_MULTICAST) 983 return QETH_CAST_MULTICAST; 984 if (cast_type == RTN_BROADCAST) 985 return QETH_CAST_BROADCAST; 986 return QETH_CAST_UNICAST; 987 } 988 989 static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) 990 { 991 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6; 992 if (cast_type == RTN_MULTICAST) 993 return ct | QETH_CAST_MULTICAST; 994 if (cast_type == RTN_ANYCAST) 995 return ct | QETH_CAST_ANYCAST; 996 if (cast_type == RTN_BROADCAST) 997 return ct | QETH_CAST_BROADCAST; 998 return ct | QETH_CAST_UNICAST; 999 } 1000 1001 static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command, 1002 __u32 mode) 1003 { 1004 int rc; 1005 struct qeth_cmd_buffer *iob; 1006 struct qeth_ipa_cmd *cmd; 1007 1008 QETH_DBF_TEXT(TRACE, 4, "adpmode"); 1009 1010 iob = qeth_get_adapter_cmd(card, command, 1011 sizeof(struct qeth_ipacmd_setadpparms)); 1012 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1013 cmd->data.setadapterparms.data.mode = mode; 1014 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb, 1015 NULL); 1016 return rc; 1017 } 1018 1019 static int qeth_l3_setadapter_hstr(struct qeth_card *card) 1020 { 1021 int rc; 1022 1023 QETH_DBF_TEXT(TRACE, 4, "adphstr"); 1024 1025 if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { 1026 rc = qeth_l3_send_setadp_mode(card, 1027 IPA_SETADP_SET_BROADCAST_MODE, 1028 card->options.broadcast_mode); 1029 if (rc) 1030 QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on " 1031 "device %s: x%x\n", 1032 CARD_BUS_ID(card), rc); 1033 rc = qeth_l3_send_setadp_mode(card, 1034 IPA_SETADP_ALTER_MAC_ADDRESS, 1035 card->options.macaddr_mode); 1036 if (rc) 1037 QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on " 1038 "device %s: x%x\n", CARD_BUS_ID(card), rc); 1039 return rc; 1040 } 1041 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) 1042 QETH_DBF_MESSAGE(2, "set adapter parameters not available " 1043 "to set broadcast mode, using ALLRINGS " 1044 "on device %s:\n", CARD_BUS_ID(card)); 1045 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) 1046 QETH_DBF_MESSAGE(2, "set adapter parameters not available " 1047 "to set macaddr mode, using NONCANONICAL " 1048 "on device %s:\n", CARD_BUS_ID(card)); 1049 return 0; 1050 } 1051 1052 static int qeth_l3_setadapter_parms(struct qeth_card *card) 1053 { 1054 int rc; 1055 1056 QETH_DBF_TEXT(SETUP, 2, "setadprm"); 1057 1058 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 1059 dev_info(&card->gdev->dev, 1060 "set adapter parameters not supported.\n"); 1061 QETH_DBF_TEXT(SETUP, 2, " notsupp"); 1062 return 0; 1063 } 1064 rc = qeth_query_setadapterparms(card); 1065 if (rc) { 1066 QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " 1067 "0x%x\n", dev_name(&card->gdev->dev), rc); 1068 return rc; 1069 } 1070 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { 1071 rc = qeth_setadpparms_change_macaddr(card); 1072 if (rc) 1073 dev_warn(&card->gdev->dev, "Reading the adapter MAC" 1074 " address failed\n"); 1075 } 1076 1077 if ((card->info.link_type == 
QETH_LINK_TYPE_HSTR) || 1078 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) 1079 rc = qeth_l3_setadapter_hstr(card); 1080 1081 return rc; 1082 } 1083 1084 static int qeth_l3_default_setassparms_cb(struct qeth_card *card, 1085 struct qeth_reply *reply, unsigned long data) 1086 { 1087 struct qeth_ipa_cmd *cmd; 1088 1089 QETH_DBF_TEXT(TRACE, 4, "defadpcb"); 1090 1091 cmd = (struct qeth_ipa_cmd *) data; 1092 if (cmd->hdr.return_code == 0) { 1093 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 1094 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 1095 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 1096 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 1097 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 1098 } 1099 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && 1100 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { 1101 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; 1102 QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask); 1103 } 1104 return 0; 1105 } 1106 1107 static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( 1108 struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code, 1109 __u16 len, enum qeth_prot_versions prot) 1110 { 1111 struct qeth_cmd_buffer *iob; 1112 struct qeth_ipa_cmd *cmd; 1113 1114 QETH_DBF_TEXT(TRACE, 4, "getasscm"); 1115 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); 1116 1117 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1118 cmd->data.setassparms.hdr.assist_no = ipa_func; 1119 cmd->data.setassparms.hdr.length = 8 + len; 1120 cmd->data.setassparms.hdr.command_code = cmd_code; 1121 cmd->data.setassparms.hdr.return_code = 0; 1122 cmd->data.setassparms.hdr.seq_no = 0; 1123 1124 return iob; 1125 } 1126 1127 static int qeth_l3_send_setassparms(struct qeth_card *card, 1128 struct qeth_cmd_buffer *iob, __u16 len, long data, 1129 int (*reply_cb)(struct qeth_card *, struct qeth_reply *, 1130 unsigned long), 1131 void *reply_param) 1132 { 1133 int rc; 1134 struct qeth_ipa_cmd *cmd; 1135 1136 QETH_DBF_TEXT(TRACE, 4, "sendassp"); 1137 1138 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1139 if (len <= sizeof(__u32)) 1140 cmd->data.setassparms.data.flags_32bit = (__u32) data; 1141 else /* (len > sizeof(__u32)) */ 1142 memcpy(&cmd->data.setassparms.data, (void *) data, len); 1143 1144 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param); 1145 return rc; 1146 } 1147 1148 #ifdef CONFIG_QETH_IPV6 1149 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, 1150 enum qeth_ipa_funcs ipa_func, __u16 cmd_code) 1151 { 1152 int rc; 1153 struct qeth_cmd_buffer *iob; 1154 1155 QETH_DBF_TEXT(TRACE, 4, "simassp6"); 1156 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 1157 0, QETH_PROT_IPV6); 1158 rc = qeth_l3_send_setassparms(card, iob, 0, 0, 1159 qeth_l3_default_setassparms_cb, NULL); 1160 return rc; 1161 } 1162 #endif 1163 1164 static int qeth_l3_send_simple_setassparms(struct qeth_card *card, 1165 enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data) 1166 { 1167 int rc; 1168 int length = 0; 1169 struct qeth_cmd_buffer *iob; 1170 1171 QETH_DBF_TEXT(TRACE, 4, "simassp4"); 1172 if (data) 1173 length = sizeof(__u32); 1174 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 1175 length, QETH_PROT_IPV4); 1176 rc = qeth_l3_send_setassparms(card, iob, length, data, 1177 qeth_l3_default_setassparms_cb, NULL); 1178 return rc; 1179 } 1180 1181 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) 1182 { 
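	/* Enable the ARP-processing assist if the card advertises it; a card
	 * without IPA_ARP_PROCESSING simply runs without the assist (this is
	 * reported but not treated as an error). */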
1183 int rc; 1184 1185 QETH_DBF_TEXT(TRACE, 3, "ipaarp"); 1186 1187 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1188 dev_info(&card->gdev->dev, 1189 "ARP processing not supported on %s!\n", 1190 QETH_CARD_IFNAME(card)); 1191 return 0; 1192 } 1193 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, 1194 IPA_CMD_ASS_START, 0); 1195 if (rc) { 1196 dev_warn(&card->gdev->dev, 1197 "Starting ARP processing support for %s failed\n", 1198 QETH_CARD_IFNAME(card)); 1199 } 1200 return rc; 1201 } 1202 1203 static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) 1204 { 1205 int rc; 1206 1207 QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); 1208 1209 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { 1210 dev_info(&card->gdev->dev, 1211 "Hardware IP fragmentation not supported on %s\n", 1212 QETH_CARD_IFNAME(card)); 1213 return -EOPNOTSUPP; 1214 } 1215 1216 rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, 1217 IPA_CMD_ASS_START, 0); 1218 if (rc) { 1219 dev_warn(&card->gdev->dev, 1220 "Starting IP fragmentation support for %s failed\n", 1221 QETH_CARD_IFNAME(card)); 1222 } else 1223 dev_info(&card->gdev->dev, 1224 "Hardware IP fragmentation enabled \n"); 1225 return rc; 1226 } 1227 1228 static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) 1229 { 1230 int rc; 1231 1232 QETH_DBF_TEXT(TRACE, 3, "stsrcmac"); 1233 1234 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { 1235 dev_info(&card->gdev->dev, 1236 "Inbound source MAC-address not supported on %s\n", 1237 QETH_CARD_IFNAME(card)); 1238 return -EOPNOTSUPP; 1239 } 1240 1241 rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, 1242 IPA_CMD_ASS_START, 0); 1243 if (rc) 1244 dev_warn(&card->gdev->dev, 1245 "Starting source MAC-address support for %s failed\n", 1246 QETH_CARD_IFNAME(card)); 1247 return rc; 1248 } 1249 1250 static int qeth_l3_start_ipa_vlan(struct qeth_card *card) 1251 { 1252 int rc = 0; 1253 1254 QETH_DBF_TEXT(TRACE, 3, "strtvlan"); 1255 1256 if (!qeth_is_supported(card, IPA_FULL_VLAN)) { 1257 dev_info(&card->gdev->dev, 1258 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); 1259 return -EOPNOTSUPP; 1260 } 1261 1262 rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, 1263 IPA_CMD_ASS_START, 0); 1264 if (rc) { 1265 dev_warn(&card->gdev->dev, 1266 "Starting VLAN support for %s failed\n", 1267 QETH_CARD_IFNAME(card)); 1268 } else { 1269 dev_info(&card->gdev->dev, "VLAN enabled\n"); 1270 } 1271 return rc; 1272 } 1273 1274 static int qeth_l3_start_ipa_multicast(struct qeth_card *card) 1275 { 1276 int rc; 1277 1278 QETH_DBF_TEXT(TRACE, 3, "stmcast"); 1279 1280 if (!qeth_is_supported(card, IPA_MULTICASTING)) { 1281 dev_info(&card->gdev->dev, 1282 "Multicast not supported on %s\n", 1283 QETH_CARD_IFNAME(card)); 1284 return -EOPNOTSUPP; 1285 } 1286 1287 rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, 1288 IPA_CMD_ASS_START, 0); 1289 if (rc) { 1290 dev_warn(&card->gdev->dev, 1291 "Starting multicast support for %s failed\n", 1292 QETH_CARD_IFNAME(card)); 1293 } else { 1294 dev_info(&card->gdev->dev, "Multicast enabled\n"); 1295 card->dev->flags |= IFF_MULTICAST; 1296 } 1297 return rc; 1298 } 1299 1300 static int qeth_l3_query_ipassists_cb(struct qeth_card *card, 1301 struct qeth_reply *reply, unsigned long data) 1302 { 1303 struct qeth_ipa_cmd *cmd; 1304 1305 QETH_DBF_TEXT(SETUP, 2, "qipasscb"); 1306 1307 cmd = (struct qeth_ipa_cmd *) data; 1308 if (cmd->hdr.prot_version == QETH_PROT_IPV4) { 1309 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; 1310 
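		/* the QIPASSIST reply lists both the assists the card
		 * supports and those that are already enabled */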
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 1311 } else { 1312 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; 1313 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 1314 } 1315 QETH_DBF_TEXT(SETUP, 2, "suppenbl"); 1316 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported); 1317 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled); 1318 return 0; 1319 } 1320 1321 static int qeth_l3_query_ipassists(struct qeth_card *card, 1322 enum qeth_prot_versions prot) 1323 { 1324 int rc; 1325 struct qeth_cmd_buffer *iob; 1326 1327 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); 1328 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); 1329 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL); 1330 return rc; 1331 } 1332 1333 #ifdef CONFIG_QETH_IPV6 1334 static int qeth_l3_softsetup_ipv6(struct qeth_card *card) 1335 { 1336 int rc; 1337 1338 QETH_DBF_TEXT(TRACE, 3, "softipv6"); 1339 1340 if (card->info.type == QETH_CARD_TYPE_IQD) 1341 goto out; 1342 1343 rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); 1344 if (rc) { 1345 dev_err(&card->gdev->dev, 1346 "Activating IPv6 support for %s failed\n", 1347 QETH_CARD_IFNAME(card)); 1348 return rc; 1349 } 1350 rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, 1351 IPA_CMD_ASS_START, 3); 1352 if (rc) { 1353 dev_err(&card->gdev->dev, 1354 "Activating IPv6 support for %s failed\n", 1355 QETH_CARD_IFNAME(card)); 1356 return rc; 1357 } 1358 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, 1359 IPA_CMD_ASS_START); 1360 if (rc) { 1361 dev_err(&card->gdev->dev, 1362 "Activating IPv6 support for %s failed\n", 1363 QETH_CARD_IFNAME(card)); 1364 return rc; 1365 } 1366 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, 1367 IPA_CMD_ASS_START); 1368 if (rc) { 1369 dev_warn(&card->gdev->dev, 1370 "Enabling the passthrough mode for %s failed\n", 1371 QETH_CARD_IFNAME(card)); 1372 return rc; 1373 } 1374 out: 1375 dev_info(&card->gdev->dev, "IPV6 enabled\n"); 1376 return 0; 1377 } 1378 #endif 1379 1380 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) 1381 { 1382 int rc = 0; 1383 1384 QETH_DBF_TEXT(TRACE, 3, "strtipv6"); 1385 1386 if (!qeth_is_supported(card, IPA_IPV6)) { 1387 dev_info(&card->gdev->dev, 1388 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); 1389 return 0; 1390 } 1391 #ifdef CONFIG_QETH_IPV6 1392 rc = qeth_l3_softsetup_ipv6(card); 1393 #endif 1394 return rc ; 1395 } 1396 1397 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) 1398 { 1399 int rc; 1400 1401 QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); 1402 card->info.broadcast_capable = 0; 1403 if (!qeth_is_supported(card, IPA_FILTERING)) { 1404 dev_info(&card->gdev->dev, 1405 "Broadcast not supported on %s\n", 1406 QETH_CARD_IFNAME(card)); 1407 rc = -EOPNOTSUPP; 1408 goto out; 1409 } 1410 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, 1411 IPA_CMD_ASS_START, 0); 1412 if (rc) { 1413 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " 1414 "%s failed\n", QETH_CARD_IFNAME(card)); 1415 goto out; 1416 } 1417 1418 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, 1419 IPA_CMD_ASS_CONFIGURE, 1); 1420 if (rc) { 1421 dev_warn(&card->gdev->dev, 1422 "Setting up broadcast filtering for %s failed\n", 1423 QETH_CARD_IFNAME(card)); 1424 goto out; 1425 } 1426 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; 1427 dev_info(&card->gdev->dev, "Broadcast enabled\n"); 1428 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, 1429 IPA_CMD_ASS_ENABLE, 1); 1430 if (rc) { 1431 
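		/* echo filtering could not be enabled; the card stays at
		 * QETH_BROADCAST_WITH_ECHO as set above */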
dev_warn(&card->gdev->dev, "Setting up broadcast echo " 1432 "filtering for %s failed\n", QETH_CARD_IFNAME(card)); 1433 goto out; 1434 } 1435 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; 1436 out: 1437 if (card->info.broadcast_capable) 1438 card->dev->flags |= IFF_BROADCAST; 1439 else 1440 card->dev->flags &= ~IFF_BROADCAST; 1441 return rc; 1442 } 1443 1444 static int qeth_l3_send_checksum_command(struct qeth_card *card) 1445 { 1446 int rc; 1447 1448 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, 1449 IPA_CMD_ASS_START, 0); 1450 if (rc) { 1451 dev_warn(&card->gdev->dev, "Starting HW checksumming for %s " 1452 "failed, using SW checksumming\n", 1453 QETH_CARD_IFNAME(card)); 1454 return rc; 1455 } 1456 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, 1457 IPA_CMD_ASS_ENABLE, 1458 card->info.csum_mask); 1459 if (rc) { 1460 dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s " 1461 "failed, using SW checksumming\n", 1462 QETH_CARD_IFNAME(card)); 1463 return rc; 1464 } 1465 return 0; 1466 } 1467 1468 int qeth_l3_set_rx_csum(struct qeth_card *card, 1469 enum qeth_checksum_types csum_type) 1470 { 1471 int rc = 0; 1472 1473 if (card->options.checksum_type == HW_CHECKSUMMING) { 1474 if ((csum_type != HW_CHECKSUMMING) && 1475 (card->state != CARD_STATE_DOWN)) { 1476 rc = qeth_l3_send_simple_setassparms(card, 1477 IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0); 1478 if (rc) 1479 return -EIO; 1480 } 1481 } else { 1482 if (csum_type == HW_CHECKSUMMING) { 1483 if (card->state != CARD_STATE_DOWN) { 1484 if (!qeth_is_supported(card, 1485 IPA_INBOUND_CHECKSUM)) 1486 return -EPERM; 1487 rc = qeth_l3_send_checksum_command(card); 1488 if (rc) 1489 return -EIO; 1490 } 1491 } 1492 } 1493 card->options.checksum_type = csum_type; 1494 return rc; 1495 } 1496 1497 static int qeth_l3_start_ipa_checksum(struct qeth_card *card) 1498 { 1499 int rc = 0; 1500 1501 QETH_DBF_TEXT(TRACE, 3, "strtcsum"); 1502 1503 if (card->options.checksum_type == NO_CHECKSUMMING) { 1504 dev_info(&card->gdev->dev, 1505 "Using no checksumming on %s.\n", 1506 QETH_CARD_IFNAME(card)); 1507 return 0; 1508 } 1509 if (card->options.checksum_type == SW_CHECKSUMMING) { 1510 dev_info(&card->gdev->dev, 1511 "Using SW checksumming on %s.\n", 1512 QETH_CARD_IFNAME(card)); 1513 return 0; 1514 } 1515 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { 1516 dev_info(&card->gdev->dev, 1517 "Inbound HW Checksumming not " 1518 "supported on %s,\ncontinuing " 1519 "using Inbound SW Checksumming\n", 1520 QETH_CARD_IFNAME(card)); 1521 card->options.checksum_type = SW_CHECKSUMMING; 1522 return 0; 1523 } 1524 rc = qeth_l3_send_checksum_command(card); 1525 if (!rc) 1526 dev_info(&card->gdev->dev, 1527 "HW Checksumming (inbound) enabled\n"); 1528 1529 return rc; 1530 } 1531 1532 static int qeth_l3_start_ipa_tso(struct qeth_card *card) 1533 { 1534 int rc; 1535 1536 QETH_DBF_TEXT(TRACE, 3, "sttso"); 1537 1538 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { 1539 dev_info(&card->gdev->dev, 1540 "Outbound TSO not supported on %s\n", 1541 QETH_CARD_IFNAME(card)); 1542 rc = -EOPNOTSUPP; 1543 } else { 1544 rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, 1545 IPA_CMD_ASS_START, 0); 1546 if (rc) 1547 dev_warn(&card->gdev->dev, "Starting outbound TCP " 1548 "segmentation offload for %s failed\n", 1549 QETH_CARD_IFNAME(card)); 1550 else 1551 dev_info(&card->gdev->dev, 1552 "Outbound TSO enabled\n"); 1553 } 1554 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { 1555 card->options.large_send = 
QETH_LARGE_SEND_NO; 1556 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); 1557 } 1558 return rc; 1559 } 1560 1561 static int qeth_l3_start_ipassists(struct qeth_card *card) 1562 { 1563 QETH_DBF_TEXT(TRACE, 3, "strtipas"); 1564 1565 qeth_set_access_ctrl_online(card); /* go on*/ 1566 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 1567 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ 1568 qeth_l3_start_ipa_source_mac(card); /* go on*/ 1569 qeth_l3_start_ipa_vlan(card); /* go on*/ 1570 qeth_l3_start_ipa_multicast(card); /* go on*/ 1571 qeth_l3_start_ipa_ipv6(card); /* go on*/ 1572 qeth_l3_start_ipa_broadcast(card); /* go on*/ 1573 qeth_l3_start_ipa_checksum(card); /* go on*/ 1574 qeth_l3_start_ipa_tso(card); /* go on*/ 1575 return 0; 1576 } 1577 1578 static int qeth_l3_put_unique_id(struct qeth_card *card) 1579 { 1580 1581 int rc = 0; 1582 struct qeth_cmd_buffer *iob; 1583 struct qeth_ipa_cmd *cmd; 1584 1585 QETH_DBF_TEXT(TRACE, 2, "puniqeid"); 1586 1587 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == 1588 UNIQUE_ID_NOT_BY_CARD) 1589 return -1; 1590 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR, 1591 QETH_PROT_IPV6); 1592 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1593 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1594 card->info.unique_id; 1595 memcpy(&cmd->data.create_destroy_addr.unique_id[0], 1596 card->dev->dev_addr, OSA_ADDR_LEN); 1597 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 1598 return rc; 1599 } 1600 1601 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, 1602 struct qeth_reply *reply, unsigned long data) 1603 { 1604 struct qeth_ipa_cmd *cmd; 1605 1606 cmd = (struct qeth_ipa_cmd *) data; 1607 if (cmd->hdr.return_code == 0) 1608 memcpy(card->dev->dev_addr, 1609 cmd->data.create_destroy_addr.unique_id, ETH_ALEN); 1610 else 1611 random_ether_addr(card->dev->dev_addr); 1612 1613 return 0; 1614 } 1615 1616 static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) 1617 { 1618 int rc = 0; 1619 struct qeth_cmd_buffer *iob; 1620 struct qeth_ipa_cmd *cmd; 1621 1622 QETH_DBF_TEXT(SETUP, 2, "hsrmac"); 1623 1624 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1625 QETH_PROT_IPV6); 1626 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1627 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1628 card->info.unique_id; 1629 1630 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, 1631 NULL); 1632 return rc; 1633 } 1634 1635 static int qeth_l3_get_unique_id_cb(struct qeth_card *card, 1636 struct qeth_reply *reply, unsigned long data) 1637 { 1638 struct qeth_ipa_cmd *cmd; 1639 1640 cmd = (struct qeth_ipa_cmd *) data; 1641 if (cmd->hdr.return_code == 0) 1642 card->info.unique_id = *((__u16 *) 1643 &cmd->data.create_destroy_addr.unique_id[6]); 1644 else { 1645 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1646 UNIQUE_ID_NOT_BY_CARD; 1647 dev_warn(&card->gdev->dev, "The network adapter failed to " 1648 "generate a unique ID\n"); 1649 } 1650 return 0; 1651 } 1652 1653 static int qeth_l3_get_unique_id(struct qeth_card *card) 1654 { 1655 int rc = 0; 1656 struct qeth_cmd_buffer *iob; 1657 struct qeth_ipa_cmd *cmd; 1658 1659 QETH_DBF_TEXT(SETUP, 2, "guniqeid"); 1660 1661 if (!qeth_is_supported(card, IPA_IPV6)) { 1662 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1663 UNIQUE_ID_NOT_BY_CARD; 1664 return 0; 1665 } 1666 1667 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1668 QETH_PROT_IPV6); 1669 cmd = (struct qeth_ipa_cmd 
*)(iob->data+IPA_PDU_HEADER_SIZE); 1670 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1671 card->info.unique_id; 1672 1673 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); 1674 return rc; 1675 } 1676 1677 static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac, 1678 struct net_device *dev) 1679 { 1680 if (dev->type == ARPHRD_IEEE802_TR) 1681 ip_tr_mc_map(ipm, mac); 1682 else 1683 ip_eth_mc_map(ipm, mac); 1684 } 1685 1686 static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) 1687 { 1688 struct qeth_ipaddr *ipm; 1689 struct ip_mc_list *im4; 1690 char buf[MAX_ADDR_LEN]; 1691 1692 QETH_DBF_TEXT(TRACE, 4, "addmc"); 1693 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { 1694 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); 1695 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1696 if (!ipm) 1697 continue; 1698 ipm->u.a4.addr = im4->multiaddr; 1699 memcpy(ipm->mac, buf, OSA_ADDR_LEN); 1700 ipm->is_multicast = 1; 1701 if (!qeth_l3_add_ip(card, ipm)) 1702 kfree(ipm); 1703 } 1704 } 1705 1706 static void qeth_l3_add_vlan_mc(struct qeth_card *card) 1707 { 1708 struct in_device *in_dev; 1709 struct vlan_group *vg; 1710 int i; 1711 1712 QETH_DBF_TEXT(TRACE, 4, "addmcvl"); 1713 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) 1714 return; 1715 1716 vg = card->vlangrp; 1717 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 1718 struct net_device *netdev = vlan_group_get_device(vg, i); 1719 if (netdev == NULL || 1720 !(netdev->flags & IFF_UP)) 1721 continue; 1722 in_dev = in_dev_get(netdev); 1723 if (!in_dev) 1724 continue; 1725 read_lock(&in_dev->mc_list_lock); 1726 qeth_l3_add_mc(card, in_dev); 1727 read_unlock(&in_dev->mc_list_lock); 1728 in_dev_put(in_dev); 1729 } 1730 } 1731 1732 static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) 1733 { 1734 struct in_device *in4_dev; 1735 1736 QETH_DBF_TEXT(TRACE, 4, "chkmcv4"); 1737 in4_dev = in_dev_get(card->dev); 1738 if (in4_dev == NULL) 1739 return; 1740 read_lock(&in4_dev->mc_list_lock); 1741 qeth_l3_add_mc(card, in4_dev); 1742 qeth_l3_add_vlan_mc(card); 1743 read_unlock(&in4_dev->mc_list_lock); 1744 in_dev_put(in4_dev); 1745 } 1746 1747 #ifdef CONFIG_QETH_IPV6 1748 static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) 1749 { 1750 struct qeth_ipaddr *ipm; 1751 struct ifmcaddr6 *im6; 1752 char buf[MAX_ADDR_LEN]; 1753 1754 QETH_DBF_TEXT(TRACE, 4, "addmc6"); 1755 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { 1756 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); 1757 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1758 if (!ipm) 1759 continue; 1760 ipm->is_multicast = 1; 1761 memcpy(ipm->mac, buf, OSA_ADDR_LEN); 1762 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr, 1763 sizeof(struct in6_addr)); 1764 if (!qeth_l3_add_ip(card, ipm)) 1765 kfree(ipm); 1766 } 1767 } 1768 1769 static void qeth_l3_add_vlan_mc6(struct qeth_card *card) 1770 { 1771 struct inet6_dev *in_dev; 1772 struct vlan_group *vg; 1773 int i; 1774 1775 QETH_DBF_TEXT(TRACE, 4, "admc6vl"); 1776 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) 1777 return; 1778 1779 vg = card->vlangrp; 1780 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 1781 struct net_device *netdev = vlan_group_get_device(vg, i); 1782 if (netdev == NULL || 1783 !(netdev->flags & IFF_UP)) 1784 continue; 1785 in_dev = in6_dev_get(netdev); 1786 if (!in_dev) 1787 continue; 1788 read_lock_bh(&in_dev->lock); 1789 qeth_l3_add_mc6(card, in_dev); 1790 read_unlock_bh(&in_dev->lock); 1791 
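		/* release the reference taken by in6_dev_get() above */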
in6_dev_put(in_dev); 1792 } 1793 } 1794 1795 static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) 1796 { 1797 struct inet6_dev *in6_dev; 1798 1799 QETH_DBF_TEXT(TRACE, 4, "chkmcv6"); 1800 if (!qeth_is_supported(card, IPA_IPV6)) 1801 return ; 1802 in6_dev = in6_dev_get(card->dev); 1803 if (in6_dev == NULL) 1804 return; 1805 read_lock_bh(&in6_dev->lock); 1806 qeth_l3_add_mc6(card, in6_dev); 1807 qeth_l3_add_vlan_mc6(card); 1808 read_unlock_bh(&in6_dev->lock); 1809 in6_dev_put(in6_dev); 1810 } 1811 #endif /* CONFIG_QETH_IPV6 */ 1812 1813 static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, 1814 unsigned short vid) 1815 { 1816 struct in_device *in_dev; 1817 struct in_ifaddr *ifa; 1818 struct qeth_ipaddr *addr; 1819 1820 QETH_DBF_TEXT(TRACE, 4, "frvaddr4"); 1821 1822 in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid)); 1823 if (!in_dev) 1824 return; 1825 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 1826 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1827 if (addr) { 1828 addr->u.a4.addr = ifa->ifa_address; 1829 addr->u.a4.mask = ifa->ifa_mask; 1830 addr->type = QETH_IP_TYPE_NORMAL; 1831 if (!qeth_l3_delete_ip(card, addr)) 1832 kfree(addr); 1833 } 1834 } 1835 in_dev_put(in_dev); 1836 } 1837 1838 static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, 1839 unsigned short vid) 1840 { 1841 #ifdef CONFIG_QETH_IPV6 1842 struct inet6_dev *in6_dev; 1843 struct inet6_ifaddr *ifa; 1844 struct qeth_ipaddr *addr; 1845 1846 QETH_DBF_TEXT(TRACE, 4, "frvaddr6"); 1847 1848 in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); 1849 if (!in6_dev) 1850 return; 1851 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) { 1852 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1853 if (addr) { 1854 memcpy(&addr->u.a6.addr, &ifa->addr, 1855 sizeof(struct in6_addr)); 1856 addr->u.a6.pfxlen = ifa->prefix_len; 1857 addr->type = QETH_IP_TYPE_NORMAL; 1858 if (!qeth_l3_delete_ip(card, addr)) 1859 kfree(addr); 1860 } 1861 } 1862 in6_dev_put(in6_dev); 1863 #endif /* CONFIG_QETH_IPV6 */ 1864 } 1865 1866 static void qeth_l3_free_vlan_addresses(struct qeth_card *card, 1867 unsigned short vid) 1868 { 1869 if (!card->vlangrp) 1870 return; 1871 qeth_l3_free_vlan_addresses4(card, vid); 1872 qeth_l3_free_vlan_addresses6(card, vid); 1873 } 1874 1875 static void qeth_l3_vlan_rx_register(struct net_device *dev, 1876 struct vlan_group *grp) 1877 { 1878 struct qeth_card *card = dev->ml_priv; 1879 unsigned long flags; 1880 1881 QETH_DBF_TEXT(TRACE, 4, "vlanreg"); 1882 spin_lock_irqsave(&card->vlanlock, flags); 1883 card->vlangrp = grp; 1884 spin_unlock_irqrestore(&card->vlanlock, flags); 1885 } 1886 1887 static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 1888 { 1889 return; 1890 } 1891 1892 static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 1893 { 1894 struct qeth_card *card = dev->ml_priv; 1895 unsigned long flags; 1896 1897 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); 1898 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { 1899 QETH_DBF_TEXT(TRACE, 3, "kidREC"); 1900 return; 1901 } 1902 spin_lock_irqsave(&card->vlanlock, flags); 1903 /* unregister IP addresses of vlan device */ 1904 qeth_l3_free_vlan_addresses(card, vid); 1905 vlan_group_set_device(card->vlangrp, vid, NULL); 1906 spin_unlock_irqrestore(&card->vlanlock, flags); 1907 qeth_l3_set_multicast_list(card->dev); 1908 } 1909 1910 static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, 1911 struct sk_buff *skb, struct qeth_hdr *hdr) 1912 { 1913 
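	/* Rebuild a link-level view of the inbound frame from the qeth L3
	 * header: choose a destination MAC from the cast-type flags, fake the
	 * Ethernet/token-ring header, pick up a VLAN tag if one was delivered
	 * out of band, and set the checksum status according to the configured
	 * checksumming mode. Returns the VLAN id, or 0 if untagged. */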
unsigned short vlan_id = 0; 1914 __be16 prot; 1915 struct iphdr *ip_hdr; 1916 unsigned char tg_addr[MAX_ADDR_LEN]; 1917 1918 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { 1919 prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : 1920 ETH_P_IP); 1921 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { 1922 case QETH_CAST_MULTICAST: 1923 switch (prot) { 1924 #ifdef CONFIG_QETH_IPV6 1925 case __constant_htons(ETH_P_IPV6): 1926 ndisc_mc_map((struct in6_addr *) 1927 skb->data + 24, 1928 tg_addr, card->dev, 0); 1929 break; 1930 #endif 1931 case __constant_htons(ETH_P_IP): 1932 ip_hdr = (struct iphdr *)skb->data; 1933 (card->dev->type == ARPHRD_IEEE802_TR) ? 1934 ip_tr_mc_map(ip_hdr->daddr, tg_addr): 1935 ip_eth_mc_map(ip_hdr->daddr, tg_addr); 1936 break; 1937 default: 1938 memcpy(tg_addr, card->dev->broadcast, 1939 card->dev->addr_len); 1940 } 1941 card->stats.multicast++; 1942 skb->pkt_type = PACKET_MULTICAST; 1943 break; 1944 case QETH_CAST_BROADCAST: 1945 memcpy(tg_addr, card->dev->broadcast, 1946 card->dev->addr_len); 1947 card->stats.multicast++; 1948 skb->pkt_type = PACKET_BROADCAST; 1949 break; 1950 case QETH_CAST_UNICAST: 1951 case QETH_CAST_ANYCAST: 1952 case QETH_CAST_NOCAST: 1953 default: 1954 skb->pkt_type = PACKET_HOST; 1955 memcpy(tg_addr, card->dev->dev_addr, 1956 card->dev->addr_len); 1957 } 1958 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 1959 card->dev->header_ops->create(skb, card->dev, prot, 1960 tg_addr, &hdr->hdr.l3.dest_addr[2], 1961 card->dev->addr_len); 1962 else 1963 card->dev->header_ops->create(skb, card->dev, prot, 1964 tg_addr, "FAKELL", card->dev->addr_len); 1965 } 1966 1967 #ifdef CONFIG_TR 1968 if (card->dev->type == ARPHRD_IEEE802_TR) 1969 skb->protocol = tr_type_trans(skb, card->dev); 1970 else 1971 #endif 1972 skb->protocol = eth_type_trans(skb, card->dev); 1973 1974 if (hdr->hdr.l3.ext_flags & 1975 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { 1976 vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)? 1977 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); 1978 } 1979 1980 switch (card->options.checksum_type) { 1981 case SW_CHECKSUMMING: 1982 skb->ip_summed = CHECKSUM_NONE; 1983 break; 1984 case NO_CHECKSUMMING: 1985 skb->ip_summed = CHECKSUM_UNNECESSARY; 1986 break; 1987 case HW_CHECKSUMMING: 1988 if ((hdr->hdr.l3.ext_flags & 1989 (QETH_HDR_EXT_CSUM_HDR_REQ | 1990 QETH_HDR_EXT_CSUM_TRANSP_REQ)) == 1991 (QETH_HDR_EXT_CSUM_HDR_REQ | 1992 QETH_HDR_EXT_CSUM_TRANSP_REQ)) 1993 skb->ip_summed = CHECKSUM_UNNECESSARY; 1994 else 1995 skb->ip_summed = CHECKSUM_NONE; 1996 } 1997 1998 return vlan_id; 1999 } 2000 2001 static void qeth_l3_process_inbound_buffer(struct qeth_card *card, 2002 struct qeth_qdio_buffer *buf, int index) 2003 { 2004 struct qdio_buffer_element *element; 2005 struct sk_buff *skb; 2006 struct qeth_hdr *hdr; 2007 int offset; 2008 __u16 vlan_tag = 0; 2009 unsigned int len; 2010 2011 /* get first element of current buffer */ 2012 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 2013 offset = 0; 2014 if (card->options.performance_stats) 2015 card->perf_stats.bufs_rec++; 2016 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, 2017 &offset, &hdr))) { 2018 skb->dev = card->dev; 2019 /* is device UP ? 
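		   (if not, the frame is dropped below)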
*/ 2020 if (!(card->dev->flags & IFF_UP)) { 2021 dev_kfree_skb_any(skb); 2022 continue; 2023 } 2024 2025 switch (hdr->hdr.l3.id) { 2026 case QETH_HEADER_TYPE_LAYER3: 2027 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); 2028 len = skb->len; 2029 if (vlan_tag) 2030 if (card->vlangrp) 2031 vlan_hwaccel_rx(skb, card->vlangrp, 2032 vlan_tag); 2033 else { 2034 dev_kfree_skb_any(skb); 2035 continue; 2036 } 2037 else 2038 netif_rx(skb); 2039 break; 2040 default: 2041 dev_kfree_skb_any(skb); 2042 QETH_DBF_TEXT(TRACE, 3, "inbunkno"); 2043 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 2044 continue; 2045 } 2046 2047 card->stats.rx_packets++; 2048 card->stats.rx_bytes += len; 2049 } 2050 } 2051 2052 static int qeth_l3_verify_vlan_dev(struct net_device *dev, 2053 struct qeth_card *card) 2054 { 2055 int rc = 0; 2056 struct vlan_group *vg; 2057 int i; 2058 2059 vg = card->vlangrp; 2060 if (!vg) 2061 return rc; 2062 2063 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 2064 if (vlan_group_get_device(vg, i) == dev) { 2065 rc = QETH_VLAN_CARD; 2066 break; 2067 } 2068 } 2069 2070 if (rc && !(vlan_dev_real_dev(dev)->ml_priv == (void *)card)) 2071 return 0; 2072 2073 return rc; 2074 } 2075 2076 static int qeth_l3_verify_dev(struct net_device *dev) 2077 { 2078 struct qeth_card *card; 2079 unsigned long flags; 2080 int rc = 0; 2081 2082 read_lock_irqsave(&qeth_core_card_list.rwlock, flags); 2083 list_for_each_entry(card, &qeth_core_card_list.list, list) { 2084 if (card->dev == dev) { 2085 rc = QETH_REAL_CARD; 2086 break; 2087 } 2088 rc = qeth_l3_verify_vlan_dev(dev, card); 2089 if (rc) 2090 break; 2091 } 2092 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); 2093 2094 return rc; 2095 } 2096 2097 static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) 2098 { 2099 struct qeth_card *card = NULL; 2100 int rc; 2101 2102 rc = qeth_l3_verify_dev(dev); 2103 if (rc == QETH_REAL_CARD) 2104 card = dev->ml_priv; 2105 else if (rc == QETH_VLAN_CARD) 2106 card = vlan_dev_real_dev(dev)->ml_priv; 2107 if (card && card->options.layer2) 2108 card = NULL; 2109 QETH_DBF_TEXT_(TRACE, 4, "%d", rc); 2110 return card ; 2111 } 2112 2113 static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) 2114 { 2115 int rc = 0; 2116 2117 QETH_DBF_TEXT(SETUP, 2, "stopcard"); 2118 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 2119 2120 qeth_set_allowed_threads(card, 0, 1); 2121 if (card->read.state == CH_STATE_UP && 2122 card->write.state == CH_STATE_UP && 2123 (card->state == CARD_STATE_UP)) { 2124 if (recovery_mode) 2125 qeth_l3_stop(card->dev); 2126 else { 2127 if (card->dev) { 2128 rtnl_lock(); 2129 dev_close(card->dev); 2130 rtnl_unlock(); 2131 } 2132 } 2133 if (!card->use_hard_stop) { 2134 rc = qeth_send_stoplan(card); 2135 if (rc) 2136 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2137 } 2138 card->state = CARD_STATE_SOFTSETUP; 2139 } 2140 if (card->state == CARD_STATE_SOFTSETUP) { 2141 qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1); 2142 qeth_clear_ipacmd_list(card); 2143 card->state = CARD_STATE_HARDSETUP; 2144 } 2145 if (card->state == CARD_STATE_HARDSETUP) { 2146 if (!card->use_hard_stop && 2147 (card->info.type != QETH_CARD_TYPE_IQD)) { 2148 rc = qeth_l3_put_unique_id(card); 2149 if (rc) 2150 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 2151 } 2152 qeth_qdio_clear_card(card, 0); 2153 qeth_clear_qdio_buffers(card); 2154 qeth_clear_working_pool_list(card); 2155 card->state = CARD_STATE_DOWN; 2156 } 2157 if (card->state == CARD_STATE_DOWN) { 2158 qeth_clear_cmd_buffers(&card->read); 2159 
qeth_clear_cmd_buffers(&card->write); 2160 } 2161 card->use_hard_stop = 0; 2162 return rc; 2163 } 2164 2165 static void qeth_l3_set_multicast_list(struct net_device *dev) 2166 { 2167 struct qeth_card *card = dev->ml_priv; 2168 2169 QETH_DBF_TEXT(TRACE, 3, "setmulti"); 2170 if (qeth_threads_running(card, QETH_RECOVER_THREAD) && 2171 (card->state != CARD_STATE_UP)) 2172 return; 2173 qeth_l3_delete_mc_addresses(card); 2174 qeth_l3_add_multicast_ipv4(card); 2175 #ifdef CONFIG_QETH_IPV6 2176 qeth_l3_add_multicast_ipv6(card); 2177 #endif 2178 qeth_l3_set_ip_addr_list(card); 2179 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 2180 return; 2181 qeth_setadp_promisc_mode(card); 2182 } 2183 2184 static const char *qeth_l3_arp_get_error_cause(int *rc) 2185 { 2186 switch (*rc) { 2187 case QETH_IPA_ARP_RC_FAILED: 2188 *rc = -EIO; 2189 return "operation failed"; 2190 case QETH_IPA_ARP_RC_NOTSUPP: 2191 *rc = -EOPNOTSUPP; 2192 return "operation not supported"; 2193 case QETH_IPA_ARP_RC_OUT_OF_RANGE: 2194 *rc = -EINVAL; 2195 return "argument out of range"; 2196 case QETH_IPA_ARP_RC_Q_NOTSUPP: 2197 *rc = -EOPNOTSUPP; 2198 return "query operation not supported"; 2199 case QETH_IPA_ARP_RC_Q_NO_DATA: 2200 *rc = -ENOENT; 2201 return "no query data available"; 2202 default: 2203 return "unknown error"; 2204 } 2205 } 2206 2207 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) 2208 { 2209 int tmp; 2210 int rc; 2211 2212 QETH_DBF_TEXT(TRACE, 3, "arpstnoe"); 2213 2214 /* 2215 * currently GuestLAN only supports the ARP assist function 2216 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; 2217 * thus we say EOPNOTSUPP for this ARP function 2218 */ 2219 if (card->info.guestlan) 2220 return -EOPNOTSUPP; 2221 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 2222 return -EOPNOTSUPP; 2223 } 2224 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, 2225 IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 2226 no_entries); 2227 if (rc) { 2228 tmp = rc; 2229 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on " 2230 "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card), 2231 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2232 } 2233 return rc; 2234 } 2235 2236 static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, 2237 struct qeth_arp_query_data *qdata, int entry_size, 2238 int uentry_size) 2239 { 2240 char *entry_ptr; 2241 char *uentry_ptr; 2242 int i; 2243 2244 entry_ptr = (char *)&qdata->data; 2245 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset); 2246 for (i = 0; i < qdata->no_entries; ++i) { 2247 /* strip off 32 bytes "media specific information" */ 2248 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32); 2249 entry_ptr += entry_size; 2250 uentry_ptr += uentry_size; 2251 } 2252 } 2253 2254 static int qeth_l3_arp_query_cb(struct qeth_card *card, 2255 struct qeth_reply *reply, unsigned long data) 2256 { 2257 struct qeth_ipa_cmd *cmd; 2258 struct qeth_arp_query_data *qdata; 2259 struct qeth_arp_query_info *qinfo; 2260 int entry_size; 2261 int uentry_size; 2262 int i; 2263 2264 QETH_DBF_TEXT(TRACE, 4, "arpquecb"); 2265 2266 qinfo = (struct qeth_arp_query_info *) reply->param; 2267 cmd = (struct qeth_ipa_cmd *) data; 2268 if (cmd->hdr.return_code) { 2269 QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code); 2270 return 0; 2271 } 2272 if (cmd->data.setassparms.hdr.return_code) { 2273 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 2274 QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code); 2275 return 0; 2276 } 2277 
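/*
 * One chunk of the ARP query reply: pick the entry size that matches
 * qdata->reply_bits, verify the remaining user buffer can hold this
 * chunk, then copy the entries (optionally with the 32-byte media
 * specific part stripped) into qinfo->udata. Returning 1 while further
 * replies are outstanding keeps the command reply active.
 */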
qdata = &cmd->data.setassparms.data.query_arp; 2278 switch (qdata->reply_bits) { 2279 case 5: 2280 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5); 2281 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 2282 uentry_size = sizeof(struct qeth_arp_qi_entry5_short); 2283 break; 2284 case 7: 2285 /* fall through to default */ 2286 default: 2287 /* tr is the same as eth -> entry7 */ 2288 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7); 2289 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 2290 uentry_size = sizeof(struct qeth_arp_qi_entry7_short); 2291 break; 2292 } 2293 /* check if there is enough room in userspace */ 2294 if ((qinfo->udata_len - qinfo->udata_offset) < 2295 qdata->no_entries * uentry_size){ 2296 QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); 2297 cmd->hdr.return_code = -ENOMEM; 2298 goto out_error; 2299 } 2300 QETH_DBF_TEXT_(TRACE, 4, "anore%i", 2301 cmd->data.setassparms.hdr.number_of_replies); 2302 QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); 2303 QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries); 2304 2305 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { 2306 /* strip off "media specific information" */ 2307 qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size, 2308 uentry_size); 2309 } else 2310 /*copy entries to user buffer*/ 2311 memcpy(qinfo->udata + qinfo->udata_offset, 2312 (char *)&qdata->data, qdata->no_entries*uentry_size); 2313 2314 qinfo->no_entries += qdata->no_entries; 2315 qinfo->udata_offset += (qdata->no_entries*uentry_size); 2316 /* check if all replies received ... */ 2317 if (cmd->data.setassparms.hdr.seq_no < 2318 cmd->data.setassparms.hdr.number_of_replies) 2319 return 1; 2320 memcpy(qinfo->udata, &qinfo->no_entries, 4); 2321 /* keep STRIP_ENTRIES flag so the user program can distinguish 2322 * stripped entries from normal ones */ 2323 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 2324 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; 2325 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); 2326 return 0; 2327 out_error: 2328 i = 0; 2329 memcpy(qinfo->udata, &i, 4); 2330 return 0; 2331 } 2332 2333 static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, 2334 struct qeth_cmd_buffer *iob, int len, 2335 int (*reply_cb)(struct qeth_card *, struct qeth_reply *, 2336 unsigned long), 2337 void *reply_param) 2338 { 2339 QETH_DBF_TEXT(TRACE, 4, "sendarp"); 2340 2341 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2342 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2343 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2344 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, 2345 reply_cb, reply_param); 2346 } 2347 2348 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) 2349 { 2350 struct qeth_cmd_buffer *iob; 2351 struct qeth_arp_query_info qinfo = {0, }; 2352 int tmp; 2353 int rc; 2354 2355 QETH_DBF_TEXT(TRACE, 3, "arpquery"); 2356 2357 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 2358 IPA_ARP_PROCESSING)) { 2359 return -EOPNOTSUPP; 2360 } 2361 /* get size of userspace buffer and mask_bits -> 6 bytes */ 2362 if (copy_from_user(&qinfo, udata, 6)) 2363 return -EFAULT; 2364 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 2365 if (!qinfo.udata) 2366 return -ENOMEM; 2367 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; 2368 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 2369 IPA_CMD_ASS_ARP_QUERY_INFO, 2370 sizeof(int), QETH_PROT_IPV4); 2371 2372 rc = qeth_l3_send_ipa_arp_cmd(card, iob, 2373 
QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN, 2374 qeth_l3_arp_query_cb, (void *)&qinfo); 2375 if (rc) { 2376 tmp = rc; 2377 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s " 2378 "(0x%x/%d)\n", QETH_CARD_IFNAME(card), 2379 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2380 if (copy_to_user(udata, qinfo.udata, 4)) 2381 rc = -EFAULT; 2382 } else { 2383 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 2384 rc = -EFAULT; 2385 } 2386 kfree(qinfo.udata); 2387 return rc; 2388 } 2389 2390 static int qeth_l3_arp_add_entry(struct qeth_card *card, 2391 struct qeth_arp_cache_entry *entry) 2392 { 2393 struct qeth_cmd_buffer *iob; 2394 char buf[16]; 2395 int tmp; 2396 int rc; 2397 2398 QETH_DBF_TEXT(TRACE, 3, "arpadent"); 2399 2400 /* 2401 * currently GuestLAN only supports the ARP assist function 2402 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; 2403 * thus we say EOPNOTSUPP for this ARP function 2404 */ 2405 if (card->info.guestlan) 2406 return -EOPNOTSUPP; 2407 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 2408 return -EOPNOTSUPP; 2409 } 2410 2411 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 2412 IPA_CMD_ASS_ARP_ADD_ENTRY, 2413 sizeof(struct qeth_arp_cache_entry), 2414 QETH_PROT_IPV4); 2415 rc = qeth_l3_send_setassparms(card, iob, 2416 sizeof(struct qeth_arp_cache_entry), 2417 (unsigned long) entry, 2418 qeth_l3_default_setassparms_cb, NULL); 2419 if (rc) { 2420 tmp = rc; 2421 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); 2422 QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s " 2423 "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), 2424 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2425 } 2426 return rc; 2427 } 2428 2429 static int qeth_l3_arp_remove_entry(struct qeth_card *card, 2430 struct qeth_arp_cache_entry *entry) 2431 { 2432 struct qeth_cmd_buffer *iob; 2433 char buf[16] = {0, }; 2434 int tmp; 2435 int rc; 2436 2437 QETH_DBF_TEXT(TRACE, 3, "arprment"); 2438 2439 /* 2440 * currently GuestLAN only supports the ARP assist function 2441 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY; 2442 * thus we say EOPNOTSUPP for this ARP function 2443 */ 2444 if (card->info.guestlan) 2445 return -EOPNOTSUPP; 2446 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 2447 return -EOPNOTSUPP; 2448 } 2449 memcpy(buf, entry, 12); 2450 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 2451 IPA_CMD_ASS_ARP_REMOVE_ENTRY, 2452 12, 2453 QETH_PROT_IPV4); 2454 rc = qeth_l3_send_setassparms(card, iob, 2455 12, (unsigned long)buf, 2456 qeth_l3_default_setassparms_cb, NULL); 2457 if (rc) { 2458 tmp = rc; 2459 memset(buf, 0, 16); 2460 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); 2461 QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s" 2462 " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), 2463 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2464 } 2465 return rc; 2466 } 2467 2468 static int qeth_l3_arp_flush_cache(struct qeth_card *card) 2469 { 2470 int rc; 2471 int tmp; 2472 2473 QETH_DBF_TEXT(TRACE, 3, "arpflush"); 2474 2475 /* 2476 * currently GuestLAN only supports the ARP assist function 2477 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; 2478 * thus we say EOPNOTSUPP for this ARP function 2479 */ 2480 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) 2481 return -EOPNOTSUPP; 2482 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 2483 return -EOPNOTSUPP; 2484 } 2485 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, 2486 
IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); 2487 if (rc) { 2488 tmp = rc; 2489 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s " 2490 "(0x%x/%d)\n", QETH_CARD_IFNAME(card), 2491 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2492 } 2493 return rc; 2494 } 2495 2496 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2497 { 2498 struct qeth_card *card = dev->ml_priv; 2499 struct qeth_arp_cache_entry arp_entry; 2500 struct mii_ioctl_data *mii_data; 2501 int rc = 0; 2502 2503 if (!card) 2504 return -ENODEV; 2505 2506 if ((card->state != CARD_STATE_UP) && 2507 (card->state != CARD_STATE_SOFTSETUP)) 2508 return -ENODEV; 2509 2510 switch (cmd) { 2511 case SIOC_QETH_ARP_SET_NO_ENTRIES: 2512 if (!capable(CAP_NET_ADMIN)) { 2513 rc = -EPERM; 2514 break; 2515 } 2516 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); 2517 break; 2518 case SIOC_QETH_ARP_QUERY_INFO: 2519 if (!capable(CAP_NET_ADMIN)) { 2520 rc = -EPERM; 2521 break; 2522 } 2523 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); 2524 break; 2525 case SIOC_QETH_ARP_ADD_ENTRY: 2526 if (!capable(CAP_NET_ADMIN)) { 2527 rc = -EPERM; 2528 break; 2529 } 2530 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, 2531 sizeof(struct qeth_arp_cache_entry))) 2532 rc = -EFAULT; 2533 else 2534 rc = qeth_l3_arp_add_entry(card, &arp_entry); 2535 break; 2536 case SIOC_QETH_ARP_REMOVE_ENTRY: 2537 if (!capable(CAP_NET_ADMIN)) { 2538 rc = -EPERM; 2539 break; 2540 } 2541 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, 2542 sizeof(struct qeth_arp_cache_entry))) 2543 rc = -EFAULT; 2544 else 2545 rc = qeth_l3_arp_remove_entry(card, &arp_entry); 2546 break; 2547 case SIOC_QETH_ARP_FLUSH_CACHE: 2548 if (!capable(CAP_NET_ADMIN)) { 2549 rc = -EPERM; 2550 break; 2551 } 2552 rc = qeth_l3_arp_flush_cache(card); 2553 break; 2554 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 2555 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); 2556 break; 2557 case SIOC_QETH_GET_CARD_TYPE: 2558 if ((card->info.type == QETH_CARD_TYPE_OSAE) && 2559 !card->info.guestlan) 2560 return 1; 2561 return 0; 2562 break; 2563 case SIOCGMIIPHY: 2564 mii_data = if_mii(rq); 2565 mii_data->phy_id = 0; 2566 break; 2567 case SIOCGMIIREG: 2568 mii_data = if_mii(rq); 2569 if (mii_data->phy_id != 0) 2570 rc = -EINVAL; 2571 else 2572 mii_data->val_out = qeth_mdio_read(dev, 2573 mii_data->phy_id, 2574 mii_data->reg_num); 2575 break; 2576 default: 2577 rc = -EOPNOTSUPP; 2578 } 2579 if (rc) 2580 QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); 2581 return rc; 2582 } 2583 2584 int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) 2585 { 2586 int cast_type = RTN_UNSPEC; 2587 2588 if (skb_dst(skb) && skb_dst(skb)->neighbour) { 2589 cast_type = skb_dst(skb)->neighbour->type; 2590 if ((cast_type == RTN_BROADCAST) || 2591 (cast_type == RTN_MULTICAST) || 2592 (cast_type == RTN_ANYCAST)) 2593 return cast_type; 2594 else 2595 return RTN_UNSPEC; 2596 } 2597 /* try something else */ 2598 if (skb->protocol == ETH_P_IPV6) 2599 return (skb_network_header(skb)[24] == 0xff) ? 2600 RTN_MULTICAST : 0; 2601 else if (skb->protocol == ETH_P_IP) 2602 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ? 2603 RTN_MULTICAST : 0; 2604 /* ... */ 2605 if (!memcmp(skb->data, skb->dev->broadcast, 6)) 2606 return RTN_BROADCAST; 2607 else { 2608 u16 hdr_mac; 2609 2610 hdr_mac = *((u16 *)skb->data); 2611 /* tr multicast? 
*/ 2612 switch (card->info.link_type) { 2613 case QETH_LINK_TYPE_HSTR: 2614 case QETH_LINK_TYPE_LANE_TR: 2615 if ((hdr_mac == QETH_TR_MAC_NC) || 2616 (hdr_mac == QETH_TR_MAC_C)) 2617 return RTN_MULTICAST; 2618 break; 2619 /* eth or so multicast? */ 2620 default: 2621 if ((hdr_mac == QETH_ETH_MAC_V4) || 2622 (hdr_mac == QETH_ETH_MAC_V6)) 2623 return RTN_MULTICAST; 2624 } 2625 } 2626 return cast_type; 2627 } 2628 2629 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 2630 struct sk_buff *skb, int ipv, int cast_type) 2631 { 2632 memset(hdr, 0, sizeof(struct qeth_hdr)); 2633 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 2634 hdr->hdr.l3.ext_flags = 0; 2635 2636 /* 2637 * before we're going to overwrite this location with next hop ip. 2638 * v6 uses passthrough, v4 sets the tag in the QDIO header. 2639 */ 2640 if (card->vlangrp && vlan_tx_tag_present(skb)) { 2641 if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD)) 2642 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME; 2643 else 2644 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG; 2645 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb); 2646 } 2647 2648 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); 2649 if (ipv == 4) { 2650 /* IPv4 */ 2651 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type); 2652 memset(hdr->hdr.l3.dest_addr, 0, 12); 2653 if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) { 2654 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = 2655 *((u32 *) skb_dst(skb)->neighbour->primary_key); 2656 } else { 2657 /* fill in destination address used in ip header */ 2658 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = 2659 ip_hdr(skb)->daddr; 2660 } 2661 } else if (ipv == 6) { 2662 /* IPv6 */ 2663 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type); 2664 if (card->info.type == QETH_CARD_TYPE_IQD) 2665 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU; 2666 if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) { 2667 memcpy(hdr->hdr.l3.dest_addr, 2668 skb_dst(skb)->neighbour->primary_key, 16); 2669 } else { 2670 /* fill in destination address used in ip header */ 2671 memcpy(hdr->hdr.l3.dest_addr, 2672 &ipv6_hdr(skb)->daddr, 16); 2673 } 2674 } else { 2675 /* passthrough */ 2676 if ((skb->dev->type == ARPHRD_IEEE802_TR) && 2677 !memcmp(skb->data + sizeof(struct qeth_hdr) + 2678 sizeof(__u16), skb->dev->broadcast, 6)) { 2679 hdr->hdr.l3.flags = QETH_CAST_BROADCAST | 2680 QETH_HDR_PASSTHRU; 2681 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr), 2682 skb->dev->broadcast, 6)) { 2683 /* broadcast? */ 2684 hdr->hdr.l3.flags = QETH_CAST_BROADCAST | 2685 QETH_HDR_PASSTHRU; 2686 } else { 2687 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ? 
2688 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU : 2689 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU; 2690 } 2691 } 2692 } 2693 2694 static void qeth_tso_fill_header(struct qeth_card *card, 2695 struct qeth_hdr *qhdr, struct sk_buff *skb) 2696 { 2697 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr; 2698 struct tcphdr *tcph = tcp_hdr(skb); 2699 struct iphdr *iph = ip_hdr(skb); 2700 struct ipv6hdr *ip6h = ipv6_hdr(skb); 2701 2702 /*fix header to TSO values ...*/ 2703 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; 2704 /*set values which are fix for the first approach ...*/ 2705 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); 2706 hdr->ext.imb_hdr_no = 1; 2707 hdr->ext.hdr_type = 1; 2708 hdr->ext.hdr_version = 1; 2709 hdr->ext.hdr_len = 28; 2710 /*insert non-fix values */ 2711 hdr->ext.mss = skb_shinfo(skb)->gso_size; 2712 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); 2713 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - 2714 sizeof(struct qeth_hdr_tso)); 2715 tcph->check = 0; 2716 if (skb->protocol == ETH_P_IPV6) { 2717 ip6h->payload_len = 0; 2718 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 2719 0, IPPROTO_TCP, 0); 2720 } else { 2721 /*OSA want us to set these values ...*/ 2722 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 2723 0, IPPROTO_TCP, 0); 2724 iph->tot_len = 0; 2725 iph->check = 0; 2726 } 2727 } 2728 2729 static void qeth_tx_csum(struct sk_buff *skb) 2730 { 2731 __wsum csum; 2732 int offset; 2733 2734 skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb)); 2735 offset = skb->csum_start - skb_headroom(skb); 2736 BUG_ON(offset >= skb_headlen(skb)); 2737 csum = skb_checksum(skb, offset, skb->len - offset, 0); 2738 2739 offset += skb->csum_offset; 2740 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2741 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2742 } 2743 2744 static inline int qeth_l3_tso_elements(struct sk_buff *skb) 2745 { 2746 unsigned long tcpd = (unsigned long)tcp_hdr(skb) + 2747 tcp_hdr(skb)->doff * 4; 2748 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); 2749 int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd); 2750 elements += skb_shinfo(skb)->nr_frags; 2751 return elements; 2752 } 2753 2754 static inline int qeth_l3_tso_check(struct sk_buff *skb) 2755 { 2756 int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) - 2757 (unsigned long)skb->data; 2758 return (((unsigned long)skb->data & PAGE_MASK) != 2759 (((unsigned long)skb->data + len) & PAGE_MASK)); 2760 } 2761 2762 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 2763 { 2764 int rc; 2765 u16 *tag; 2766 struct qeth_hdr *hdr = NULL; 2767 int elements_needed = 0; 2768 int elems; 2769 struct qeth_card *card = dev->ml_priv; 2770 struct sk_buff *new_skb = NULL; 2771 int ipv = qeth_get_ip_version(skb); 2772 int cast_type = qeth_l3_get_cast_type(card, skb); 2773 struct qeth_qdio_out_q *queue = card->qdio.out_qs 2774 [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 2775 int tx_bytes = skb->len; 2776 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 2777 int data_offset = -1; 2778 int nr_frags; 2779 2780 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2781 (skb->protocol != htons(ETH_P_IPV6)) && 2782 (skb->protocol != htons(ETH_P_IP))) 2783 goto tx_drop; 2784 2785 if ((card->state != CARD_STATE_UP) || !card->lan_online) { 2786 card->stats.tx_carrier_errors++; 2787 goto tx_drop; 2788 } 2789 2790 if ((cast_type == RTN_BROADCAST) && 2791 (card->info.broadcast_capable == 0)) 2792 
goto tx_drop; 2793 2794 if (card->options.performance_stats) { 2795 card->perf_stats.outbound_cnt++; 2796 card->perf_stats.outbound_start_time = qeth_get_micros(); 2797 } 2798 2799 if (skb_is_gso(skb)) 2800 large_send = card->options.large_send; 2801 else 2802 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2803 qeth_tx_csum(skb); 2804 if (card->options.performance_stats) 2805 card->perf_stats.tx_csum++; 2806 } 2807 2808 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && 2809 (skb_shinfo(skb)->nr_frags == 0)) { 2810 new_skb = skb; 2811 data_offset = ETH_HLEN; 2812 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 2813 if (!hdr) 2814 goto tx_drop; 2815 elements_needed++; 2816 } else { 2817 /* create a clone with writeable headroom */ 2818 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) 2819 + VLAN_HLEN); 2820 if (!new_skb) 2821 goto tx_drop; 2822 } 2823 2824 if (card->info.type == QETH_CARD_TYPE_IQD) { 2825 if (data_offset < 0) 2826 skb_pull(new_skb, ETH_HLEN); 2827 } else { 2828 if (new_skb->protocol == htons(ETH_P_IP)) { 2829 if (card->dev->type == ARPHRD_IEEE802_TR) 2830 skb_pull(new_skb, TR_HLEN); 2831 else 2832 skb_pull(new_skb, ETH_HLEN); 2833 } 2834 2835 if (new_skb->protocol == ETH_P_IPV6 && card->vlangrp && 2836 vlan_tx_tag_present(new_skb)) { 2837 skb_push(new_skb, VLAN_HLEN); 2838 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); 2839 skb_copy_to_linear_data_offset(new_skb, 4, 2840 new_skb->data + 8, 4); 2841 skb_copy_to_linear_data_offset(new_skb, 8, 2842 new_skb->data + 12, 4); 2843 tag = (u16 *)(new_skb->data + 12); 2844 *tag = __constant_htons(ETH_P_8021Q); 2845 *(tag + 1) = htons(vlan_tx_tag_get(new_skb)); 2846 new_skb->vlan_tci = 0; 2847 } 2848 } 2849 2850 netif_stop_queue(dev); 2851 2852 /* fix hardware limitation: as long as we do not have sbal 2853 * chaining we can not send long frag lists 2854 */ 2855 if (large_send == QETH_LARGE_SEND_TSO) { 2856 if (qeth_l3_tso_elements(new_skb) + 1 > 16) { 2857 if (skb_linearize(new_skb)) 2858 goto tx_drop; 2859 if (card->options.performance_stats) 2860 card->perf_stats.tx_lin++; 2861 } 2862 } 2863 2864 if ((large_send == QETH_LARGE_SEND_TSO) && 2865 (cast_type == RTN_UNSPEC)) { 2866 hdr = (struct qeth_hdr *)skb_push(new_skb, 2867 sizeof(struct qeth_hdr_tso)); 2868 if (qeth_l3_tso_check(new_skb)) 2869 QETH_DBF_MESSAGE(2, "tso skb misaligned\n"); 2870 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 2871 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); 2872 qeth_tso_fill_header(card, hdr, new_skb); 2873 elements_needed++; 2874 } else { 2875 if (data_offset < 0) { 2876 hdr = (struct qeth_hdr *)skb_push(new_skb, 2877 sizeof(struct qeth_hdr)); 2878 qeth_l3_fill_header(card, hdr, new_skb, ipv, 2879 cast_type); 2880 } else { 2881 qeth_l3_fill_header(card, hdr, new_skb, ipv, 2882 cast_type); 2883 hdr->hdr.l3.length = new_skb->len - data_offset; 2884 } 2885 } 2886 2887 elems = qeth_get_elements_no(card, (void *)hdr, new_skb, 2888 elements_needed); 2889 if (!elems) { 2890 if (data_offset >= 0) 2891 kmem_cache_free(qeth_core_header_cache, hdr); 2892 goto tx_drop; 2893 } 2894 elements_needed += elems; 2895 nr_frags = skb_shinfo(new_skb)->nr_frags; 2896 2897 if (card->info.type != QETH_CARD_TYPE_IQD) 2898 rc = qeth_do_send_packet(card, queue, new_skb, hdr, 2899 elements_needed); 2900 else 2901 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, 2902 elements_needed, data_offset, 0); 2903 2904 if (!rc) { 2905 card->stats.tx_packets++; 2906 card->stats.tx_bytes += tx_bytes; 2907 if (new_skb != skb) 
2908 dev_kfree_skb_any(skb); 2909 if (card->options.performance_stats) { 2910 if (large_send != QETH_LARGE_SEND_NO) { 2911 card->perf_stats.large_send_bytes += tx_bytes; 2912 card->perf_stats.large_send_cnt++; 2913 } 2914 if (nr_frags) { 2915 card->perf_stats.sg_skbs_sent++; 2916 /* nr_frags + skb->data */ 2917 card->perf_stats.sg_frags_sent += nr_frags + 1; 2918 } 2919 } 2920 rc = NETDEV_TX_OK; 2921 } else { 2922 if (data_offset >= 0) 2923 kmem_cache_free(qeth_core_header_cache, hdr); 2924 2925 if (rc == -EBUSY) { 2926 if (new_skb != skb) 2927 dev_kfree_skb_any(new_skb); 2928 return NETDEV_TX_BUSY; 2929 } else 2930 goto tx_drop; 2931 } 2932 2933 netif_wake_queue(dev); 2934 if (card->options.performance_stats) 2935 card->perf_stats.outbound_time += qeth_get_micros() - 2936 card->perf_stats.outbound_start_time; 2937 return rc; 2938 2939 tx_drop: 2940 card->stats.tx_dropped++; 2941 card->stats.tx_errors++; 2942 if ((new_skb != skb) && new_skb) 2943 dev_kfree_skb_any(new_skb); 2944 dev_kfree_skb_any(skb); 2945 netif_wake_queue(dev); 2946 return NETDEV_TX_OK; 2947 } 2948 2949 static int qeth_l3_open(struct net_device *dev) 2950 { 2951 struct qeth_card *card = dev->ml_priv; 2952 2953 QETH_DBF_TEXT(TRACE, 4, "qethopen"); 2954 if (card->state != CARD_STATE_SOFTSETUP) 2955 return -ENODEV; 2956 card->data.state = CH_STATE_UP; 2957 card->state = CARD_STATE_UP; 2958 netif_start_queue(dev); 2959 2960 if (!card->lan_online && netif_carrier_ok(dev)) 2961 netif_carrier_off(dev); 2962 return 0; 2963 } 2964 2965 static int qeth_l3_stop(struct net_device *dev) 2966 { 2967 struct qeth_card *card = dev->ml_priv; 2968 2969 QETH_DBF_TEXT(TRACE, 4, "qethstop"); 2970 netif_tx_disable(dev); 2971 if (card->state == CARD_STATE_UP) 2972 card->state = CARD_STATE_SOFTSETUP; 2973 return 0; 2974 } 2975 2976 static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev) 2977 { 2978 struct qeth_card *card = dev->ml_priv; 2979 2980 return (card->options.checksum_type == HW_CHECKSUMMING); 2981 } 2982 2983 static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) 2984 { 2985 struct qeth_card *card = dev->ml_priv; 2986 enum qeth_checksum_types csum_type; 2987 2988 if (data) 2989 csum_type = HW_CHECKSUMMING; 2990 else 2991 csum_type = SW_CHECKSUMMING; 2992 2993 return qeth_l3_set_rx_csum(card, csum_type); 2994 } 2995 2996 static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 2997 { 2998 struct qeth_card *card = dev->ml_priv; 2999 int rc = 0; 3000 3001 if (data) { 3002 rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO); 3003 } else { 3004 dev->features &= ~NETIF_F_TSO; 3005 card->options.large_send = QETH_LARGE_SEND_NO; 3006 } 3007 return rc; 3008 } 3009 3010 static const struct ethtool_ops qeth_l3_ethtool_ops = { 3011 .get_link = ethtool_op_get_link, 3012 .get_tx_csum = ethtool_op_get_tx_csum, 3013 .set_tx_csum = ethtool_op_set_tx_hw_csum, 3014 .get_rx_csum = qeth_l3_ethtool_get_rx_csum, 3015 .set_rx_csum = qeth_l3_ethtool_set_rx_csum, 3016 .get_sg = ethtool_op_get_sg, 3017 .set_sg = ethtool_op_set_sg, 3018 .get_tso = ethtool_op_get_tso, 3019 .set_tso = qeth_l3_ethtool_set_tso, 3020 .get_strings = qeth_core_get_strings, 3021 .get_ethtool_stats = qeth_core_get_ethtool_stats, 3022 .get_sset_count = qeth_core_get_sset_count, 3023 .get_drvinfo = qeth_core_get_drvinfo, 3024 .get_settings = qeth_core_ethtool_get_settings, 3025 }; 3026 3027 /* 3028 * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. 
Setting 3029 * NOARP on the netdevice is no option because it also turns off neighbor 3030 * solicitation. For IPv4 we install a neighbor_setup function. We don't want 3031 * arp resolution but we want the hard header (packet socket will work 3032 * e.g. tcpdump) 3033 */ 3034 static int qeth_l3_neigh_setup_noarp(struct neighbour *n) 3035 { 3036 n->nud_state = NUD_NOARP; 3037 memcpy(n->ha, "FAKELL", 6); 3038 n->output = n->ops->connected_output; 3039 return 0; 3040 } 3041 3042 static int 3043 qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np) 3044 { 3045 if (np->tbl->family == AF_INET) 3046 np->neigh_setup = qeth_l3_neigh_setup_noarp; 3047 3048 return 0; 3049 } 3050 3051 static const struct net_device_ops qeth_l3_netdev_ops = { 3052 .ndo_open = qeth_l3_open, 3053 .ndo_stop = qeth_l3_stop, 3054 .ndo_get_stats = qeth_get_stats, 3055 .ndo_start_xmit = qeth_l3_hard_start_xmit, 3056 .ndo_validate_addr = eth_validate_addr, 3057 .ndo_set_multicast_list = qeth_l3_set_multicast_list, 3058 .ndo_do_ioctl = qeth_l3_do_ioctl, 3059 .ndo_change_mtu = qeth_change_mtu, 3060 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, 3061 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3062 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3063 .ndo_tx_timeout = qeth_tx_timeout, 3064 }; 3065 3066 static const struct net_device_ops qeth_l3_osa_netdev_ops = { 3067 .ndo_open = qeth_l3_open, 3068 .ndo_stop = qeth_l3_stop, 3069 .ndo_get_stats = qeth_get_stats, 3070 .ndo_start_xmit = qeth_l3_hard_start_xmit, 3071 .ndo_validate_addr = eth_validate_addr, 3072 .ndo_set_multicast_list = qeth_l3_set_multicast_list, 3073 .ndo_do_ioctl = qeth_l3_do_ioctl, 3074 .ndo_change_mtu = qeth_change_mtu, 3075 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, 3076 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3077 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3078 .ndo_tx_timeout = qeth_tx_timeout, 3079 .ndo_neigh_setup = qeth_l3_neigh_setup, 3080 }; 3081 3082 static int qeth_l3_setup_netdev(struct qeth_card *card) 3083 { 3084 if (card->info.type == QETH_CARD_TYPE_OSAE) { 3085 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 3086 (card->info.link_type == QETH_LINK_TYPE_HSTR)) { 3087 #ifdef CONFIG_TR 3088 card->dev = alloc_trdev(0); 3089 #endif 3090 if (!card->dev) 3091 return -ENODEV; 3092 card->dev->netdev_ops = &qeth_l3_netdev_ops; 3093 } else { 3094 card->dev = alloc_etherdev(0); 3095 if (!card->dev) 3096 return -ENODEV; 3097 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; 3098 3099 /*IPv6 address autoconfiguration stuff*/ 3100 qeth_l3_get_unique_id(card); 3101 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 3102 card->dev->dev_id = card->info.unique_id & 3103 0xffff; 3104 } 3105 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 3106 card->dev = alloc_netdev(0, "hsi%d", ether_setup); 3107 if (!card->dev) 3108 return -ENODEV; 3109 card->dev->flags |= IFF_NOARP; 3110 card->dev->netdev_ops = &qeth_l3_netdev_ops; 3111 qeth_l3_iqd_read_initial_mac(card); 3112 } else 3113 return -ENODEV; 3114 3115 card->dev->ml_priv = card; 3116 card->dev->watchdog_timeo = QETH_TX_TIMEOUT; 3117 card->dev->mtu = card->info.initial_mtu; 3118 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); 3119 card->dev->features |= NETIF_F_HW_VLAN_TX | 3120 NETIF_F_HW_VLAN_RX | 3121 NETIF_F_HW_VLAN_FILTER; 3122 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 3123 card->dev->gso_max_size = 15 * PAGE_SIZE; 3124 3125 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3126 return register_netdev(card->dev); 3127 } 3128 3129 static void 
qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, 3130 unsigned int qdio_err, unsigned int queue, int first_element, 3131 int count, unsigned long card_ptr) 3132 { 3133 struct net_device *net_dev; 3134 struct qeth_card *card; 3135 struct qeth_qdio_buffer *buffer; 3136 int index; 3137 int i; 3138 3139 card = (struct qeth_card *) card_ptr; 3140 net_dev = card->dev; 3141 if (card->options.performance_stats) { 3142 card->perf_stats.inbound_cnt++; 3143 card->perf_stats.inbound_start_time = qeth_get_micros(); 3144 } 3145 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { 3146 QETH_DBF_TEXT(TRACE, 1, "qdinchk"); 3147 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 3148 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", 3149 first_element, count); 3150 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); 3151 qeth_schedule_recovery(card); 3152 return; 3153 } 3154 for (i = first_element; i < (first_element + count); ++i) { 3155 index = i % QDIO_MAX_BUFFERS_PER_Q; 3156 buffer = &card->qdio.in_q->bufs[index]; 3157 if (!(qdio_err && 3158 qeth_check_qdio_errors(buffer->buffer, 3159 qdio_err, "qinerr"))) 3160 qeth_l3_process_inbound_buffer(card, buffer, index); 3161 /* clear buffer and give back to hardware */ 3162 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 3163 qeth_queue_input_buffer(card, index); 3164 } 3165 if (card->options.performance_stats) 3166 card->perf_stats.inbound_time += qeth_get_micros() - 3167 card->perf_stats.inbound_start_time; 3168 } 3169 3170 static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3171 { 3172 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3173 3174 qeth_l3_create_device_attributes(&gdev->dev); 3175 card->options.layer2 = 0; 3176 card->discipline.input_handler = (qdio_handler_t *) 3177 qeth_l3_qdio_input_handler; 3178 card->discipline.output_handler = (qdio_handler_t *) 3179 qeth_qdio_output_handler; 3180 card->discipline.recover = qeth_l3_recover; 3181 return 0; 3182 } 3183 3184 static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) 3185 { 3186 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 3187 3188 qeth_set_allowed_threads(card, 0, 1); 3189 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 3190 3191 if (cgdev->state == CCWGROUP_ONLINE) { 3192 card->use_hard_stop = 1; 3193 qeth_l3_set_offline(cgdev); 3194 } 3195 3196 if (card->dev) { 3197 unregister_netdev(card->dev); 3198 card->dev = NULL; 3199 } 3200 3201 qeth_l3_remove_device_attributes(&cgdev->dev); 3202 qeth_l3_clear_ip_list(card, 0, 0); 3203 qeth_l3_clear_ipato_list(card); 3204 return; 3205 } 3206 3207 static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) 3208 { 3209 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3210 int rc = 0; 3211 enum qeth_card_states recover_flag; 3212 3213 BUG_ON(!card); 3214 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 3215 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 3216 3217 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 3218 3219 recover_flag = card->state; 3220 rc = qeth_core_hardsetup_card(card); 3221 if (rc) { 3222 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3223 rc = -ENODEV; 3224 goto out_remove; 3225 } 3226 3227 qeth_l3_query_ipassists(card, QETH_PROT_IPV4); 3228 3229 if (!card->dev && qeth_l3_setup_netdev(card)) { 3230 rc = -ENODEV; 3231 goto out_remove; 3232 } 3233 3234 card->state = CARD_STATE_HARDSETUP; 3235 qeth_print_status_message(card); 3236 3237 /* softsetup */ 3238 QETH_DBF_TEXT(SETUP, 2, "softsetp"); 3239 3240 rc = qeth_send_startlan(card); 3241 if (rc) { 3242 QETH_DBF_TEXT_(SETUP, 2, 
"1err%d", rc); 3243 if (rc == 0xe080) { 3244 dev_warn(&card->gdev->dev, 3245 "The LAN is offline\n"); 3246 card->lan_online = 0; 3247 return 0; 3248 } 3249 rc = -ENODEV; 3250 goto out_remove; 3251 } else 3252 card->lan_online = 1; 3253 qeth_l3_set_large_send(card, card->options.large_send); 3254 3255 rc = qeth_l3_setadapter_parms(card); 3256 if (rc) 3257 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3258 rc = qeth_l3_start_ipassists(card); 3259 if (rc) 3260 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 3261 rc = qeth_l3_setrouting_v4(card); 3262 if (rc) 3263 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 3264 rc = qeth_l3_setrouting_v6(card); 3265 if (rc) 3266 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 3267 netif_tx_disable(card->dev); 3268 3269 rc = qeth_init_qdio_queues(card); 3270 if (rc) { 3271 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 3272 rc = -ENODEV; 3273 goto out_remove; 3274 } 3275 card->state = CARD_STATE_SOFTSETUP; 3276 netif_carrier_on(card->dev); 3277 3278 qeth_set_allowed_threads(card, 0xffffffff, 0); 3279 qeth_l3_set_ip_addr_list(card); 3280 if (recover_flag == CARD_STATE_RECOVER) { 3281 if (recovery_mode) 3282 qeth_l3_open(card->dev); 3283 else { 3284 rtnl_lock(); 3285 dev_open(card->dev); 3286 rtnl_unlock(); 3287 } 3288 qeth_l3_set_multicast_list(card->dev); 3289 } 3290 /* let user_space know that device is online */ 3291 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3292 return 0; 3293 out_remove: 3294 card->use_hard_stop = 1; 3295 qeth_l3_stop_card(card, 0); 3296 ccw_device_set_offline(CARD_DDEV(card)); 3297 ccw_device_set_offline(CARD_WDEV(card)); 3298 ccw_device_set_offline(CARD_RDEV(card)); 3299 if (recover_flag == CARD_STATE_RECOVER) 3300 card->state = CARD_STATE_RECOVER; 3301 else 3302 card->state = CARD_STATE_DOWN; 3303 return rc; 3304 } 3305 3306 static int qeth_l3_set_online(struct ccwgroup_device *gdev) 3307 { 3308 return __qeth_l3_set_online(gdev, 0); 3309 } 3310 3311 static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, 3312 int recovery_mode) 3313 { 3314 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 3315 int rc = 0, rc2 = 0, rc3 = 0; 3316 enum qeth_card_states recover_flag; 3317 3318 QETH_DBF_TEXT(SETUP, 3, "setoffl"); 3319 QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); 3320 3321 if (card->dev && netif_carrier_ok(card->dev)) 3322 netif_carrier_off(card->dev); 3323 recover_flag = card->state; 3324 qeth_l3_stop_card(card, recovery_mode); 3325 rc = ccw_device_set_offline(CARD_DDEV(card)); 3326 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 3327 rc3 = ccw_device_set_offline(CARD_RDEV(card)); 3328 if (!rc) 3329 rc = (rc2) ? 
rc2 : rc3; 3330 if (rc) 3331 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3332 if (recover_flag == CARD_STATE_UP) 3333 card->state = CARD_STATE_RECOVER; 3334 /* let user_space know that device is offline */ 3335 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); 3336 return 0; 3337 } 3338 3339 static int qeth_l3_set_offline(struct ccwgroup_device *cgdev) 3340 { 3341 return __qeth_l3_set_offline(cgdev, 0); 3342 } 3343 3344 static int qeth_l3_recover(void *ptr) 3345 { 3346 struct qeth_card *card; 3347 int rc = 0; 3348 3349 card = (struct qeth_card *) ptr; 3350 QETH_DBF_TEXT(TRACE, 2, "recover1"); 3351 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); 3352 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 3353 return 0; 3354 QETH_DBF_TEXT(TRACE, 2, "recover2"); 3355 dev_warn(&card->gdev->dev, 3356 "A recovery process has been started for the device\n"); 3357 card->use_hard_stop = 1; 3358 __qeth_l3_set_offline(card->gdev, 1); 3359 rc = __qeth_l3_set_online(card->gdev, 1); 3360 /* don't run another scheduled recovery */ 3361 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 3362 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 3363 if (!rc) 3364 dev_info(&card->gdev->dev, 3365 "Device successfully recovered!\n"); 3366 else { 3367 rtnl_lock(); 3368 dev_close(card->dev); 3369 rtnl_unlock(); 3370 dev_warn(&card->gdev->dev, "The qeth device driver " 3371 "failed to recover an error on the device\n"); 3372 } 3373 return 0; 3374 } 3375 3376 static void qeth_l3_shutdown(struct ccwgroup_device *gdev) 3377 { 3378 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3379 qeth_l3_clear_ip_list(card, 0, 0); 3380 qeth_qdio_clear_card(card, 0); 3381 qeth_clear_qdio_buffers(card); 3382 } 3383 3384 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) 3385 { 3386 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3387 3388 if (card->dev) 3389 netif_device_detach(card->dev); 3390 qeth_set_allowed_threads(card, 0, 1); 3391 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 3392 if (gdev->state == CCWGROUP_OFFLINE) 3393 return 0; 3394 if (card->state == CARD_STATE_UP) { 3395 card->use_hard_stop = 1; 3396 __qeth_l3_set_offline(card->gdev, 1); 3397 } else 3398 __qeth_l3_set_offline(card->gdev, 0); 3399 return 0; 3400 } 3401 3402 static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) 3403 { 3404 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3405 int rc = 0; 3406 3407 if (gdev->state == CCWGROUP_OFFLINE) 3408 goto out; 3409 3410 if (card->state == CARD_STATE_RECOVER) { 3411 rc = __qeth_l3_set_online(card->gdev, 1); 3412 if (rc) { 3413 if (card->dev) { 3414 rtnl_lock(); 3415 dev_close(card->dev); 3416 rtnl_unlock(); 3417 } 3418 } 3419 } else 3420 rc = __qeth_l3_set_online(card->gdev, 0); 3421 out: 3422 qeth_set_allowed_threads(card, 0xffffffff, 0); 3423 if (card->dev) 3424 netif_device_attach(card->dev); 3425 if (rc) 3426 dev_warn(&card->gdev->dev, "The qeth device driver " 3427 "failed to recover an error on the device\n"); 3428 return rc; 3429 } 3430 3431 struct ccwgroup_driver qeth_l3_ccwgroup_driver = { 3432 .probe = qeth_l3_probe_device, 3433 .remove = qeth_l3_remove_device, 3434 .set_online = qeth_l3_set_online, 3435 .set_offline = qeth_l3_set_offline, 3436 .shutdown = qeth_l3_shutdown, 3437 .freeze = qeth_l3_pm_suspend, 3438 .thaw = qeth_l3_pm_resume, 3439 .restore = qeth_l3_pm_resume, 3440 }; 3441 EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver); 3442 3443 static int qeth_l3_ip_event(struct notifier_block *this, 3444 unsigned long event, void *ptr) 3445 { 3446 struct 
in_ifaddr *ifa = (struct in_ifaddr *)ptr; 3447 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev; 3448 struct qeth_ipaddr *addr; 3449 struct qeth_card *card; 3450 3451 if (dev_net(dev) != &init_net) 3452 return NOTIFY_DONE; 3453 3454 QETH_DBF_TEXT(TRACE, 3, "ipevent"); 3455 card = qeth_l3_get_card_from_dev(dev); 3456 if (!card) 3457 return NOTIFY_DONE; 3458 3459 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 3460 if (addr != NULL) { 3461 addr->u.a4.addr = ifa->ifa_address; 3462 addr->u.a4.mask = ifa->ifa_mask; 3463 addr->type = QETH_IP_TYPE_NORMAL; 3464 } else 3465 goto out; 3466 3467 switch (event) { 3468 case NETDEV_UP: 3469 if (!qeth_l3_add_ip(card, addr)) 3470 kfree(addr); 3471 break; 3472 case NETDEV_DOWN: 3473 if (!qeth_l3_delete_ip(card, addr)) 3474 kfree(addr); 3475 break; 3476 default: 3477 break; 3478 } 3479 qeth_l3_set_ip_addr_list(card); 3480 out: 3481 return NOTIFY_DONE; 3482 } 3483 3484 static struct notifier_block qeth_l3_ip_notifier = { 3485 qeth_l3_ip_event, 3486 NULL, 3487 }; 3488 3489 #ifdef CONFIG_QETH_IPV6 3490 /** 3491 * IPv6 event handler 3492 */ 3493 static int qeth_l3_ip6_event(struct notifier_block *this, 3494 unsigned long event, void *ptr) 3495 { 3496 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 3497 struct net_device *dev = (struct net_device *)ifa->idev->dev; 3498 struct qeth_ipaddr *addr; 3499 struct qeth_card *card; 3500 3501 QETH_DBF_TEXT(TRACE, 3, "ip6event"); 3502 3503 card = qeth_l3_get_card_from_dev(dev); 3504 if (!card) 3505 return NOTIFY_DONE; 3506 if (!qeth_is_supported(card, IPA_IPV6)) 3507 return NOTIFY_DONE; 3508 3509 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 3510 if (addr != NULL) { 3511 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr)); 3512 addr->u.a6.pfxlen = ifa->prefix_len; 3513 addr->type = QETH_IP_TYPE_NORMAL; 3514 } else 3515 goto out; 3516 3517 switch (event) { 3518 case NETDEV_UP: 3519 if (!qeth_l3_add_ip(card, addr)) 3520 kfree(addr); 3521 break; 3522 case NETDEV_DOWN: 3523 if (!qeth_l3_delete_ip(card, addr)) 3524 kfree(addr); 3525 break; 3526 default: 3527 break; 3528 } 3529 qeth_l3_set_ip_addr_list(card); 3530 out: 3531 return NOTIFY_DONE; 3532 } 3533 3534 static struct notifier_block qeth_l3_ip6_notifier = { 3535 qeth_l3_ip6_event, 3536 NULL, 3537 }; 3538 #endif 3539 3540 static int qeth_l3_register_notifiers(void) 3541 { 3542 int rc; 3543 3544 QETH_DBF_TEXT(TRACE, 5, "regnotif"); 3545 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); 3546 if (rc) 3547 return rc; 3548 #ifdef CONFIG_QETH_IPV6 3549 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier); 3550 if (rc) { 3551 unregister_inetaddr_notifier(&qeth_l3_ip_notifier); 3552 return rc; 3553 } 3554 #else 3555 pr_warning("There is no IPv6 support for the layer 3 discipline\n"); 3556 #endif 3557 return 0; 3558 } 3559 3560 static void qeth_l3_unregister_notifiers(void) 3561 { 3562 3563 QETH_DBF_TEXT(TRACE, 5, "unregnot"); 3564 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); 3565 #ifdef CONFIG_QETH_IPV6 3566 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); 3567 #endif /* QETH_IPV6 */ 3568 } 3569 3570 static int __init qeth_l3_init(void) 3571 { 3572 int rc = 0; 3573 3574 pr_info("register layer 3 discipline\n"); 3575 rc = qeth_l3_register_notifiers(); 3576 return rc; 3577 } 3578 3579 static void __exit qeth_l3_exit(void) 3580 { 3581 qeth_l3_unregister_notifiers(); 3582 pr_info("unregister layer 3 discipline\n"); 3583 } 3584 3585 module_init(qeth_l3_init); 3586 module_exit(qeth_l3_exit); 3587 MODULE_AUTHOR("Frank 
Blaschka <frank.blaschka@de.ibm.com>"); 3588 MODULE_DESCRIPTION("qeth layer 3 discipline"); 3589 MODULE_LICENSE("GPL"); 3590