1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright IBM Corp. 2007, 2009 4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Thomas Spatzier <tspat@de.ibm.com>, 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 */ 9 10 #define KMSG_COMPONENT "qeth" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/module.h> 14 #include <linux/moduleparam.h> 15 #include <linux/string.h> 16 #include <linux/errno.h> 17 #include <linux/kernel.h> 18 #include <linux/slab.h> 19 #include <linux/etherdevice.h> 20 #include <linux/if_bridge.h> 21 #include <linux/list.h> 22 #include <linux/hash.h> 23 #include <linux/hashtable.h> 24 #include <net/switchdev.h> 25 #include <asm/chsc.h> 26 #include <asm/css_chars.h> 27 #include <asm/setup.h> 28 #include "qeth_core.h" 29 #include "qeth_l2.h" 30 31 static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode) 32 { 33 int rc; 34 35 if (retcode) 36 QETH_CARD_TEXT_(card, 2, "err%04x", retcode); 37 switch (retcode) { 38 case IPA_RC_SUCCESS: 39 rc = 0; 40 break; 41 case IPA_RC_L2_UNSUPPORTED_CMD: 42 rc = -EOPNOTSUPP; 43 break; 44 case IPA_RC_L2_ADDR_TABLE_FULL: 45 rc = -ENOSPC; 46 break; 47 case IPA_RC_L2_DUP_MAC: 48 case IPA_RC_L2_DUP_LAYER3_MAC: 49 rc = -EADDRINUSE; 50 break; 51 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: 52 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: 53 rc = -EADDRNOTAVAIL; 54 break; 55 case IPA_RC_L2_MAC_NOT_FOUND: 56 rc = -ENOENT; 57 break; 58 default: 59 rc = -EIO; 60 break; 61 } 62 return rc; 63 } 64 65 static int qeth_l2_send_setdelmac_cb(struct qeth_card *card, 66 struct qeth_reply *reply, 67 unsigned long data) 68 { 69 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 70 71 return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code); 72 } 73 74 static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac, 75 enum qeth_ipa_cmds ipacmd) 76 { 77 struct qeth_ipa_cmd *cmd; 78 struct qeth_cmd_buffer *iob; 79 80 QETH_CARD_TEXT(card, 2, "L2sdmac"); 81 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4, 82 IPA_DATA_SIZEOF(setdelmac)); 83 if (!iob) 84 return -ENOMEM; 85 cmd = __ipa_cmd(iob); 86 cmd->data.setdelmac.mac_length = ETH_ALEN; 87 ether_addr_copy(cmd->data.setdelmac.mac, mac); 88 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL); 89 } 90 91 static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac) 92 { 93 int rc; 94 95 QETH_CARD_TEXT(card, 2, "L2Setmac"); 96 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); 97 if (rc == 0) { 98 dev_info(&card->gdev->dev, 99 "MAC address %pM successfully registered\n", mac); 100 } else { 101 switch (rc) { 102 case -EADDRINUSE: 103 dev_warn(&card->gdev->dev, 104 "MAC address %pM already exists\n", mac); 105 break; 106 case -EADDRNOTAVAIL: 107 dev_warn(&card->gdev->dev, 108 "MAC address %pM is not authorized\n", mac); 109 break; 110 } 111 } 112 return rc; 113 } 114 115 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) 116 { 117 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? 
118 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; 119 int rc; 120 121 QETH_CARD_TEXT(card, 2, "L2Wmac"); 122 rc = qeth_l2_send_setdelmac(card, mac, cmd); 123 if (rc == -EADDRINUSE) 124 QETH_DBF_MESSAGE(2, "MAC address %012llx is already registered on device %x\n", 125 ether_addr_to_u64(mac), CARD_DEVID(card)); 126 else if (rc) 127 QETH_DBF_MESSAGE(2, "Failed to register MAC address %012llx on device %x: %d\n", 128 ether_addr_to_u64(mac), CARD_DEVID(card), rc); 129 return rc; 130 } 131 132 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) 133 { 134 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? 135 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; 136 int rc; 137 138 QETH_CARD_TEXT(card, 2, "L2Rmac"); 139 rc = qeth_l2_send_setdelmac(card, mac, cmd); 140 if (rc) 141 QETH_DBF_MESSAGE(2, "Failed to delete MAC address %012llx on device %x: %d\n", 142 ether_addr_to_u64(mac), CARD_DEVID(card), rc); 143 return rc; 144 } 145 146 static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card) 147 { 148 struct qeth_mac *mac; 149 struct hlist_node *tmp; 150 int i; 151 152 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) { 153 hash_del(&mac->hnode); 154 kfree(mac); 155 } 156 } 157 158 static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue, 159 struct qeth_hdr *hdr, struct sk_buff *skb, 160 __be16 proto, unsigned int data_len) 161 { 162 int cast_type = qeth_get_ether_cast_type(skb); 163 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 164 165 hdr->hdr.l2.pkt_length = data_len; 166 167 if (skb_is_gso(skb)) { 168 hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO; 169 } else { 170 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; 171 if (skb->ip_summed == CHECKSUM_PARTIAL) 172 qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto); 173 } 174 175 /* set byte byte 3 to casting flags */ 176 if (cast_type == RTN_MULTICAST) 177 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST; 178 else if (cast_type == RTN_BROADCAST) 179 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST; 180 else 181 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST; 182 183 /* VSWITCH relies on the VLAN 184 * information to be present in 185 * the QDIO header */ 186 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) { 187 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN; 188 hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI); 189 } 190 } 191 192 static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode) 193 { 194 if (retcode) 195 QETH_CARD_TEXT_(card, 2, "err%04x", retcode); 196 197 switch (retcode) { 198 case IPA_RC_SUCCESS: 199 return 0; 200 case IPA_RC_L2_INVALID_VLAN_ID: 201 return -EINVAL; 202 case IPA_RC_L2_DUP_VLAN_ID: 203 return -EEXIST; 204 case IPA_RC_L2_VLAN_ID_NOT_FOUND: 205 return -ENOENT; 206 case IPA_RC_L2_VLAN_ID_NOT_ALLOWED: 207 return -EPERM; 208 default: 209 return -EIO; 210 } 211 } 212 213 static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, 214 struct qeth_reply *reply, 215 unsigned long data) 216 { 217 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 218 219 QETH_CARD_TEXT(card, 2, "L2sdvcb"); 220 if (cmd->hdr.return_code) { 221 QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n", 222 cmd->data.setdelvlan.vlan_id, 223 CARD_DEVID(card), cmd->hdr.return_code); 224 QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); 225 } 226 return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code); 227 } 228 229 static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, 230 enum qeth_ipa_cmds ipacmd) 231 { 232 struct qeth_ipa_cmd *cmd; 233 struct qeth_cmd_buffer *iob; 234 235 QETH_CARD_TEXT_(card, 
4, "L2sdv%x", ipacmd); 236 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4, 237 IPA_DATA_SIZEOF(setdelvlan)); 238 if (!iob) 239 return -ENOMEM; 240 cmd = __ipa_cmd(iob); 241 cmd->data.setdelvlan.vlan_id = i; 242 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL); 243 } 244 245 static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, 246 __be16 proto, u16 vid) 247 { 248 struct qeth_card *card = dev->ml_priv; 249 250 QETH_CARD_TEXT_(card, 4, "aid:%d", vid); 251 if (!vid) 252 return 0; 253 254 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); 255 } 256 257 static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, 258 __be16 proto, u16 vid) 259 { 260 struct qeth_card *card = dev->ml_priv; 261 262 QETH_CARD_TEXT_(card, 4, "kid:%d", vid); 263 if (!vid) 264 return 0; 265 266 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); 267 } 268 269 static void qeth_l2_set_pnso_mode(struct qeth_card *card, 270 enum qeth_pnso_mode mode) 271 { 272 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); 273 WRITE_ONCE(card->info.pnso_mode, mode); 274 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); 275 276 if (mode == QETH_PNSO_NONE) 277 drain_workqueue(card->event_wq); 278 } 279 280 static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card) 281 { 282 struct switchdev_notifier_fdb_info info = {}; 283 284 QETH_CARD_TEXT(card, 2, "fdbflush"); 285 286 info.addr = NULL; 287 /* flush all VLANs: */ 288 info.vid = 0; 289 info.added_by_user = false; 290 info.offloaded = true; 291 292 call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE, 293 card->dev, &info.info, NULL); 294 } 295 296 static int qeth_l2_request_initial_mac(struct qeth_card *card) 297 { 298 int rc = 0; 299 300 QETH_CARD_TEXT(card, 2, "l2reqmac"); 301 302 if (MACHINE_IS_VM) { 303 rc = qeth_vm_request_mac(card); 304 if (!rc) 305 goto out; 306 QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n", 307 CARD_DEVID(card), rc); 308 QETH_CARD_TEXT_(card, 2, "err%04x", rc); 309 /* fall back to alternative mechanism: */ 310 } 311 312 rc = qeth_setadpparms_change_macaddr(card); 313 if (!rc) 314 goto out; 315 QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n", 316 CARD_DEVID(card), rc); 317 QETH_CARD_TEXT_(card, 2, "1err%04x", rc); 318 319 /* Fall back once more, but some devices don't support a custom MAC 320 * address: 321 */ 322 if (IS_OSM(card) || IS_OSX(card)) 323 return (rc) ? 
rc : -EADDRNOTAVAIL; 324 eth_hw_addr_random(card->dev); 325 326 out: 327 QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len); 328 return 0; 329 } 330 331 static void qeth_l2_register_dev_addr(struct qeth_card *card) 332 { 333 if (!is_valid_ether_addr(card->dev->dev_addr)) 334 qeth_l2_request_initial_mac(card); 335 336 if (!qeth_l2_send_setmac(card, card->dev->dev_addr)) 337 card->info.dev_addr_is_registered = 1; 338 else 339 card->info.dev_addr_is_registered = 0; 340 } 341 342 static int qeth_l2_validate_addr(struct net_device *dev) 343 { 344 struct qeth_card *card = dev->ml_priv; 345 346 if (card->info.dev_addr_is_registered) 347 return eth_validate_addr(dev); 348 349 QETH_CARD_TEXT(card, 4, "nomacadr"); 350 return -EPERM; 351 } 352 353 static int qeth_l2_set_mac_address(struct net_device *dev, void *p) 354 { 355 struct sockaddr *addr = p; 356 struct qeth_card *card = dev->ml_priv; 357 u8 old_addr[ETH_ALEN]; 358 int rc = 0; 359 360 QETH_CARD_TEXT(card, 3, "setmac"); 361 362 if (IS_OSM(card) || IS_OSX(card)) { 363 QETH_CARD_TEXT(card, 3, "setmcTYP"); 364 return -EOPNOTSUPP; 365 } 366 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); 367 if (!is_valid_ether_addr(addr->sa_data)) 368 return -EADDRNOTAVAIL; 369 370 /* don't register the same address twice */ 371 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && 372 card->info.dev_addr_is_registered) 373 return 0; 374 375 /* add the new address, switch over, drop the old */ 376 rc = qeth_l2_send_setmac(card, addr->sa_data); 377 if (rc) 378 return rc; 379 ether_addr_copy(old_addr, dev->dev_addr); 380 eth_hw_addr_set(dev, addr->sa_data); 381 382 if (card->info.dev_addr_is_registered) 383 qeth_l2_remove_mac(card, old_addr); 384 card->info.dev_addr_is_registered = 1; 385 return 0; 386 } 387 388 static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable) 389 { 390 int role; 391 int rc; 392 393 QETH_CARD_TEXT(card, 3, "pmisc2br"); 394 395 if (enable) { 396 if (card->options.sbp.reflect_promisc_primary) 397 role = QETH_SBP_ROLE_PRIMARY; 398 else 399 role = QETH_SBP_ROLE_SECONDARY; 400 } else 401 role = QETH_SBP_ROLE_NONE; 402 403 rc = qeth_bridgeport_setrole(card, role); 404 QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? 
'+' : '-', rc); 405 if (!rc) { 406 card->options.sbp.role = role; 407 card->info.promisc_mode = enable; 408 } 409 } 410 411 static void qeth_l2_set_promisc_mode(struct qeth_card *card) 412 { 413 bool enable = card->dev->flags & IFF_PROMISC; 414 415 if (card->info.promisc_mode == enable) 416 return; 417 418 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) { 419 qeth_setadp_promisc_mode(card, enable); 420 } else { 421 mutex_lock(&card->sbp_lock); 422 if (card->options.sbp.reflect_promisc) 423 qeth_l2_promisc_to_bridge(card, enable); 424 mutex_unlock(&card->sbp_lock); 425 } 426 } 427 428 /* New MAC address is added to the hash table and marked to be written on card 429 * only if there is not in the hash table storage already 430 * 431 */ 432 static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) 433 { 434 u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2])); 435 struct qeth_mac *mac; 436 437 hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) { 438 if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) { 439 mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 440 return; 441 } 442 } 443 444 mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC); 445 if (!mac) 446 return; 447 448 ether_addr_copy(mac->mac_addr, ha->addr); 449 mac->disp_flag = QETH_DISP_ADDR_ADD; 450 451 hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash); 452 } 453 454 static void qeth_l2_rx_mode_work(struct work_struct *work) 455 { 456 struct qeth_card *card = container_of(work, struct qeth_card, 457 rx_mode_work); 458 struct net_device *dev = card->dev; 459 struct netdev_hw_addr *ha; 460 struct qeth_mac *mac; 461 struct hlist_node *tmp; 462 int i; 463 int rc; 464 465 QETH_CARD_TEXT(card, 3, "setmulti"); 466 467 netif_addr_lock_bh(dev); 468 netdev_for_each_mc_addr(ha, dev) 469 qeth_l2_add_mac(card, ha); 470 netdev_for_each_uc_addr(ha, dev) 471 qeth_l2_add_mac(card, ha); 472 netif_addr_unlock_bh(dev); 473 474 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) { 475 switch (mac->disp_flag) { 476 case QETH_DISP_ADDR_DELETE: 477 qeth_l2_remove_mac(card, mac->mac_addr); 478 hash_del(&mac->hnode); 479 kfree(mac); 480 break; 481 case QETH_DISP_ADDR_ADD: 482 rc = qeth_l2_write_mac(card, mac->mac_addr); 483 if (rc) { 484 hash_del(&mac->hnode); 485 kfree(mac); 486 break; 487 } 488 fallthrough; 489 default: 490 /* for next call to set_rx_mode(): */ 491 mac->disp_flag = QETH_DISP_ADDR_DELETE; 492 } 493 } 494 495 qeth_l2_set_promisc_mode(card); 496 } 497 498 static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, 499 struct net_device *dev) 500 { 501 struct qeth_card *card = dev->ml_priv; 502 u16 txq = skb_get_queue_mapping(skb); 503 struct qeth_qdio_out_q *queue; 504 int rc; 505 506 if (!skb_is_gso(skb)) 507 qdisc_skb_cb(skb)->pkt_len = skb->len; 508 if (IS_IQD(card)) 509 txq = qeth_iqd_translate_txq(dev, txq); 510 queue = card->qdio.out_qs[txq]; 511 512 rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb), 513 qeth_l2_fill_header); 514 if (!rc) 515 return NETDEV_TX_OK; 516 517 QETH_TXQ_STAT_INC(queue, tx_dropped); 518 kfree_skb(skb); 519 return NETDEV_TX_OK; 520 } 521 522 static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb, 523 struct net_device *sb_dev) 524 { 525 struct qeth_card *card = dev->ml_priv; 526 527 if (IS_IQD(card)) 528 return qeth_iqd_select_queue(dev, skb, 529 qeth_get_ether_cast_type(skb), 530 sb_dev); 531 if (qeth_uses_tx_prio_queueing(card)) 532 return qeth_get_priority_queue(card, skb); 533 534 return netdev_pick_tx(dev, skb, sb_dev); 535 } 
536 537 static void qeth_l2_set_rx_mode(struct net_device *dev) 538 { 539 struct qeth_card *card = dev->ml_priv; 540 541 schedule_work(&card->rx_mode_work); 542 } 543 544 /** 545 * qeth_l2_pnso() - perform network subchannel operation 546 * @card: qeth_card structure pointer 547 * @oc: Operation Code 548 * @cnc: Boolean Change-Notification Control 549 * @cb: Callback function will be executed for each element 550 * of the address list 551 * @priv: Pointer to pass to the callback function. 552 * 553 * Collects network information in a network address list and calls the 554 * callback function for every entry in the list. If "change-notification- 555 * control" is set, further changes in the address list will be reported 556 * via the IPA command. 557 */ 558 static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc, 559 void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry), 560 void *priv) 561 { 562 struct ccw_device *ddev = CARD_DDEV(card); 563 struct chsc_pnso_area *rr; 564 u32 prev_instance = 0; 565 int isfirstblock = 1; 566 int i, size, elems; 567 int rc; 568 569 rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL); 570 if (rr == NULL) 571 return -ENOMEM; 572 do { 573 QETH_CARD_TEXT(card, 2, "PNSO"); 574 /* on the first iteration, naihdr.resume_token will be zero */ 575 rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token, 576 cnc); 577 if (rc) 578 continue; 579 if (cb == NULL) 580 continue; 581 582 size = rr->naihdr.naids; 583 if (size != sizeof(struct chsc_pnso_naid_l2)) { 584 WARN_ON_ONCE(1); 585 continue; 586 } 587 588 elems = (rr->response.length - sizeof(struct chsc_header) - 589 sizeof(struct chsc_pnso_naihdr)) / size; 590 591 if (!isfirstblock && (rr->naihdr.instance != prev_instance)) { 592 /* Inform the caller that they need to scrap */ 593 /* the data that was already reported via cb */ 594 rc = -EAGAIN; 595 break; 596 } 597 isfirstblock = 0; 598 prev_instance = rr->naihdr.instance; 599 for (i = 0; i < elems; i++) 600 (*cb)(priv, &rr->entries[i]); 601 } while ((rc == -EBUSY) || (!rc && /* list stored */ 602 /* resume token is non-zero => list incomplete */ 603 (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2))); 604 605 if (rc) 606 QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code); 607 608 free_page((unsigned long)rr); 609 return rc; 610 } 611 612 static bool qeth_is_my_net_if_token(struct qeth_card *card, 613 struct net_if_token *token) 614 { 615 return ((card->info.ddev_devno == token->devnum) && 616 (card->info.cssid == token->cssid) && 617 (card->info.iid == token->iid) && 618 (card->info.ssid == token->ssid) && 619 (card->info.chpid == token->chpid) && 620 (card->info.chid == token->chid)); 621 } 622 623 /** 624 * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge 625 * @card: qeth_card structure pointer 626 * @code: event bitmask: high order bit 0x80 set to 627 * 1 - removal of an object 628 * 0 - addition of an object 629 * Object type(s): 630 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC 631 * @token: "network token" structure identifying 'physical' location 632 * of the target 633 * @addr_lnid: structure with MAC address and VLAN ID of the target 634 */ 635 static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code, 636 struct net_if_token *token, 637 struct mac_addr_lnid *addr_lnid) 638 { 639 struct switchdev_notifier_fdb_info info = {}; 640 u8 ntfy_mac[ETH_ALEN]; 641 642 ether_addr_copy(ntfy_mac, addr_lnid->mac); 643 /* Ignore VLAN only changes */ 644 if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR)) 645 return; 646 /* 
Ignore mcast entries */ 647 if (is_multicast_ether_addr(ntfy_mac)) 648 return; 649 /* Ignore my own addresses */ 650 if (qeth_is_my_net_if_token(card, token)) 651 return; 652 653 info.addr = ntfy_mac; 654 /* don't report VLAN IDs */ 655 info.vid = 0; 656 info.added_by_user = false; 657 info.offloaded = true; 658 659 if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) { 660 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, 661 card->dev, &info.info, NULL); 662 QETH_CARD_TEXT(card, 4, "andelmac"); 663 QETH_CARD_TEXT_(card, 4, 664 "mc%012llx", ether_addr_to_u64(ntfy_mac)); 665 } else { 666 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, 667 card->dev, &info.info, NULL); 668 QETH_CARD_TEXT(card, 4, "anaddmac"); 669 QETH_CARD_TEXT_(card, 4, 670 "mc%012llx", ether_addr_to_u64(ntfy_mac)); 671 } 672 } 673 674 static void qeth_l2_dev2br_an_set_cb(void *priv, 675 struct chsc_pnso_naid_l2 *entry) 676 { 677 u8 code = IPA_ADDR_CHANGE_CODE_MACADDR; 678 struct qeth_card *card = priv; 679 680 if (entry->addr_lnid.lnid < VLAN_N_VID) 681 code |= IPA_ADDR_CHANGE_CODE_VLANID; 682 qeth_l2_dev2br_fdb_notify(card, code, 683 (struct net_if_token *)&entry->nit, 684 (struct mac_addr_lnid *)&entry->addr_lnid); 685 } 686 687 /** 688 * qeth_l2_dev2br_an_set() - 689 * Enable or disable 'dev to bridge network address notification' 690 * @card: qeth_card structure pointer 691 * @enable: Enable or disable 'dev to bridge network address notification' 692 * 693 * Returns negative errno-compatible error indication or 0 on success. 694 * 695 * On enable, emits a series of address notifications for all 696 * currently registered hosts. 697 */ 698 static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable) 699 { 700 int rc; 701 702 if (enable) { 703 QETH_CARD_TEXT(card, 2, "anseton"); 704 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1, 705 qeth_l2_dev2br_an_set_cb, card); 706 if (rc == -EAGAIN) 707 /* address notification enabled, but inconsistent 708 * addresses reported -> disable address notification 709 */ 710 qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, 711 NULL, NULL); 712 } else { 713 QETH_CARD_TEXT(card, 2, "ansetoff"); 714 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL); 715 } 716 717 return rc; 718 } 719 720 struct qeth_l2_br2dev_event_work { 721 struct work_struct work; 722 struct net_device *br_dev; 723 struct net_device *lsync_dev; 724 struct net_device *dst_dev; 725 unsigned long event; 726 unsigned char addr[ETH_ALEN]; 727 }; 728 729 static const struct net_device_ops qeth_l2_netdev_ops; 730 731 static bool qeth_l2_must_learn(struct net_device *netdev, 732 struct net_device *dstdev) 733 { 734 struct qeth_priv *priv; 735 736 priv = netdev_priv(netdev); 737 return (netdev != dstdev && 738 (priv->brport_features & BR_LEARNING_SYNC) && 739 !(br_port_flag_is_set(netdev, BR_ISOLATED) && 740 br_port_flag_is_set(dstdev, BR_ISOLATED)) && 741 netdev->netdev_ops == &qeth_l2_netdev_ops); 742 } 743 744 /** 745 * qeth_l2_br2dev_worker() - update local MACs 746 * @work: bridge to device FDB update 747 * 748 * Update local MACs of a learning_sync bridgeport so it can receive 749 * messages for a destination port. 750 * In case of an isolated learning_sync port, also update its isolated 751 * siblings. 
752 */ 753 static void qeth_l2_br2dev_worker(struct work_struct *work) 754 { 755 struct qeth_l2_br2dev_event_work *br2dev_event_work = 756 container_of(work, struct qeth_l2_br2dev_event_work, work); 757 struct net_device *lsyncdev = br2dev_event_work->lsync_dev; 758 struct net_device *dstdev = br2dev_event_work->dst_dev; 759 struct net_device *brdev = br2dev_event_work->br_dev; 760 unsigned long event = br2dev_event_work->event; 761 unsigned char *addr = br2dev_event_work->addr; 762 struct qeth_card *card = lsyncdev->ml_priv; 763 struct net_device *lowerdev; 764 struct list_head *iter; 765 int err = 0; 766 767 kfree(br2dev_event_work); 768 QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event); 769 QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr)); 770 771 rcu_read_lock(); 772 /* Verify preconditions are still valid: */ 773 if (!netif_is_bridge_port(lsyncdev) || 774 brdev != netdev_master_upper_dev_get_rcu(lsyncdev)) 775 goto unlock; 776 if (!qeth_l2_must_learn(lsyncdev, dstdev)) 777 goto unlock; 778 779 if (br_port_flag_is_set(lsyncdev, BR_ISOLATED)) { 780 /* Update lsyncdev and its isolated sibling(s): */ 781 iter = &brdev->adj_list.lower; 782 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter); 783 while (lowerdev) { 784 if (br_port_flag_is_set(lowerdev, BR_ISOLATED)) { 785 switch (event) { 786 case SWITCHDEV_FDB_ADD_TO_DEVICE: 787 err = dev_uc_add(lowerdev, addr); 788 break; 789 case SWITCHDEV_FDB_DEL_TO_DEVICE: 790 err = dev_uc_del(lowerdev, addr); 791 break; 792 default: 793 break; 794 } 795 if (err) { 796 QETH_CARD_TEXT(card, 2, "b2derris"); 797 QETH_CARD_TEXT_(card, 2, 798 "err%02lx%03d", event, 799 lowerdev->ifindex); 800 } 801 } 802 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter); 803 } 804 } else { 805 switch (event) { 806 case SWITCHDEV_FDB_ADD_TO_DEVICE: 807 err = dev_uc_add(lsyncdev, addr); 808 break; 809 case SWITCHDEV_FDB_DEL_TO_DEVICE: 810 err = dev_uc_del(lsyncdev, addr); 811 break; 812 default: 813 break; 814 } 815 if (err) 816 QETH_CARD_TEXT_(card, 2, "b2derr%02lx", event); 817 } 818 819 unlock: 820 rcu_read_unlock(); 821 dev_put(brdev); 822 dev_put(lsyncdev); 823 dev_put(dstdev); 824 } 825 826 static int qeth_l2_br2dev_queue_work(struct net_device *brdev, 827 struct net_device *lsyncdev, 828 struct net_device *dstdev, 829 unsigned long event, 830 const unsigned char *addr) 831 { 832 struct qeth_l2_br2dev_event_work *worker_data; 833 struct qeth_card *card; 834 835 worker_data = kzalloc(sizeof(*worker_data), GFP_ATOMIC); 836 if (!worker_data) 837 return -ENOMEM; 838 INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker); 839 worker_data->br_dev = brdev; 840 worker_data->lsync_dev = lsyncdev; 841 worker_data->dst_dev = dstdev; 842 worker_data->event = event; 843 ether_addr_copy(worker_data->addr, addr); 844 845 card = lsyncdev->ml_priv; 846 /* Take a reference on the sw port devices and the bridge */ 847 dev_hold(brdev); 848 dev_hold(lsyncdev); 849 dev_hold(dstdev); 850 queue_work(card->event_wq, &worker_data->work); 851 return 0; 852 } 853 854 /* Called under rtnl_lock */ 855 static int qeth_l2_switchdev_event(struct notifier_block *unused, 856 unsigned long event, void *ptr) 857 { 858 struct net_device *dstdev, *brdev, *lowerdev; 859 struct switchdev_notifier_fdb_info *fdb_info; 860 struct switchdev_notifier_info *info = ptr; 861 struct list_head *iter; 862 struct qeth_card *card; 863 int rc; 864 865 if (!(event == SWITCHDEV_FDB_ADD_TO_DEVICE || 866 event == SWITCHDEV_FDB_DEL_TO_DEVICE)) 867 return NOTIFY_DONE; 868 869 dstdev = switchdev_notifier_info_to_dev(info); 
870 brdev = netdev_master_upper_dev_get_rcu(dstdev); 871 if (!brdev || !netif_is_bridge_master(brdev)) 872 return NOTIFY_DONE; 873 fdb_info = container_of(info, 874 struct switchdev_notifier_fdb_info, 875 info); 876 iter = &brdev->adj_list.lower; 877 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter); 878 while (lowerdev) { 879 if (qeth_l2_must_learn(lowerdev, dstdev)) { 880 card = lowerdev->ml_priv; 881 QETH_CARD_TEXT_(card, 4, "b2dqw%03lx", event); 882 rc = qeth_l2_br2dev_queue_work(brdev, lowerdev, 883 dstdev, event, 884 fdb_info->addr); 885 if (rc) { 886 QETH_CARD_TEXT(card, 2, "b2dqwerr"); 887 return NOTIFY_BAD; 888 } 889 } 890 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter); 891 } 892 return NOTIFY_DONE; 893 } 894 895 static struct notifier_block qeth_l2_sw_notifier = { 896 .notifier_call = qeth_l2_switchdev_event, 897 }; 898 899 static refcount_t qeth_l2_switchdev_notify_refcnt; 900 901 /* Called under rtnl_lock */ 902 static void qeth_l2_br2dev_get(void) 903 { 904 int rc; 905 906 if (!refcount_inc_not_zero(&qeth_l2_switchdev_notify_refcnt)) { 907 rc = register_switchdev_notifier(&qeth_l2_sw_notifier); 908 if (rc) { 909 QETH_DBF_MESSAGE(2, 910 "failed to register qeth_l2_sw_notifier: %d\n", 911 rc); 912 } else { 913 refcount_set(&qeth_l2_switchdev_notify_refcnt, 1); 914 QETH_DBF_MESSAGE(2, "qeth_l2_sw_notifier registered\n"); 915 } 916 } 917 QETH_DBF_TEXT_(SETUP, 2, "b2d+%04d", 918 qeth_l2_switchdev_notify_refcnt.refs.counter); 919 } 920 921 /* Called under rtnl_lock */ 922 static void qeth_l2_br2dev_put(void) 923 { 924 int rc; 925 926 if (refcount_dec_and_test(&qeth_l2_switchdev_notify_refcnt)) { 927 rc = unregister_switchdev_notifier(&qeth_l2_sw_notifier); 928 if (rc) { 929 QETH_DBF_MESSAGE(2, 930 "failed to unregister qeth_l2_sw_notifier: %d\n", 931 rc); 932 } else { 933 QETH_DBF_MESSAGE(2, 934 "qeth_l2_sw_notifier unregistered\n"); 935 } 936 } 937 QETH_DBF_TEXT_(SETUP, 2, "b2d-%04d", 938 qeth_l2_switchdev_notify_refcnt.refs.counter); 939 } 940 941 static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 942 struct net_device *dev, u32 filter_mask, 943 int nlflags) 944 { 945 struct qeth_priv *priv = netdev_priv(dev); 946 struct qeth_card *card = dev->ml_priv; 947 u16 mode = BRIDGE_MODE_UNDEF; 948 949 /* Do not even show qeth devs that cannot do bridge_setlink */ 950 if (!priv->brport_hw_features || !netif_device_present(dev) || 951 qeth_bridgeport_is_in_use(card)) 952 return -EOPNOTSUPP; 953 954 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, 955 mode, priv->brport_features, 956 priv->brport_hw_features, 957 nlflags, filter_mask, NULL); 958 } 959 960 static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = { 961 [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 }, 962 }; 963 964 /** 965 * qeth_l2_bridge_setlink() - set bridgeport attributes 966 * @dev: netdevice 967 * @nlh: netlink message header 968 * @flags: bridge flags (here: BRIDGE_FLAGS_SELF) 969 * @extack: extended ACK report struct 970 * 971 * Called under rtnl_lock 972 */ 973 static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 974 u16 flags, struct netlink_ext_ack *extack) 975 { 976 struct qeth_priv *priv = netdev_priv(dev); 977 struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1]; 978 struct qeth_card *card = dev->ml_priv; 979 struct nlattr *attr, *nested_attr; 980 bool enable, has_protinfo = false; 981 int rem1, rem2; 982 int rc; 983 984 if (!netif_device_present(dev)) 985 return -ENODEV; 986 987 nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) { 988 if 
(nla_type(attr) == IFLA_PROTINFO) { 989 rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr, 990 qeth_brport_policy, extack); 991 if (rc) 992 return rc; 993 has_protinfo = true; 994 } else if (nla_type(attr) == IFLA_AF_SPEC) { 995 nla_for_each_nested(nested_attr, attr, rem2) { 996 if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS) 997 continue; 998 NL_SET_ERR_MSG_ATTR(extack, nested_attr, 999 "Unsupported attribute"); 1000 return -EINVAL; 1001 } 1002 } else { 1003 NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute"); 1004 return -EINVAL; 1005 } 1006 } 1007 if (!has_protinfo) 1008 return 0; 1009 if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC]) 1010 return -EINVAL; 1011 if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) { 1012 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC], 1013 "Operation not supported by HW"); 1014 return -EOPNOTSUPP; 1015 } 1016 if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) { 1017 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC], 1018 "Requires NET_SWITCHDEV"); 1019 return -EOPNOTSUPP; 1020 } 1021 enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]); 1022 1023 if (enable == !!(priv->brport_features & BR_LEARNING_SYNC)) 1024 return 0; 1025 1026 mutex_lock(&card->sbp_lock); 1027 /* do not change anything if BridgePort is enabled */ 1028 if (qeth_bridgeport_is_in_use(card)) { 1029 NL_SET_ERR_MSG(extack, "n/a (BridgePort)"); 1030 rc = -EBUSY; 1031 } else if (enable) { 1032 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO); 1033 rc = qeth_l2_dev2br_an_set(card, true); 1034 if (rc) { 1035 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); 1036 } else { 1037 priv->brport_features |= BR_LEARNING_SYNC; 1038 qeth_l2_br2dev_get(); 1039 } 1040 } else { 1041 rc = qeth_l2_dev2br_an_set(card, false); 1042 if (!rc) { 1043 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); 1044 priv->brport_features ^= BR_LEARNING_SYNC; 1045 qeth_l2_dev2br_fdb_flush(card); 1046 qeth_l2_br2dev_put(); 1047 } 1048 } 1049 mutex_unlock(&card->sbp_lock); 1050 1051 return rc; 1052 } 1053 1054 static const struct net_device_ops qeth_l2_netdev_ops = { 1055 .ndo_open = qeth_open, 1056 .ndo_stop = qeth_stop, 1057 .ndo_get_stats64 = qeth_get_stats64, 1058 .ndo_start_xmit = qeth_l2_hard_start_xmit, 1059 .ndo_features_check = qeth_features_check, 1060 .ndo_select_queue = qeth_l2_select_queue, 1061 .ndo_validate_addr = qeth_l2_validate_addr, 1062 .ndo_set_rx_mode = qeth_l2_set_rx_mode, 1063 .ndo_eth_ioctl = qeth_do_ioctl, 1064 .ndo_siocdevprivate = qeth_siocdevprivate, 1065 .ndo_set_mac_address = qeth_l2_set_mac_address, 1066 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, 1067 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid, 1068 .ndo_tx_timeout = qeth_tx_timeout, 1069 .ndo_fix_features = qeth_fix_features, 1070 .ndo_set_features = qeth_set_features, 1071 .ndo_bridge_getlink = qeth_l2_bridge_getlink, 1072 .ndo_bridge_setlink = qeth_l2_bridge_setlink, 1073 }; 1074 1075 static int qeth_l2_setup_netdev(struct qeth_card *card) 1076 { 1077 card->dev->needed_headroom = sizeof(struct qeth_hdr); 1078 card->dev->netdev_ops = &qeth_l2_netdev_ops; 1079 card->dev->priv_flags |= IFF_UNICAST_FLT; 1080 1081 if (IS_OSM(card)) { 1082 card->dev->features |= NETIF_F_VLAN_CHALLENGED; 1083 } else { 1084 if (!IS_VM_NIC(card)) 1085 card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1086 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1087 } 1088 1089 if (IS_OSD(card) && !IS_VM_NIC(card)) { 1090 card->dev->features |= NETIF_F_SG; 1091 /* OSA 3S and earlier has no RX/TX support */ 1092 if (qeth_is_supported(card, 
IPA_OUTBOUND_CHECKSUM)) { 1093 card->dev->hw_features |= NETIF_F_IP_CSUM; 1094 card->dev->vlan_features |= NETIF_F_IP_CSUM; 1095 } 1096 } 1097 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { 1098 card->dev->hw_features |= NETIF_F_IPV6_CSUM; 1099 card->dev->vlan_features |= NETIF_F_IPV6_CSUM; 1100 } 1101 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) || 1102 qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) { 1103 card->dev->hw_features |= NETIF_F_RXCSUM; 1104 card->dev->vlan_features |= NETIF_F_RXCSUM; 1105 } 1106 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { 1107 card->dev->hw_features |= NETIF_F_TSO; 1108 card->dev->vlan_features |= NETIF_F_TSO; 1109 } 1110 if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) { 1111 card->dev->hw_features |= NETIF_F_TSO6; 1112 card->dev->vlan_features |= NETIF_F_TSO6; 1113 } 1114 1115 if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) { 1116 card->dev->needed_headroom = sizeof(struct qeth_hdr_tso); 1117 netif_keep_dst(card->dev); 1118 netif_set_gso_max_size(card->dev, 1119 PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)); 1120 } 1121 1122 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); 1123 return register_netdev(card->dev); 1124 } 1125 1126 static void qeth_l2_trace_features(struct qeth_card *card) 1127 { 1128 /* Set BridgePort features */ 1129 QETH_CARD_TEXT(card, 2, "featuSBP"); 1130 QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs, 1131 sizeof(card->options.sbp.supported_funcs)); 1132 /* VNIC Characteristics features */ 1133 QETH_CARD_TEXT(card, 2, "feaVNICC"); 1134 QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars, 1135 sizeof(card->options.vnicc.sup_chars)); 1136 } 1137 1138 static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) 1139 { 1140 if (!card->options.sbp.reflect_promisc && 1141 card->options.sbp.role != QETH_SBP_ROLE_NONE) { 1142 /* Conditional to avoid spurious error messages */ 1143 qeth_bridgeport_setrole(card, card->options.sbp.role); 1144 /* Let the callback function refresh the stored role value. 
*/ 1145 qeth_bridgeport_query_ports(card, &card->options.sbp.role, 1146 NULL); 1147 } 1148 if (card->options.sbp.hostnotification) { 1149 if (qeth_bridgeport_an_set(card, 1)) 1150 card->options.sbp.hostnotification = 0; 1151 } 1152 } 1153 1154 /** 1155 * qeth_l2_detect_dev2br_support() - 1156 * Detect whether this card supports 'dev to bridge fdb network address 1157 * change notification' and thus can support the learning_sync bridgeport 1158 * attribute 1159 * @card: qeth_card structure pointer 1160 */ 1161 static void qeth_l2_detect_dev2br_support(struct qeth_card *card) 1162 { 1163 struct qeth_priv *priv = netdev_priv(card->dev); 1164 bool dev2br_supported; 1165 1166 QETH_CARD_TEXT(card, 2, "d2brsup"); 1167 if (!IS_IQD(card)) 1168 return; 1169 1170 /* dev2br requires valid cssid,iid,chid */ 1171 dev2br_supported = card->info.ids_valid && 1172 css_general_characteristics.enarf; 1173 QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported); 1174 1175 if (dev2br_supported) 1176 priv->brport_hw_features |= BR_LEARNING_SYNC; 1177 else 1178 priv->brport_hw_features &= ~BR_LEARNING_SYNC; 1179 } 1180 1181 static void qeth_l2_enable_brport_features(struct qeth_card *card) 1182 { 1183 struct qeth_priv *priv = netdev_priv(card->dev); 1184 int rc; 1185 1186 if (priv->brport_features & BR_LEARNING_SYNC) { 1187 if (priv->brport_hw_features & BR_LEARNING_SYNC) { 1188 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO); 1189 rc = qeth_l2_dev2br_an_set(card, true); 1190 if (rc == -EAGAIN) { 1191 /* Recoverable error, retry once */ 1192 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); 1193 qeth_l2_dev2br_fdb_flush(card); 1194 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO); 1195 rc = qeth_l2_dev2br_an_set(card, true); 1196 } 1197 if (rc) { 1198 netdev_err(card->dev, 1199 "failed to enable bridge learning_sync: %d\n", 1200 rc); 1201 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); 1202 qeth_l2_dev2br_fdb_flush(card); 1203 priv->brport_features ^= BR_LEARNING_SYNC; 1204 } 1205 } else { 1206 dev_warn(&card->gdev->dev, 1207 "bridge learning_sync not supported\n"); 1208 priv->brport_features ^= BR_LEARNING_SYNC; 1209 } 1210 } 1211 } 1212 1213 /* SETBRIDGEPORT support, async notifications */ 1214 1215 enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset}; 1216 1217 /** 1218 * qeth_bridge_emit_host_event() - bridgeport address change notification 1219 * @card: qeth_card structure pointer, for udev events. 1220 * @evtype: "normal" register/unregister, or abort, or reset. For abort 1221 * and reset token and addr_lnid are unused and may be NULL. 1222 * @code: event bitmask: high order bit 0x80 value 1 means removal of an 1223 * object, 0 - addition of an object. 1224 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC. 1225 * @token: "network token" structure identifying physical address of the port. 1226 * @addr_lnid: pointer to structure with MAC address and VLAN ID. 1227 * 1228 * This function is called when registrations and deregistrations are 1229 * reported by the hardware, and also when notifications are enabled - 1230 * for all currently registered addresses. 1231 */ 1232 static void qeth_bridge_emit_host_event(struct qeth_card *card, 1233 enum qeth_an_event_type evtype, 1234 u8 code, 1235 struct net_if_token *token, 1236 struct mac_addr_lnid *addr_lnid) 1237 { 1238 char str[7][32]; 1239 char *env[8]; 1240 int i = 0; 1241 1242 switch (evtype) { 1243 case anev_reg_unreg: 1244 snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s", 1245 (code & IPA_ADDR_CHANGE_CODE_REMOVAL) 1246 ? 
"deregister" : "register"); 1247 env[i] = str[i]; i++; 1248 if (code & IPA_ADDR_CHANGE_CODE_VLANID) { 1249 snprintf(str[i], sizeof(str[i]), "VLAN=%d", 1250 addr_lnid->lnid); 1251 env[i] = str[i]; i++; 1252 } 1253 if (code & IPA_ADDR_CHANGE_CODE_MACADDR) { 1254 snprintf(str[i], sizeof(str[i]), "MAC=%pM", 1255 addr_lnid->mac); 1256 env[i] = str[i]; i++; 1257 } 1258 snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x", 1259 token->cssid, token->ssid, token->devnum); 1260 env[i] = str[i]; i++; 1261 snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid); 1262 env[i] = str[i]; i++; 1263 snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x", 1264 token->chpid); 1265 env[i] = str[i]; i++; 1266 snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid); 1267 env[i] = str[i]; i++; 1268 break; 1269 case anev_abort: 1270 snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort"); 1271 env[i] = str[i]; i++; 1272 break; 1273 case anev_reset: 1274 snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset"); 1275 env[i] = str[i]; i++; 1276 break; 1277 } 1278 env[i] = NULL; 1279 kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env); 1280 } 1281 1282 struct qeth_bridge_state_data { 1283 struct work_struct worker; 1284 struct qeth_card *card; 1285 u8 role; 1286 u8 state; 1287 }; 1288 1289 static void qeth_bridge_state_change_worker(struct work_struct *work) 1290 { 1291 struct qeth_bridge_state_data *data = 1292 container_of(work, struct qeth_bridge_state_data, worker); 1293 char env_locrem[32]; 1294 char env_role[32]; 1295 char env_state[32]; 1296 char *env[] = { 1297 env_locrem, 1298 env_role, 1299 env_state, 1300 NULL 1301 }; 1302 1303 snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); 1304 snprintf(env_role, sizeof(env_role), "ROLE=%s", 1305 (data->role == QETH_SBP_ROLE_NONE) ? "none" : 1306 (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" : 1307 (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" : 1308 "<INVALID>"); 1309 snprintf(env_state, sizeof(env_state), "STATE=%s", 1310 (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" : 1311 (data->state == QETH_SBP_STATE_STANDBY) ? "standby" : 1312 (data->state == QETH_SBP_STATE_ACTIVE) ? 
"active" : 1313 "<INVALID>"); 1314 kobject_uevent_env(&data->card->gdev->dev.kobj, 1315 KOBJ_CHANGE, env); 1316 kfree(data); 1317 } 1318 1319 static void qeth_bridge_state_change(struct qeth_card *card, 1320 struct qeth_ipa_cmd *cmd) 1321 { 1322 struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data; 1323 struct qeth_bridge_state_data *data; 1324 1325 QETH_CARD_TEXT(card, 2, "brstchng"); 1326 if (qports->num_entries == 0) { 1327 QETH_CARD_TEXT(card, 2, "BPempty"); 1328 return; 1329 } 1330 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { 1331 QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); 1332 return; 1333 } 1334 1335 data = kzalloc(sizeof(*data), GFP_ATOMIC); 1336 if (!data) { 1337 QETH_CARD_TEXT(card, 2, "BPSalloc"); 1338 return; 1339 } 1340 INIT_WORK(&data->worker, qeth_bridge_state_change_worker); 1341 data->card = card; 1342 /* Information for the local port: */ 1343 data->role = qports->entry[0].role; 1344 data->state = qports->entry[0].state; 1345 1346 queue_work(card->event_wq, &data->worker); 1347 } 1348 1349 struct qeth_addr_change_data { 1350 struct delayed_work dwork; 1351 struct qeth_card *card; 1352 struct qeth_ipacmd_addr_change ac_event; 1353 }; 1354 1355 static void qeth_l2_dev2br_worker(struct work_struct *work) 1356 { 1357 struct delayed_work *dwork = to_delayed_work(work); 1358 struct qeth_addr_change_data *data; 1359 struct qeth_card *card; 1360 struct qeth_priv *priv; 1361 unsigned int i; 1362 int rc; 1363 1364 data = container_of(dwork, struct qeth_addr_change_data, dwork); 1365 card = data->card; 1366 priv = netdev_priv(card->dev); 1367 1368 QETH_CARD_TEXT(card, 4, "dev2brew"); 1369 1370 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE) 1371 goto free; 1372 1373 if (data->ac_event.lost_event_mask) { 1374 /* Potential re-config in progress, try again later: */ 1375 if (!rtnl_trylock()) { 1376 queue_delayed_work(card->event_wq, dwork, 1377 msecs_to_jiffies(100)); 1378 return; 1379 } 1380 1381 if (!netif_device_present(card->dev)) { 1382 rtnl_unlock(); 1383 goto free; 1384 } 1385 1386 QETH_DBF_MESSAGE(3, 1387 "Address change notification overflow on device %x\n", 1388 CARD_DEVID(card)); 1389 /* Card fdb and bridge fdb are out of sync, card has stopped 1390 * notifications (no need to drain_workqueue). Purge all 1391 * 'extern_learn' entries from the parent bridge and restart 1392 * the notifications. 1393 */ 1394 qeth_l2_dev2br_fdb_flush(card); 1395 rc = qeth_l2_dev2br_an_set(card, true); 1396 if (rc) { 1397 /* TODO: if we want to retry after -EAGAIN, be 1398 * aware there could be stale entries in the 1399 * workqueue now, that need to be drained. 
1400 * For now we give up: 1401 */ 1402 netdev_err(card->dev, 1403 "bridge learning_sync failed to recover: %d\n", 1404 rc); 1405 WRITE_ONCE(card->info.pnso_mode, 1406 QETH_PNSO_NONE); 1407 /* To remove fdb entries reported by an_set: */ 1408 qeth_l2_dev2br_fdb_flush(card); 1409 priv->brport_features ^= BR_LEARNING_SYNC; 1410 } else { 1411 QETH_DBF_MESSAGE(3, 1412 "Address Notification resynced on device %x\n", 1413 CARD_DEVID(card)); 1414 } 1415 1416 rtnl_unlock(); 1417 } else { 1418 for (i = 0; i < data->ac_event.num_entries; i++) { 1419 struct qeth_ipacmd_addr_change_entry *entry = 1420 &data->ac_event.entry[i]; 1421 qeth_l2_dev2br_fdb_notify(card, 1422 entry->change_code, 1423 &entry->token, 1424 &entry->addr_lnid); 1425 } 1426 } 1427 1428 free: 1429 kfree(data); 1430 } 1431 1432 static void qeth_addr_change_event_worker(struct work_struct *work) 1433 { 1434 struct delayed_work *dwork = to_delayed_work(work); 1435 struct qeth_addr_change_data *data; 1436 struct qeth_card *card; 1437 int i; 1438 1439 data = container_of(dwork, struct qeth_addr_change_data, dwork); 1440 card = data->card; 1441 1442 QETH_CARD_TEXT(data->card, 4, "adrchgew"); 1443 1444 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE) 1445 goto free; 1446 1447 if (data->ac_event.lost_event_mask) { 1448 /* Potential re-config in progress, try again later: */ 1449 if (!mutex_trylock(&card->sbp_lock)) { 1450 queue_delayed_work(card->event_wq, dwork, 1451 msecs_to_jiffies(100)); 1452 return; 1453 } 1454 1455 dev_info(&data->card->gdev->dev, 1456 "Address change notification stopped on %s (%s)\n", 1457 netdev_name(card->dev), 1458 (data->ac_event.lost_event_mask == 0x01) 1459 ? "Overflow" 1460 : (data->ac_event.lost_event_mask == 0x02) 1461 ? "Bridge port state change" 1462 : "Unknown reason"); 1463 1464 data->card->options.sbp.hostnotification = 0; 1465 card->info.pnso_mode = QETH_PNSO_NONE; 1466 mutex_unlock(&data->card->sbp_lock); 1467 qeth_bridge_emit_host_event(data->card, anev_abort, 1468 0, NULL, NULL); 1469 } else 1470 for (i = 0; i < data->ac_event.num_entries; i++) { 1471 struct qeth_ipacmd_addr_change_entry *entry = 1472 &data->ac_event.entry[i]; 1473 qeth_bridge_emit_host_event(data->card, 1474 anev_reg_unreg, 1475 entry->change_code, 1476 &entry->token, 1477 &entry->addr_lnid); 1478 } 1479 1480 free: 1481 kfree(data); 1482 } 1483 1484 static void qeth_addr_change_event(struct qeth_card *card, 1485 struct qeth_ipa_cmd *cmd) 1486 { 1487 struct qeth_ipacmd_addr_change *hostevs = 1488 &cmd->data.addrchange; 1489 struct qeth_addr_change_data *data; 1490 int extrasize; 1491 1492 if (card->info.pnso_mode == QETH_PNSO_NONE) 1493 return; 1494 1495 QETH_CARD_TEXT(card, 4, "adrchgev"); 1496 if (cmd->hdr.return_code != 0x0000) { 1497 if (cmd->hdr.return_code == 0x0010) { 1498 if (hostevs->lost_event_mask == 0x00) 1499 hostevs->lost_event_mask = 0xff; 1500 } else { 1501 QETH_CARD_TEXT_(card, 2, "ACHN%04x", 1502 cmd->hdr.return_code); 1503 return; 1504 } 1505 } 1506 extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) * 1507 hostevs->num_entries; 1508 data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize, 1509 GFP_ATOMIC); 1510 if (!data) { 1511 QETH_CARD_TEXT(card, 2, "ACNalloc"); 1512 return; 1513 } 1514 if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT) 1515 INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker); 1516 else 1517 INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker); 1518 data->card = card; 1519 memcpy(&data->ac_event, hostevs, 1520 sizeof(struct qeth_ipacmd_addr_change) + extrasize); 
1521 queue_delayed_work(card->event_wq, &data->dwork, 0); 1522 } 1523 1524 /* SETBRIDGEPORT support; sending commands */ 1525 1526 struct _qeth_sbp_cbctl { 1527 union { 1528 u32 supported; 1529 struct { 1530 enum qeth_sbp_roles *role; 1531 enum qeth_sbp_states *state; 1532 } qports; 1533 } data; 1534 }; 1535 1536 static int qeth_bridgeport_makerc(struct qeth_card *card, 1537 struct qeth_ipa_cmd *cmd) 1538 { 1539 struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp; 1540 enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code; 1541 u16 ipa_rc = cmd->hdr.return_code; 1542 u16 sbp_rc = sbp->hdr.return_code; 1543 int rc; 1544 1545 if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS) 1546 return 0; 1547 1548 if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) || 1549 (!IS_IQD(card) && ipa_rc == sbp_rc)) { 1550 switch (sbp_rc) { 1551 case IPA_RC_SUCCESS: 1552 rc = 0; 1553 break; 1554 case IPA_RC_L2_UNSUPPORTED_CMD: 1555 case IPA_RC_UNSUPPORTED_COMMAND: 1556 rc = -EOPNOTSUPP; 1557 break; 1558 case IPA_RC_SBP_OSA_NOT_CONFIGURED: 1559 case IPA_RC_SBP_IQD_NOT_CONFIGURED: 1560 rc = -ENODEV; /* maybe not the best code here? */ 1561 dev_err(&card->gdev->dev, 1562 "The device is not configured as a Bridge Port\n"); 1563 break; 1564 case IPA_RC_SBP_OSA_OS_MISMATCH: 1565 case IPA_RC_SBP_IQD_OS_MISMATCH: 1566 rc = -EPERM; 1567 dev_err(&card->gdev->dev, 1568 "A Bridge Port is already configured by a different operating system\n"); 1569 break; 1570 case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY: 1571 case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY: 1572 switch (setcmd) { 1573 case IPA_SBP_SET_PRIMARY_BRIDGE_PORT: 1574 rc = -EEXIST; 1575 dev_err(&card->gdev->dev, 1576 "The LAN already has a primary Bridge Port\n"); 1577 break; 1578 case IPA_SBP_SET_SECONDARY_BRIDGE_PORT: 1579 rc = -EBUSY; 1580 dev_err(&card->gdev->dev, 1581 "The device is already a primary Bridge Port\n"); 1582 break; 1583 default: 1584 rc = -EIO; 1585 } 1586 break; 1587 case IPA_RC_SBP_OSA_CURRENT_SECOND: 1588 case IPA_RC_SBP_IQD_CURRENT_SECOND: 1589 rc = -EBUSY; 1590 dev_err(&card->gdev->dev, 1591 "The device is already a secondary Bridge Port\n"); 1592 break; 1593 case IPA_RC_SBP_OSA_LIMIT_SECOND: 1594 case IPA_RC_SBP_IQD_LIMIT_SECOND: 1595 rc = -EEXIST; 1596 dev_err(&card->gdev->dev, 1597 "The LAN cannot have more secondary Bridge Ports\n"); 1598 break; 1599 case IPA_RC_SBP_OSA_CURRENT_PRIMARY: 1600 case IPA_RC_SBP_IQD_CURRENT_PRIMARY: 1601 rc = -EBUSY; 1602 dev_err(&card->gdev->dev, 1603 "The device is already a primary Bridge Port\n"); 1604 break; 1605 case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN: 1606 case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN: 1607 rc = -EACCES; 1608 dev_err(&card->gdev->dev, 1609 "The device is not authorized to be a Bridge Port\n"); 1610 break; 1611 default: 1612 rc = -EIO; 1613 } 1614 } else { 1615 switch (ipa_rc) { 1616 case IPA_RC_NOTSUPP: 1617 rc = -EOPNOTSUPP; 1618 break; 1619 case IPA_RC_UNSUPPORTED_COMMAND: 1620 rc = -EOPNOTSUPP; 1621 break; 1622 default: 1623 rc = -EIO; 1624 } 1625 } 1626 1627 if (rc) { 1628 QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc); 1629 QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc); 1630 } 1631 return rc; 1632 } 1633 1634 static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card, 1635 enum qeth_ipa_sbp_cmd sbp_cmd, 1636 unsigned int data_length) 1637 { 1638 enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? 
IPA_CMD_SETBRIDGEPORT_IQD : 1639 IPA_CMD_SETBRIDGEPORT_OSA; 1640 struct qeth_ipacmd_sbp_hdr *hdr; 1641 struct qeth_cmd_buffer *iob; 1642 1643 iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE, 1644 data_length + 1645 offsetof(struct qeth_ipacmd_setbridgeport, 1646 data)); 1647 if (!iob) 1648 return iob; 1649 1650 hdr = &__ipa_cmd(iob)->data.sbp.hdr; 1651 hdr->cmdlength = sizeof(*hdr) + data_length; 1652 hdr->command_code = sbp_cmd; 1653 hdr->used_total = 1; 1654 hdr->seq_no = 1; 1655 return iob; 1656 } 1657 1658 static int qeth_bridgeport_query_support_cb(struct qeth_card *card, 1659 struct qeth_reply *reply, unsigned long data) 1660 { 1661 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 1662 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; 1663 int rc; 1664 1665 QETH_CARD_TEXT(card, 2, "brqsupcb"); 1666 rc = qeth_bridgeport_makerc(card, cmd); 1667 if (rc) 1668 return rc; 1669 1670 cbctl->data.supported = 1671 cmd->data.sbp.data.query_cmds_supp.supported_cmds; 1672 return 0; 1673 } 1674 1675 /** 1676 * qeth_bridgeport_query_support() - store bitmask of supported subfunctions. 1677 * @card: qeth_card structure pointer. 1678 * 1679 * Sets bitmask of supported setbridgeport subfunctions in the qeth_card 1680 * strucutre: card->options.sbp.supported_funcs. 1681 */ 1682 static void qeth_bridgeport_query_support(struct qeth_card *card) 1683 { 1684 struct qeth_cmd_buffer *iob; 1685 struct _qeth_sbp_cbctl cbctl; 1686 1687 QETH_CARD_TEXT(card, 2, "brqsuppo"); 1688 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED, 1689 SBP_DATA_SIZEOF(query_cmds_supp)); 1690 if (!iob) 1691 return; 1692 1693 if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, 1694 &cbctl)) { 1695 card->options.sbp.role = QETH_SBP_ROLE_NONE; 1696 card->options.sbp.supported_funcs = 0; 1697 return; 1698 } 1699 card->options.sbp.supported_funcs = cbctl.data.supported; 1700 } 1701 1702 static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, 1703 struct qeth_reply *reply, unsigned long data) 1704 { 1705 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 1706 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; 1707 struct qeth_sbp_port_data *qports; 1708 int rc; 1709 1710 QETH_CARD_TEXT(card, 2, "brqprtcb"); 1711 rc = qeth_bridgeport_makerc(card, cmd); 1712 if (rc) 1713 return rc; 1714 1715 qports = &cmd->data.sbp.data.port_data; 1716 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { 1717 QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length); 1718 return -EINVAL; 1719 } 1720 /* first entry contains the state of the local port */ 1721 if (qports->num_entries > 0) { 1722 if (cbctl->data.qports.role) 1723 *cbctl->data.qports.role = qports->entry[0].role; 1724 if (cbctl->data.qports.state) 1725 *cbctl->data.qports.state = qports->entry[0].state; 1726 } 1727 return 0; 1728 } 1729 1730 /** 1731 * qeth_bridgeport_query_ports() - query local bridgeport status. 1732 * @card: qeth_card structure pointer. 1733 * @role: Role of the port: 0-none, 1-primary, 2-secondary. 1734 * @state: State of the port: 0-inactive, 1-standby, 2-active. 1735 * 1736 * Returns negative errno-compatible error indication or 0 on success. 1737 * 1738 * 'role' and 'state' are not updated in case of hardware operation failure. 
1739 */ 1740 int qeth_bridgeport_query_ports(struct qeth_card *card, 1741 enum qeth_sbp_roles *role, enum qeth_sbp_states *state) 1742 { 1743 struct qeth_cmd_buffer *iob; 1744 struct _qeth_sbp_cbctl cbctl = { 1745 .data = { 1746 .qports = { 1747 .role = role, 1748 .state = state, 1749 }, 1750 }, 1751 }; 1752 1753 QETH_CARD_TEXT(card, 2, "brqports"); 1754 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) 1755 return -EOPNOTSUPP; 1756 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0); 1757 if (!iob) 1758 return -ENOMEM; 1759 1760 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, 1761 &cbctl); 1762 } 1763 1764 static int qeth_bridgeport_set_cb(struct qeth_card *card, 1765 struct qeth_reply *reply, unsigned long data) 1766 { 1767 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 1768 1769 QETH_CARD_TEXT(card, 2, "brsetrcb"); 1770 return qeth_bridgeport_makerc(card, cmd); 1771 } 1772 1773 /** 1774 * qeth_bridgeport_setrole() - Assign primary role to the port. 1775 * @card: qeth_card structure pointer. 1776 * @role: Role to assign. 1777 * 1778 * Returns negative errno-compatible error indication or 0 on success. 1779 */ 1780 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) 1781 { 1782 struct qeth_cmd_buffer *iob; 1783 enum qeth_ipa_sbp_cmd setcmd; 1784 unsigned int cmdlength = 0; 1785 1786 QETH_CARD_TEXT(card, 2, "brsetrol"); 1787 switch (role) { 1788 case QETH_SBP_ROLE_NONE: 1789 setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE; 1790 break; 1791 case QETH_SBP_ROLE_PRIMARY: 1792 setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT; 1793 cmdlength = SBP_DATA_SIZEOF(set_primary); 1794 break; 1795 case QETH_SBP_ROLE_SECONDARY: 1796 setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT; 1797 break; 1798 default: 1799 return -EINVAL; 1800 } 1801 if (!(card->options.sbp.supported_funcs & setcmd)) 1802 return -EOPNOTSUPP; 1803 iob = qeth_sbp_build_cmd(card, setcmd, cmdlength); 1804 if (!iob) 1805 return -ENOMEM; 1806 1807 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL); 1808 } 1809 1810 static void qeth_bridgeport_an_set_cb(void *priv, 1811 struct chsc_pnso_naid_l2 *entry) 1812 { 1813 struct qeth_card *card = (struct qeth_card *)priv; 1814 u8 code; 1815 1816 code = IPA_ADDR_CHANGE_CODE_MACADDR; 1817 if (entry->addr_lnid.lnid < VLAN_N_VID) 1818 code |= IPA_ADDR_CHANGE_CODE_VLANID; 1819 qeth_bridge_emit_host_event(card, anev_reg_unreg, code, 1820 (struct net_if_token *)&entry->nit, 1821 (struct mac_addr_lnid *)&entry->addr_lnid); 1822 } 1823 1824 /** 1825 * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification 1826 * @card: qeth_card structure pointer. 1827 * @enable: 0 - disable, non-zero - enable notifications 1828 * 1829 * Returns negative errno-compatible error indication or 0 on success. 1830 * 1831 * On enable, emits a series of address notifications udev events for all 1832 * currently registered hosts. 
1833 */ 1834 int qeth_bridgeport_an_set(struct qeth_card *card, int enable) 1835 { 1836 int rc; 1837 1838 if (!card->options.sbp.supported_funcs) 1839 return -EOPNOTSUPP; 1840 1841 if (enable) { 1842 qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL); 1843 qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT); 1844 rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1, 1845 qeth_bridgeport_an_set_cb, card); 1846 if (rc) 1847 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); 1848 } else { 1849 rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL); 1850 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); 1851 } 1852 return rc; 1853 } 1854 1855 /* VNIC Characteristics support */ 1856 1857 /* handle VNICC IPA command return codes; convert to error codes */ 1858 static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc) 1859 { 1860 int rc; 1861 1862 switch (ipa_rc) { 1863 case IPA_RC_SUCCESS: 1864 return ipa_rc; 1865 case IPA_RC_L2_UNSUPPORTED_CMD: 1866 case IPA_RC_NOTSUPP: 1867 rc = -EOPNOTSUPP; 1868 break; 1869 case IPA_RC_VNICC_OOSEQ: 1870 rc = -EALREADY; 1871 break; 1872 case IPA_RC_VNICC_VNICBP: 1873 rc = -EBUSY; 1874 break; 1875 case IPA_RC_L2_ADDR_TABLE_FULL: 1876 rc = -ENOSPC; 1877 break; 1878 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: 1879 rc = -EACCES; 1880 break; 1881 default: 1882 rc = -EIO; 1883 } 1884 1885 QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc); 1886 return rc; 1887 } 1888 1889 /* generic VNICC request call back */ 1890 static int qeth_l2_vnicc_request_cb(struct qeth_card *card, 1891 struct qeth_reply *reply, 1892 unsigned long data) 1893 { 1894 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 1895 struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc; 1896 u32 sub_cmd = cmd->data.vnicc.hdr.sub_command; 1897 1898 QETH_CARD_TEXT(card, 2, "vniccrcb"); 1899 if (cmd->hdr.return_code) 1900 return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code); 1901 /* return results to caller */ 1902 card->options.vnicc.sup_chars = rep->vnicc_cmds.supported; 1903 card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled; 1904 1905 if (sub_cmd == IPA_VNICC_QUERY_CMDS) 1906 *(u32 *)reply->param = rep->data.query_cmds.sup_cmds; 1907 else if (sub_cmd == IPA_VNICC_GET_TIMEOUT) 1908 *(u32 *)reply->param = rep->data.getset_timeout.timeout; 1909 1910 return 0; 1911 } 1912 1913 static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card, 1914 u32 vnicc_cmd, 1915 unsigned int data_length) 1916 { 1917 struct qeth_ipacmd_vnicc_hdr *hdr; 1918 struct qeth_cmd_buffer *iob; 1919 1920 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE, 1921 data_length + 1922 offsetof(struct qeth_ipacmd_vnicc, data)); 1923 if (!iob) 1924 return NULL; 1925 1926 hdr = &__ipa_cmd(iob)->data.vnicc.hdr; 1927 hdr->data_length = sizeof(*hdr) + data_length; 1928 hdr->sub_command = vnicc_cmd; 1929 return iob; 1930 } 1931 1932 /* VNICC query VNIC characteristics request */ 1933 static int qeth_l2_vnicc_query_chars(struct qeth_card *card) 1934 { 1935 struct qeth_cmd_buffer *iob; 1936 1937 QETH_CARD_TEXT(card, 2, "vniccqch"); 1938 iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0); 1939 if (!iob) 1940 return -ENOMEM; 1941 1942 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL); 1943 } 1944 1945 /* VNICC query sub commands request */ 1946 static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, 1947 u32 *sup_cmds) 1948 { 1949 struct qeth_cmd_buffer *iob; 1950 1951 QETH_CARD_TEXT(card, 2, "vniccqcm"); 1952 iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS, 
				      VNICC_DATA_SIZEOF(query_cmds));
	if (!iob)
		return -ENOMEM;

	__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;

	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
}

/* VNICC enable/disable characteristic request */
static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
				  u32 cmd)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "vniccedc");
	iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char));
	if (!iob)
		return -ENOMEM;

	__ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;

	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
}

/* VNICC get/set timeout for characteristic request */
static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
					u32 cmd, u32 *timeout)
{
	struct qeth_vnicc_getset_timeout *getset_timeout;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "vniccgst");
	iob = qeth_l2_vnicc_build_cmd(card, cmd,
				      VNICC_DATA_SIZEOF(getset_timeout));
	if (!iob)
		return -ENOMEM;

	getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout;
	getset_timeout->vnic_char = vnicc;

	if (cmd == IPA_VNICC_SET_TIMEOUT)
		getset_timeout->timeout = *timeout;

	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}

/* recover user timeout setting */
static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
					  u32 *timeout)
{
	if (card->options.vnicc.sup_chars & vnicc &&
	    card->options.vnicc.getset_timeout_sup & vnicc &&
	    !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
					  timeout))
		return false;
	*timeout = QETH_VNICC_DEFAULT_TIMEOUT;
	return true;
}

/* set current VNICC flag state; called from sysfs store function */
int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
{
	int rc = 0;
	u32 cmd;

	QETH_CARD_TEXT(card, 2, "vniccsch");

	/* check if characteristic and enable/disable are supported */
	if (!(card->options.vnicc.sup_chars & vnicc) ||
	    !(card->options.vnicc.set_char_sup & vnicc))
		return -EOPNOTSUPP;

	if (qeth_bridgeport_is_in_use(card))
		return -EBUSY;

	/* set enable/disable command and store wanted characteristic */
	if (state) {
		cmd = IPA_VNICC_ENABLE;
		card->options.vnicc.wanted_chars |= vnicc;
	} else {
		cmd = IPA_VNICC_DISABLE;
		card->options.vnicc.wanted_chars &= ~vnicc;
	}

	/* do we need to do anything? */
	if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars)
		return rc;

	/* if card is not ready, simply stop here */
	if (!qeth_card_hw_is_reachable(card)) {
		if (state)
			card->options.vnicc.cur_chars |= vnicc;
		else
			card->options.vnicc.cur_chars &= ~vnicc;
		return rc;
	}

	rc = qeth_l2_vnicc_set_char(card, vnicc, cmd);
	if (rc)
		card->options.vnicc.wanted_chars =
			card->options.vnicc.cur_chars;
	else {
		/* successful online VNICC change; handle special cases */
		if (state && vnicc == QETH_VNICC_RX_BCAST)
			card->options.vnicc.rx_bcast_enabled = true;
		if (!state && vnicc == QETH_VNICC_LEARNING)
			qeth_l2_vnicc_recover_timeout(card, vnicc,
					&card->options.vnicc.learning_timeout);
	}

	return rc;
}

/* get current VNICC flag state; called from sysfs show function */
int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vniccgch");

	/* check if characteristic is supported */
	if (!(card->options.vnicc.sup_chars & vnicc))
		return -EOPNOTSUPP;

	if (qeth_bridgeport_is_in_use(card))
		return -EBUSY;

	/* if card is ready, query current VNICC state */
	if (qeth_card_hw_is_reachable(card))
		rc = qeth_l2_vnicc_query_chars(card);

	*state = (card->options.vnicc.cur_chars & vnicc) ? true : false;
	return rc;
}

/* set VNICC timeout; called from sysfs store function. Currently, only
 * learning supports timeout
 */
int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vniccsto");

	/* check if characteristic and set_timeout are supported */
	if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
	    !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
		return -EOPNOTSUPP;

	if (qeth_bridgeport_is_in_use(card))
		return -EBUSY;

	/* do we need to do anything? */
	if (card->options.vnicc.learning_timeout == timeout)
		return rc;

	/* if card is not ready, simply store the value internally and return */
	if (!qeth_card_hw_is_reachable(card)) {
		card->options.vnicc.learning_timeout = timeout;
		return rc;
	}

	/* send timeout value to card; if successful, store value internally */
	rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
					  IPA_VNICC_SET_TIMEOUT, &timeout);
	if (!rc)
		card->options.vnicc.learning_timeout = timeout;

	return rc;
}

/* get current VNICC timeout; called from sysfs show function. Currently, only
 * learning supports timeout
 */
int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vniccgto");

	/* check if characteristic and get_timeout are supported */
	if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
	    !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
		return -EOPNOTSUPP;

	if (qeth_bridgeport_is_in_use(card))
		return -EBUSY;

	/* if card is ready, get timeout. Otherwise, just return stored value */
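	/* on the "get" path, qeth_l2_vnicc_request_cb() overwrites *timeout
	 * with the value reported by the card (via reply->param)
	 */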
	*timeout = card->options.vnicc.learning_timeout;
	if (qeth_card_hw_is_reachable(card))
		rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
						  IPA_VNICC_GET_TIMEOUT,
						  timeout);

	return rc;
}

/* check if VNICC is currently enabled */
static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
{
	if (!card->options.vnicc.sup_chars)
		return false;
	/* default values are only OK if rx_bcast was not enabled by user
	 * or the card is offline.
	 */
	if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) {
		if (!card->options.vnicc.rx_bcast_enabled ||
		    !qeth_card_hw_is_reachable(card))
			return false;
	}
	return true;
}

/**
 * qeth_bridgeport_allowed - are any qeth_bridgeport functions allowed?
 * @card: qeth_card structure pointer
 *
 * qeth_bridgeport functionality is mutually exclusive with usage of the
 * VNIC Characteristics and dev2br address notifications
 */
bool qeth_bridgeport_allowed(struct qeth_card *card)
{
	struct qeth_priv *priv = netdev_priv(card->dev);

	return (!_qeth_l2_vnicc_is_in_use(card) &&
		!(priv->brport_features & BR_LEARNING_SYNC));
}

/* recover user characteristic setting */
static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
				       bool enable)
{
	u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE;

	if (card->options.vnicc.sup_chars & vnicc &&
	    card->options.vnicc.set_char_sup & vnicc &&
	    !qeth_l2_vnicc_set_char(card, vnicc, cmd))
		return false;
	card->options.vnicc.wanted_chars &= ~vnicc;
	card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc;
	return true;
}

/* (re-)initialize VNICC */
static void qeth_l2_vnicc_init(struct qeth_card *card)
{
	u32 *timeout = &card->options.vnicc.learning_timeout;
	bool enable, error = false;
	unsigned int chars_len, i;
	unsigned long chars_tmp;
	u32 sup_cmds, vnicc;

	QETH_CARD_TEXT(card, 2, "vniccini");
	/* reset rx_bcast */
	card->options.vnicc.rx_bcast_enabled = 0;
	/* initial query and storage of VNIC characteristics */
	if (qeth_l2_vnicc_query_chars(card)) {
		if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT ||
		    *timeout != QETH_VNICC_DEFAULT_TIMEOUT)
			dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
		/* fail quietly if user didn't change the default config */
		card->options.vnicc.sup_chars = 0;
		card->options.vnicc.cur_chars = 0;
		card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
		return;
	}
	/* get supported commands for each supported characteristic */
	chars_tmp = card->options.vnicc.sup_chars;
	chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
	for_each_set_bit(i, &chars_tmp, chars_len) {
		vnicc = BIT(i);
		if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
			sup_cmds = 0;
			error = true;
		}
		if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
		    (sup_cmds & IPA_VNICC_GET_TIMEOUT))
			card->options.vnicc.getset_timeout_sup |= vnicc;
		else
			card->options.vnicc.getset_timeout_sup &= ~vnicc;
		if ((sup_cmds & IPA_VNICC_ENABLE) &&
		    (sup_cmds & IPA_VNICC_DISABLE))
			card->options.vnicc.set_char_sup |= vnicc;
		else
			card->options.vnicc.set_char_sup &= ~vnicc;
	}
	/* enforce assumed default values and recover settings, if changed */
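	/* each recover helper re-applies the wanted setting to the card; if
	 * the characteristic is unsupported or the command fails, the wanted
	 * value falls back to its default and the error is reported below
	 */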
	error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
					       timeout);
	/* Change chars, if necessary */
	chars_tmp = card->options.vnicc.wanted_chars ^
		    card->options.vnicc.cur_chars;
	chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
	for_each_set_bit(i, &chars_tmp, chars_len) {
		vnicc = BIT(i);
		enable = card->options.vnicc.wanted_chars & vnicc;
		error |= qeth_l2_vnicc_recover_char(card, vnicc, enable);
	}
	if (error)
		dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
}

/* configure default values of VNIC characteristics */
static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
{
	/* characteristics values */
	card->options.vnicc.sup_chars = QETH_VNICC_ALL;
	card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT;
	card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT;
	/* supported commands */
	card->options.vnicc.set_char_sup = QETH_VNICC_ALL;
	card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING;
	/* settings wanted by users */
	card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
}

static const struct device_type qeth_l2_devtype = {
	.name = "qeth_layer2",
	.groups = qeth_l2_attr_groups,
};

static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	qeth_l2_vnicc_set_defaults(card);
	mutex_init(&card->sbp_lock);

	if (gdev->dev.type) {
		rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
		if (rc)
			return rc;
	} else {
		gdev->dev.type = &qeth_l2_devtype;
	}

	INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
	return 0;
}

static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	struct qeth_priv *priv;

	if (gdev->dev.type != &qeth_l2_devtype)
		device_remove_groups(&gdev->dev, qeth_l2_attr_groups);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (gdev->state == CCWGROUP_ONLINE)
		qeth_set_offline(card, card->discipline, false);

	if (card->dev->reg_state == NETREG_REGISTERED) {
		priv = netdev_priv(card->dev);
		if (priv->brport_features & BR_LEARNING_SYNC) {
			rtnl_lock();
			qeth_l2_br2dev_put();
			rtnl_unlock();
		}
		unregister_netdev(card->dev);
	}
}

static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
{
	struct net_device *dev = card->dev;
	int rc = 0;

	qeth_l2_detect_dev2br_support(card);

	mutex_lock(&card->sbp_lock);
	qeth_bridgeport_query_support(card);
	if (card->options.sbp.supported_funcs) {
		qeth_l2_setup_bridgeport_attrs(card);
		dev_info(&card->gdev->dev,
			 "The device represents a Bridge Capable Port\n");
	}
	mutex_unlock(&card->sbp_lock);

	qeth_l2_register_dev_addr(card);

	/* for the rx_bcast characteristic, init VNICC after setmac */
	qeth_l2_vnicc_init(card);

	qeth_l2_trace_features(card);

	/* softsetup */
	QETH_CARD_TEXT(card, 2, "softsetp");

	card->state = CARD_STATE_SOFTSETUP;

	qeth_set_allowed_threads(card, 0xffffffff, 0);

	if (dev->reg_state != NETREG_REGISTERED) {
		rc = qeth_l2_setup_netdev(card);
		if (rc)
			goto err_setup;

		if (carrier_ok)
			netif_carrier_on(dev);
	} else {
		rtnl_lock();
		rc = qeth_set_real_num_tx_queues(card,
						 qeth_tx_actual_queues(card));
		if (rc) {
			rtnl_unlock();
			goto err_set_queues;
		}

		if (carrier_ok)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		netif_device_attach(dev);
		qeth_enable_hw_features(dev);
		qeth_l2_enable_brport_features(card);

		if (card->info.open_when_online) {
			card->info.open_when_online = 0;
			dev_open(dev, NULL);
		}
		rtnl_unlock();
	}
	return 0;

err_set_queues:
err_setup:
	qeth_set_allowed_threads(card, 0, 1);
	card->state = CARD_STATE_DOWN;
	return rc;
}

static void qeth_l2_set_offline(struct qeth_card *card)
{
	struct qeth_priv *priv = netdev_priv(card->dev);

	qeth_set_allowed_threads(card, 0, 1);
	qeth_l2_drain_rx_mode_cache(card);

	if (card->state == CARD_STATE_SOFTSETUP)
		card->state = CARD_STATE_DOWN;

	qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
	if (priv->brport_features & BR_LEARNING_SYNC)
		qeth_l2_dev2br_fdb_flush(card);
}

/* Returns zero if the command is successfully "consumed" */
static int qeth_l2_control_event(struct qeth_card *card,
				 struct qeth_ipa_cmd *cmd)
{
	switch (cmd->hdr.command) {
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_SETBRIDGEPORT_IQD:
		if (cmd->data.sbp.hdr.command_code ==
		    IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
			qeth_bridge_state_change(card, cmd);
			return 0;
		}

		return 1;
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		qeth_addr_change_event(card, cmd);
		return 0;
	default:
		return 1;
	}
}

const struct qeth_discipline qeth_l2_discipline = {
	.setup = qeth_l2_probe_device,
	.remove = qeth_l2_remove_device,
	.set_online = qeth_l2_set_online,
	.set_offline = qeth_l2_set_offline,
	.control_event_handler = qeth_l2_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);

static int __init qeth_l2_init(void)
{
	pr_info("register layer 2 discipline\n");
	refcount_set(&qeth_l2_switchdev_notify_refcnt, 0);
	return 0;
}

static void __exit qeth_l2_exit(void)
{
	pr_info("unregister layer 2 discipline\n");
}

module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 2 discipline");
MODULE_LICENSE("GPL");
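
/*
 * Illustrative sketch (not part of the driver): the comments above note that
 * qeth_l2_vnicc_set_state(), qeth_l2_vnicc_set_timeout() and their "get"
 * counterparts are driven from sysfs show/store functions. A store handler
 * feeding qeth_l2_vnicc_set_timeout() could, roughly, look like the
 * hypothetical example below; the function name and the attribute wiring are
 * assumptions for illustration only, and the sketch is kept inside this
 * comment so it does not affect the build.
 *
 *	static ssize_t vnicc_timeout_store(struct device *dev,
 *					   struct device_attribute *attr,
 *					   const char *buf, size_t count)
 *	{
 *		struct qeth_card *card = dev_get_drvdata(dev);
 *		u32 timeout;
 *		int rc;
 *
 *		rc = kstrtou32(buf, 10, &timeout);
 *		if (rc)
 *			return rc;
 *
 *		rc = qeth_l2_vnicc_set_timeout(card, timeout);
 *		return rc ? rc : count;
 *	}
 */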