1 /* 2 * Copyright Gavin Shan, IBM Corporation 2016. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 */ 9 10 #include <linux/module.h> 11 #include <linux/kernel.h> 12 #include <linux/init.h> 13 #include <linux/netdevice.h> 14 #include <linux/skbuff.h> 15 16 #include <net/ncsi.h> 17 #include <net/net_namespace.h> 18 #include <net/sock.h> 19 #include <net/addrconf.h> 20 #include <net/ipv6.h> 21 #include <net/if_inet6.h> 22 #include <net/genetlink.h> 23 24 #include "internal.h" 25 #include "ncsi-pkt.h" 26 #include "ncsi-netlink.h" 27 28 LIST_HEAD(ncsi_dev_list); 29 DEFINE_SPINLOCK(ncsi_dev_lock); 30 31 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down) 32 { 33 struct ncsi_dev *nd = &ndp->ndev; 34 struct ncsi_package *np; 35 struct ncsi_channel *nc; 36 unsigned long flags; 37 38 nd->state = ncsi_dev_state_functional; 39 if (force_down) { 40 nd->link_up = 0; 41 goto report; 42 } 43 44 nd->link_up = 0; 45 NCSI_FOR_EACH_PACKAGE(ndp, np) { 46 NCSI_FOR_EACH_CHANNEL(np, nc) { 47 spin_lock_irqsave(&nc->lock, flags); 48 49 if (!list_empty(&nc->link) || 50 nc->state != NCSI_CHANNEL_ACTIVE) { 51 spin_unlock_irqrestore(&nc->lock, flags); 52 continue; 53 } 54 55 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) { 56 spin_unlock_irqrestore(&nc->lock, flags); 57 nd->link_up = 1; 58 goto report; 59 } 60 61 spin_unlock_irqrestore(&nc->lock, flags); 62 } 63 } 64 65 report: 66 nd->handler(nd); 67 } 68 69 static void ncsi_channel_monitor(struct timer_list *t) 70 { 71 struct ncsi_channel *nc = from_timer(nc, t, monitor.timer); 72 struct ncsi_package *np = nc->package; 73 struct ncsi_dev_priv *ndp = np->ndp; 74 struct ncsi_channel_mode *ncm; 75 struct ncsi_cmd_arg nca; 76 bool enabled, chained; 77 unsigned int monitor_state; 78 unsigned long flags; 79 int state, ret; 80 81 spin_lock_irqsave(&nc->lock, flags); 82 state = nc->state; 83 chained = !list_empty(&nc->link); 84 enabled = nc->monitor.enabled; 85 monitor_state = nc->monitor.state; 86 spin_unlock_irqrestore(&nc->lock, flags); 87 88 if (!enabled || chained) { 89 ncsi_stop_channel_monitor(nc); 90 return; 91 } 92 if (state != NCSI_CHANNEL_INACTIVE && 93 state != NCSI_CHANNEL_ACTIVE) { 94 ncsi_stop_channel_monitor(nc); 95 return; 96 } 97 98 switch (monitor_state) { 99 case NCSI_CHANNEL_MONITOR_START: 100 case NCSI_CHANNEL_MONITOR_RETRY: 101 nca.ndp = ndp; 102 nca.package = np->id; 103 nca.channel = nc->id; 104 nca.type = NCSI_PKT_CMD_GLS; 105 nca.req_flags = 0; 106 ret = ncsi_xmit_cmd(&nca); 107 if (ret) 108 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n", 109 ret); 110 break; 111 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX: 112 break; 113 default: 114 netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n", 115 nc->id); 116 if (!(ndp->flags & NCSI_DEV_HWA)) { 117 ncsi_report_link(ndp, true); 118 ndp->flags |= NCSI_DEV_RESHUFFLE; 119 } 120 121 ncsi_stop_channel_monitor(nc); 122 123 ncm = &nc->modes[NCSI_MODE_LINK]; 124 spin_lock_irqsave(&nc->lock, flags); 125 nc->state = NCSI_CHANNEL_INVISIBLE; 126 ncm->data[2] &= ~0x1; 127 spin_unlock_irqrestore(&nc->lock, flags); 128 129 spin_lock_irqsave(&ndp->lock, flags); 130 nc->state = NCSI_CHANNEL_ACTIVE; 131 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 132 spin_unlock_irqrestore(&ndp->lock, flags); 133 ncsi_process_next_channel(ndp); 134 return; 135 } 136 137 spin_lock_irqsave(&nc->lock, flags); 138 nc->monitor.state++; 139 spin_unlock_irqrestore(&nc->lock, flags); 140 mod_timer(&nc->monitor.timer, jiffies + HZ); 141 } 142 143 void ncsi_start_channel_monitor(struct ncsi_channel *nc) 144 { 145 unsigned long flags; 146 147 spin_lock_irqsave(&nc->lock, flags); 148 WARN_ON_ONCE(nc->monitor.enabled); 149 nc->monitor.enabled = true; 150 nc->monitor.state = NCSI_CHANNEL_MONITOR_START; 151 spin_unlock_irqrestore(&nc->lock, flags); 152 153 mod_timer(&nc->monitor.timer, jiffies + HZ); 154 } 155 156 void ncsi_stop_channel_monitor(struct ncsi_channel *nc) 157 { 158 unsigned long flags; 159 160 spin_lock_irqsave(&nc->lock, flags); 161 if (!nc->monitor.enabled) { 162 spin_unlock_irqrestore(&nc->lock, flags); 163 return; 164 } 165 nc->monitor.enabled = false; 166 spin_unlock_irqrestore(&nc->lock, flags); 167 168 del_timer_sync(&nc->monitor.timer); 169 } 170 171 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np, 172 unsigned char id) 173 { 174 struct ncsi_channel *nc; 175 176 NCSI_FOR_EACH_CHANNEL(np, nc) { 177 if (nc->id == id) 178 return nc; 179 } 180 181 return NULL; 182 } 183 184 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id) 185 { 186 struct ncsi_channel *nc, *tmp; 187 int index; 188 unsigned long flags; 189 190 nc = kzalloc(sizeof(*nc), GFP_ATOMIC); 191 if (!nc) 192 return NULL; 193 194 nc->id = id; 195 nc->package = np; 196 nc->state = NCSI_CHANNEL_INACTIVE; 197 nc->monitor.enabled = false; 198 timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0); 199 spin_lock_init(&nc->lock); 200 INIT_LIST_HEAD(&nc->link); 201 for (index = 0; index < NCSI_CAP_MAX; index++) 202 nc->caps[index].index = index; 203 for (index = 0; index < NCSI_MODE_MAX; index++) 204 nc->modes[index].index = index; 205 206 spin_lock_irqsave(&np->lock, flags); 207 tmp = ncsi_find_channel(np, id); 208 if (tmp) { 209 spin_unlock_irqrestore(&np->lock, flags); 210 kfree(nc); 211 return tmp; 212 } 213 214 list_add_tail_rcu(&nc->node, &np->channels); 215 np->channel_num++; 216 spin_unlock_irqrestore(&np->lock, flags); 217 218 return nc; 219 } 220 221 static void ncsi_remove_channel(struct ncsi_channel *nc) 222 { 223 struct ncsi_package *np = nc->package; 224 unsigned long flags; 225 226 spin_lock_irqsave(&nc->lock, flags); 227 228 /* Release filters */ 229 kfree(nc->mac_filter.addrs); 230 kfree(nc->vlan_filter.vids); 231 232 nc->state = NCSI_CHANNEL_INACTIVE; 233 spin_unlock_irqrestore(&nc->lock, flags); 234 ncsi_stop_channel_monitor(nc); 235 236 /* Remove and free channel */ 237 spin_lock_irqsave(&np->lock, flags); 238 list_del_rcu(&nc->node); 239 np->channel_num--; 240 spin_unlock_irqrestore(&np->lock, flags); 241 242 kfree(nc); 243 } 244 245 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp, 246 unsigned char id) 247 { 248 struct ncsi_package *np; 249 250 NCSI_FOR_EACH_PACKAGE(ndp, np) { 251 if (np->id == id) 252 return np; 253 } 254 255 return NULL; 256 } 257 258 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp, 259 unsigned char id) 260 { 261 struct ncsi_package *np, *tmp; 262 unsigned long flags; 263 264 np = kzalloc(sizeof(*np), GFP_ATOMIC); 265 if (!np) 266 return NULL; 267 268 np->id = id; 269 np->ndp = ndp; 270 spin_lock_init(&np->lock); 271 INIT_LIST_HEAD(&np->channels); 272 273 spin_lock_irqsave(&ndp->lock, flags); 274 tmp = ncsi_find_package(ndp, id); 275 if (tmp) { 276 spin_unlock_irqrestore(&ndp->lock, flags); 277 kfree(np); 278 return tmp; 279 } 280 281 list_add_tail_rcu(&np->node, &ndp->packages); 282 ndp->package_num++; 283 spin_unlock_irqrestore(&ndp->lock, flags); 284 285 return np; 286 } 287 288 void ncsi_remove_package(struct ncsi_package *np) 289 { 290 struct ncsi_dev_priv *ndp = np->ndp; 291 struct ncsi_channel *nc, *tmp; 292 unsigned long flags; 293 294 /* Release all child channels */ 295 list_for_each_entry_safe(nc, tmp, &np->channels, node) 296 ncsi_remove_channel(nc); 297 298 /* Remove and free package */ 299 spin_lock_irqsave(&ndp->lock, flags); 300 list_del_rcu(&np->node); 301 ndp->package_num--; 302 spin_unlock_irqrestore(&ndp->lock, flags); 303 304 kfree(np); 305 } 306 307 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp, 308 unsigned char id, 309 struct ncsi_package **np, 310 struct ncsi_channel **nc) 311 { 312 struct ncsi_package *p; 313 struct ncsi_channel *c; 314 315 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id)); 316 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL; 317 318 if (np) 319 *np = p; 320 if (nc) 321 *nc = c; 322 } 323 324 /* For two consecutive NCSI commands, the packet IDs shouldn't 325 * be same. Otherwise, the bogus response might be replied. So 326 * the available IDs are allocated in round-robin fashion. 327 */ 328 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, 329 unsigned int req_flags) 330 { 331 struct ncsi_request *nr = NULL; 332 int i, limit = ARRAY_SIZE(ndp->requests); 333 unsigned long flags; 334 335 /* Check if there is one available request until the ceiling */ 336 spin_lock_irqsave(&ndp->lock, flags); 337 for (i = ndp->request_id; i < limit; i++) { 338 if (ndp->requests[i].used) 339 continue; 340 341 nr = &ndp->requests[i]; 342 nr->used = true; 343 nr->flags = req_flags; 344 ndp->request_id = i + 1; 345 goto found; 346 } 347 348 /* Fail back to check from the starting cursor */ 349 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) { 350 if (ndp->requests[i].used) 351 continue; 352 353 nr = &ndp->requests[i]; 354 nr->used = true; 355 nr->flags = req_flags; 356 ndp->request_id = i + 1; 357 goto found; 358 } 359 360 found: 361 spin_unlock_irqrestore(&ndp->lock, flags); 362 return nr; 363 } 364 365 void ncsi_free_request(struct ncsi_request *nr) 366 { 367 struct ncsi_dev_priv *ndp = nr->ndp; 368 struct sk_buff *cmd, *rsp; 369 unsigned long flags; 370 bool driven; 371 372 if (nr->enabled) { 373 nr->enabled = false; 374 del_timer_sync(&nr->timer); 375 } 376 377 spin_lock_irqsave(&ndp->lock, flags); 378 cmd = nr->cmd; 379 rsp = nr->rsp; 380 nr->cmd = NULL; 381 nr->rsp = NULL; 382 nr->used = false; 383 driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN); 384 spin_unlock_irqrestore(&ndp->lock, flags); 385 386 if (driven && cmd && --ndp->pending_req_num == 0) 387 schedule_work(&ndp->work); 388 389 /* Release command and response */ 390 consume_skb(cmd); 391 consume_skb(rsp); 392 } 393 394 struct ncsi_dev *ncsi_find_dev(struct net_device *dev) 395 { 396 struct ncsi_dev_priv *ndp; 397 398 NCSI_FOR_EACH_DEV(ndp) { 399 if (ndp->ndev.dev == dev) 400 return &ndp->ndev; 401 } 402 403 return NULL; 404 } 405 406 static void ncsi_request_timeout(struct timer_list *t) 407 { 408 struct ncsi_request *nr = from_timer(nr, t, timer); 409 struct ncsi_dev_priv *ndp = nr->ndp; 410 struct ncsi_cmd_pkt *cmd; 411 struct ncsi_package *np; 412 struct ncsi_channel *nc; 413 unsigned long flags; 414 415 /* If the request already had associated response, 416 * let the response handler to release it. 417 */ 418 spin_lock_irqsave(&ndp->lock, flags); 419 nr->enabled = false; 420 if (nr->rsp || !nr->cmd) { 421 spin_unlock_irqrestore(&ndp->lock, flags); 422 return; 423 } 424 spin_unlock_irqrestore(&ndp->lock, flags); 425 426 if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) { 427 if (nr->cmd) { 428 /* Find the package */ 429 cmd = (struct ncsi_cmd_pkt *) 430 skb_network_header(nr->cmd); 431 ncsi_find_package_and_channel(ndp, 432 cmd->cmd.common.channel, 433 &np, &nc); 434 ncsi_send_netlink_timeout(nr, np, nc); 435 } 436 } 437 438 /* Release the request */ 439 ncsi_free_request(nr); 440 } 441 442 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp) 443 { 444 struct ncsi_dev *nd = &ndp->ndev; 445 struct ncsi_package *np = ndp->active_package; 446 struct ncsi_channel *nc = ndp->active_channel; 447 struct ncsi_cmd_arg nca; 448 unsigned long flags; 449 int ret; 450 451 nca.ndp = ndp; 452 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN; 453 switch (nd->state) { 454 case ncsi_dev_state_suspend: 455 nd->state = ncsi_dev_state_suspend_select; 456 /* Fall through */ 457 case ncsi_dev_state_suspend_select: 458 ndp->pending_req_num = 1; 459 460 nca.type = NCSI_PKT_CMD_SP; 461 nca.package = np->id; 462 nca.channel = NCSI_RESERVED_CHANNEL; 463 if (ndp->flags & NCSI_DEV_HWA) 464 nca.bytes[0] = 0; 465 else 466 nca.bytes[0] = 1; 467 468 /* To retrieve the last link states of channels in current 469 * package when current active channel needs fail over to 470 * another one. It means we will possibly select another 471 * channel as next active one. The link states of channels 472 * are most important factor of the selection. So we need 473 * accurate link states. Unfortunately, the link states on 474 * inactive channels can't be updated with LSC AEN in time. 475 */ 476 if (ndp->flags & NCSI_DEV_RESHUFFLE) 477 nd->state = ncsi_dev_state_suspend_gls; 478 else 479 nd->state = ncsi_dev_state_suspend_dcnt; 480 ret = ncsi_xmit_cmd(&nca); 481 if (ret) 482 goto error; 483 484 break; 485 case ncsi_dev_state_suspend_gls: 486 ndp->pending_req_num = np->channel_num; 487 488 nca.type = NCSI_PKT_CMD_GLS; 489 nca.package = np->id; 490 491 nd->state = ncsi_dev_state_suspend_dcnt; 492 NCSI_FOR_EACH_CHANNEL(np, nc) { 493 nca.channel = nc->id; 494 ret = ncsi_xmit_cmd(&nca); 495 if (ret) 496 goto error; 497 } 498 499 break; 500 case ncsi_dev_state_suspend_dcnt: 501 ndp->pending_req_num = 1; 502 503 nca.type = NCSI_PKT_CMD_DCNT; 504 nca.package = np->id; 505 nca.channel = nc->id; 506 507 nd->state = ncsi_dev_state_suspend_dc; 508 ret = ncsi_xmit_cmd(&nca); 509 if (ret) 510 goto error; 511 512 break; 513 case ncsi_dev_state_suspend_dc: 514 ndp->pending_req_num = 1; 515 516 nca.type = NCSI_PKT_CMD_DC; 517 nca.package = np->id; 518 nca.channel = nc->id; 519 nca.bytes[0] = 1; 520 521 nd->state = ncsi_dev_state_suspend_deselect; 522 ret = ncsi_xmit_cmd(&nca); 523 if (ret) 524 goto error; 525 526 break; 527 case ncsi_dev_state_suspend_deselect: 528 ndp->pending_req_num = 1; 529 530 nca.type = NCSI_PKT_CMD_DP; 531 nca.package = np->id; 532 nca.channel = NCSI_RESERVED_CHANNEL; 533 534 nd->state = ncsi_dev_state_suspend_done; 535 ret = ncsi_xmit_cmd(&nca); 536 if (ret) 537 goto error; 538 539 break; 540 case ncsi_dev_state_suspend_done: 541 spin_lock_irqsave(&nc->lock, flags); 542 nc->state = NCSI_CHANNEL_INACTIVE; 543 spin_unlock_irqrestore(&nc->lock, flags); 544 ncsi_process_next_channel(ndp); 545 546 break; 547 default: 548 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n", 549 nd->state); 550 } 551 552 return; 553 error: 554 nd->state = ncsi_dev_state_functional; 555 } 556 557 /* Check the VLAN filter bitmap for a set filter, and construct a 558 * "Set VLAN Filter - Disable" packet if found. 559 */ 560 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc, 561 struct ncsi_cmd_arg *nca) 562 { 563 struct ncsi_channel_vlan_filter *ncf; 564 unsigned long flags; 565 void *bitmap; 566 int index; 567 u16 vid; 568 569 ncf = &nc->vlan_filter; 570 bitmap = &ncf->bitmap; 571 572 spin_lock_irqsave(&nc->lock, flags); 573 index = find_next_bit(bitmap, ncf->n_vids, 0); 574 if (index >= ncf->n_vids) { 575 spin_unlock_irqrestore(&nc->lock, flags); 576 return -1; 577 } 578 vid = ncf->vids[index]; 579 580 clear_bit(index, bitmap); 581 ncf->vids[index] = 0; 582 spin_unlock_irqrestore(&nc->lock, flags); 583 584 nca->type = NCSI_PKT_CMD_SVF; 585 nca->words[1] = vid; 586 /* HW filter index starts at 1 */ 587 nca->bytes[6] = index + 1; 588 nca->bytes[7] = 0x00; 589 return 0; 590 } 591 592 /* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable" 593 * packet. 594 */ 595 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc, 596 struct ncsi_cmd_arg *nca) 597 { 598 struct ncsi_channel_vlan_filter *ncf; 599 struct vlan_vid *vlan = NULL; 600 unsigned long flags; 601 int i, index; 602 void *bitmap; 603 u16 vid; 604 605 if (list_empty(&ndp->vlan_vids)) 606 return -1; 607 608 ncf = &nc->vlan_filter; 609 bitmap = &ncf->bitmap; 610 611 spin_lock_irqsave(&nc->lock, flags); 612 613 rcu_read_lock(); 614 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { 615 vid = vlan->vid; 616 for (i = 0; i < ncf->n_vids; i++) 617 if (ncf->vids[i] == vid) { 618 vid = 0; 619 break; 620 } 621 if (vid) 622 break; 623 } 624 rcu_read_unlock(); 625 626 if (!vid) { 627 /* No VLAN ID is not set */ 628 spin_unlock_irqrestore(&nc->lock, flags); 629 return -1; 630 } 631 632 index = find_next_zero_bit(bitmap, ncf->n_vids, 0); 633 if (index < 0 || index >= ncf->n_vids) { 634 netdev_err(ndp->ndev.dev, 635 "Channel %u already has all VLAN filters set\n", 636 nc->id); 637 spin_unlock_irqrestore(&nc->lock, flags); 638 return -1; 639 } 640 641 ncf->vids[index] = vid; 642 set_bit(index, bitmap); 643 spin_unlock_irqrestore(&nc->lock, flags); 644 645 nca->type = NCSI_PKT_CMD_SVF; 646 nca->words[1] = vid; 647 /* HW filter index starts at 1 */ 648 nca->bytes[6] = index + 1; 649 nca->bytes[7] = 0x01; 650 651 return 0; 652 } 653 654 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) 655 656 /* NCSI OEM Command APIs */ 657 static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca) 658 { 659 unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN]; 660 int ret = 0; 661 662 nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN; 663 664 memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN); 665 *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID); 666 data[5] = NCSI_OEM_BCM_CMD_GMA; 667 668 nca->data = data; 669 670 ret = ncsi_xmit_cmd(nca); 671 if (ret) 672 netdev_err(nca->ndp->ndev.dev, 673 "NCSI: Failed to transmit cmd 0x%x during configure\n", 674 nca->type); 675 return ret; 676 } 677 678 /* OEM Command handlers initialization */ 679 static struct ncsi_oem_gma_handler { 680 unsigned int mfr_id; 681 int (*handler)(struct ncsi_cmd_arg *nca); 682 } ncsi_oem_gma_handlers[] = { 683 { NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm } 684 }; 685 686 static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id) 687 { 688 struct ncsi_oem_gma_handler *nch = NULL; 689 int i; 690 691 /* This function should only be called once, return if flag set */ 692 if (nca->ndp->gma_flag == 1) 693 return -1; 694 695 /* Find gma handler for given manufacturer id */ 696 for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) { 697 if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) { 698 if (ncsi_oem_gma_handlers[i].handler) 699 nch = &ncsi_oem_gma_handlers[i]; 700 break; 701 } 702 } 703 704 if (!nch) { 705 netdev_err(nca->ndp->ndev.dev, 706 "NCSI: No GMA handler available for MFR-ID (0x%x)\n", 707 mf_id); 708 return -1; 709 } 710 711 /* Set the flag for GMA command which should only be called once */ 712 nca->ndp->gma_flag = 1; 713 714 /* Get Mac address from NCSI device */ 715 return nch->handler(nca); 716 } 717 718 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */ 719 720 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) 721 { 722 struct ncsi_dev *nd = &ndp->ndev; 723 struct net_device *dev = nd->dev; 724 struct ncsi_package *np = ndp->active_package; 725 struct ncsi_channel *nc = ndp->active_channel; 726 struct ncsi_channel *hot_nc = NULL; 727 struct ncsi_cmd_arg nca; 728 unsigned char index; 729 unsigned long flags; 730 int ret; 731 732 nca.ndp = ndp; 733 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN; 734 switch (nd->state) { 735 case ncsi_dev_state_config: 736 case ncsi_dev_state_config_sp: 737 ndp->pending_req_num = 1; 738 739 /* Select the specific package */ 740 nca.type = NCSI_PKT_CMD_SP; 741 if (ndp->flags & NCSI_DEV_HWA) 742 nca.bytes[0] = 0; 743 else 744 nca.bytes[0] = 1; 745 nca.package = np->id; 746 nca.channel = NCSI_RESERVED_CHANNEL; 747 ret = ncsi_xmit_cmd(&nca); 748 if (ret) { 749 netdev_err(ndp->ndev.dev, 750 "NCSI: Failed to transmit CMD_SP\n"); 751 goto error; 752 } 753 754 nd->state = ncsi_dev_state_config_cis; 755 break; 756 case ncsi_dev_state_config_cis: 757 ndp->pending_req_num = 1; 758 759 /* Clear initial state */ 760 nca.type = NCSI_PKT_CMD_CIS; 761 nca.package = np->id; 762 nca.channel = nc->id; 763 ret = ncsi_xmit_cmd(&nca); 764 if (ret) { 765 netdev_err(ndp->ndev.dev, 766 "NCSI: Failed to transmit CMD_CIS\n"); 767 goto error; 768 } 769 770 nd->state = ncsi_dev_state_config_oem_gma; 771 break; 772 case ncsi_dev_state_config_oem_gma: 773 nd->state = ncsi_dev_state_config_clear_vids; 774 ret = -1; 775 776 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) 777 nca.type = NCSI_PKT_CMD_OEM; 778 nca.package = np->id; 779 nca.channel = nc->id; 780 ndp->pending_req_num = 1; 781 ret = ncsi_gma_handler(&nca, nc->version.mf_id); 782 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */ 783 784 if (ret < 0) 785 schedule_work(&ndp->work); 786 787 break; 788 case ncsi_dev_state_config_clear_vids: 789 case ncsi_dev_state_config_svf: 790 case ncsi_dev_state_config_ev: 791 case ncsi_dev_state_config_sma: 792 case ncsi_dev_state_config_ebf: 793 #if IS_ENABLED(CONFIG_IPV6) 794 case ncsi_dev_state_config_egmf: 795 #endif 796 case ncsi_dev_state_config_ecnt: 797 case ncsi_dev_state_config_ec: 798 case ncsi_dev_state_config_ae: 799 case ncsi_dev_state_config_gls: 800 ndp->pending_req_num = 1; 801 802 nca.package = np->id; 803 nca.channel = nc->id; 804 805 /* Clear any active filters on the channel before setting */ 806 if (nd->state == ncsi_dev_state_config_clear_vids) { 807 ret = clear_one_vid(ndp, nc, &nca); 808 if (ret) { 809 nd->state = ncsi_dev_state_config_svf; 810 schedule_work(&ndp->work); 811 break; 812 } 813 /* Repeat */ 814 nd->state = ncsi_dev_state_config_clear_vids; 815 /* Add known VLAN tags to the filter */ 816 } else if (nd->state == ncsi_dev_state_config_svf) { 817 ret = set_one_vid(ndp, nc, &nca); 818 if (ret) { 819 nd->state = ncsi_dev_state_config_ev; 820 schedule_work(&ndp->work); 821 break; 822 } 823 /* Repeat */ 824 nd->state = ncsi_dev_state_config_svf; 825 /* Enable/Disable the VLAN filter */ 826 } else if (nd->state == ncsi_dev_state_config_ev) { 827 if (list_empty(&ndp->vlan_vids)) { 828 nca.type = NCSI_PKT_CMD_DV; 829 } else { 830 nca.type = NCSI_PKT_CMD_EV; 831 nca.bytes[3] = NCSI_CAP_VLAN_NO; 832 } 833 nd->state = ncsi_dev_state_config_sma; 834 } else if (nd->state == ncsi_dev_state_config_sma) { 835 /* Use first entry in unicast filter table. Note that 836 * the MAC filter table starts from entry 1 instead of 837 * 0. 838 */ 839 nca.type = NCSI_PKT_CMD_SMA; 840 for (index = 0; index < 6; index++) 841 nca.bytes[index] = dev->dev_addr[index]; 842 nca.bytes[6] = 0x1; 843 nca.bytes[7] = 0x1; 844 nd->state = ncsi_dev_state_config_ebf; 845 } else if (nd->state == ncsi_dev_state_config_ebf) { 846 nca.type = NCSI_PKT_CMD_EBF; 847 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap; 848 nd->state = ncsi_dev_state_config_ecnt; 849 #if IS_ENABLED(CONFIG_IPV6) 850 if (ndp->inet6_addr_num > 0 && 851 (nc->caps[NCSI_CAP_GENERIC].cap & 852 NCSI_CAP_GENERIC_MC)) 853 nd->state = ncsi_dev_state_config_egmf; 854 else 855 nd->state = ncsi_dev_state_config_ecnt; 856 } else if (nd->state == ncsi_dev_state_config_egmf) { 857 nca.type = NCSI_PKT_CMD_EGMF; 858 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap; 859 nd->state = ncsi_dev_state_config_ecnt; 860 #endif /* CONFIG_IPV6 */ 861 } else if (nd->state == ncsi_dev_state_config_ecnt) { 862 nca.type = NCSI_PKT_CMD_ECNT; 863 nd->state = ncsi_dev_state_config_ec; 864 } else if (nd->state == ncsi_dev_state_config_ec) { 865 /* Enable AEN if it's supported */ 866 nca.type = NCSI_PKT_CMD_EC; 867 nd->state = ncsi_dev_state_config_ae; 868 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK)) 869 nd->state = ncsi_dev_state_config_gls; 870 } else if (nd->state == ncsi_dev_state_config_ae) { 871 nca.type = NCSI_PKT_CMD_AE; 872 nca.bytes[0] = 0; 873 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap; 874 nd->state = ncsi_dev_state_config_gls; 875 } else if (nd->state == ncsi_dev_state_config_gls) { 876 nca.type = NCSI_PKT_CMD_GLS; 877 nd->state = ncsi_dev_state_config_done; 878 } 879 880 ret = ncsi_xmit_cmd(&nca); 881 if (ret) { 882 netdev_err(ndp->ndev.dev, 883 "NCSI: Failed to transmit CMD %x\n", 884 nca.type); 885 goto error; 886 } 887 break; 888 case ncsi_dev_state_config_done: 889 netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n", 890 nc->id); 891 spin_lock_irqsave(&nc->lock, flags); 892 if (nc->reconfigure_needed) { 893 /* This channel's configuration has been updated 894 * part-way during the config state - start the 895 * channel configuration over 896 */ 897 nc->reconfigure_needed = false; 898 nc->state = NCSI_CHANNEL_INACTIVE; 899 spin_unlock_irqrestore(&nc->lock, flags); 900 901 spin_lock_irqsave(&ndp->lock, flags); 902 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 903 spin_unlock_irqrestore(&ndp->lock, flags); 904 905 netdev_dbg(dev, "Dirty NCSI channel state reset\n"); 906 ncsi_process_next_channel(ndp); 907 break; 908 } 909 910 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) { 911 hot_nc = nc; 912 nc->state = NCSI_CHANNEL_ACTIVE; 913 } else { 914 hot_nc = NULL; 915 nc->state = NCSI_CHANNEL_INACTIVE; 916 netdev_dbg(ndp->ndev.dev, 917 "NCSI: channel %u link down after config\n", 918 nc->id); 919 } 920 spin_unlock_irqrestore(&nc->lock, flags); 921 922 /* Update the hot channel */ 923 spin_lock_irqsave(&ndp->lock, flags); 924 ndp->hot_channel = hot_nc; 925 spin_unlock_irqrestore(&ndp->lock, flags); 926 927 ncsi_start_channel_monitor(nc); 928 ncsi_process_next_channel(ndp); 929 break; 930 default: 931 netdev_alert(dev, "Wrong NCSI state 0x%x in config\n", 932 nd->state); 933 } 934 935 return; 936 937 error: 938 ncsi_report_link(ndp, true); 939 } 940 941 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp) 942 { 943 struct ncsi_package *np, *force_package; 944 struct ncsi_channel *nc, *found, *hot_nc, *force_channel; 945 struct ncsi_channel_mode *ncm; 946 unsigned long flags; 947 948 spin_lock_irqsave(&ndp->lock, flags); 949 hot_nc = ndp->hot_channel; 950 force_channel = ndp->force_channel; 951 force_package = ndp->force_package; 952 spin_unlock_irqrestore(&ndp->lock, flags); 953 954 /* Force a specific channel whether or not it has link if we have been 955 * configured to do so 956 */ 957 if (force_package && force_channel) { 958 found = force_channel; 959 ncm = &found->modes[NCSI_MODE_LINK]; 960 if (!(ncm->data[2] & 0x1)) 961 netdev_info(ndp->ndev.dev, 962 "NCSI: Channel %u forced, but it is link down\n", 963 found->id); 964 goto out; 965 } 966 967 /* The search is done once an inactive channel with up 968 * link is found. 969 */ 970 found = NULL; 971 NCSI_FOR_EACH_PACKAGE(ndp, np) { 972 if (ndp->force_package && np != ndp->force_package) 973 continue; 974 NCSI_FOR_EACH_CHANNEL(np, nc) { 975 spin_lock_irqsave(&nc->lock, flags); 976 977 if (!list_empty(&nc->link) || 978 nc->state != NCSI_CHANNEL_INACTIVE) { 979 spin_unlock_irqrestore(&nc->lock, flags); 980 continue; 981 } 982 983 if (!found) 984 found = nc; 985 986 if (nc == hot_nc) 987 found = nc; 988 989 ncm = &nc->modes[NCSI_MODE_LINK]; 990 if (ncm->data[2] & 0x1) { 991 spin_unlock_irqrestore(&nc->lock, flags); 992 found = nc; 993 goto out; 994 } 995 996 spin_unlock_irqrestore(&nc->lock, flags); 997 } 998 } 999 1000 if (!found) { 1001 netdev_warn(ndp->ndev.dev, 1002 "NCSI: No channel found with link\n"); 1003 ncsi_report_link(ndp, true); 1004 return -ENODEV; 1005 } 1006 1007 ncm = &found->modes[NCSI_MODE_LINK]; 1008 netdev_dbg(ndp->ndev.dev, 1009 "NCSI: Channel %u added to queue (link %s)\n", 1010 found->id, ncm->data[2] & 0x1 ? "up" : "down"); 1011 1012 out: 1013 spin_lock_irqsave(&ndp->lock, flags); 1014 list_add_tail_rcu(&found->link, &ndp->channel_queue); 1015 spin_unlock_irqrestore(&ndp->lock, flags); 1016 1017 return ncsi_process_next_channel(ndp); 1018 } 1019 1020 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp) 1021 { 1022 struct ncsi_package *np; 1023 struct ncsi_channel *nc; 1024 unsigned int cap; 1025 bool has_channel = false; 1026 1027 /* The hardware arbitration is disabled if any one channel 1028 * doesn't support explicitly. 1029 */ 1030 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1031 NCSI_FOR_EACH_CHANNEL(np, nc) { 1032 has_channel = true; 1033 1034 cap = nc->caps[NCSI_CAP_GENERIC].cap; 1035 if (!(cap & NCSI_CAP_GENERIC_HWA) || 1036 (cap & NCSI_CAP_GENERIC_HWA_MASK) != 1037 NCSI_CAP_GENERIC_HWA_SUPPORT) { 1038 ndp->flags &= ~NCSI_DEV_HWA; 1039 return false; 1040 } 1041 } 1042 } 1043 1044 if (has_channel) { 1045 ndp->flags |= NCSI_DEV_HWA; 1046 return true; 1047 } 1048 1049 ndp->flags &= ~NCSI_DEV_HWA; 1050 return false; 1051 } 1052 1053 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp) 1054 { 1055 struct ncsi_package *np; 1056 struct ncsi_channel *nc; 1057 unsigned long flags; 1058 1059 /* Move all available channels to processing queue */ 1060 spin_lock_irqsave(&ndp->lock, flags); 1061 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1062 NCSI_FOR_EACH_CHANNEL(np, nc) { 1063 WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE || 1064 !list_empty(&nc->link)); 1065 ncsi_stop_channel_monitor(nc); 1066 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 1067 } 1068 } 1069 spin_unlock_irqrestore(&ndp->lock, flags); 1070 1071 /* We can have no channels in extremely case */ 1072 if (list_empty(&ndp->channel_queue)) { 1073 netdev_err(ndp->ndev.dev, 1074 "NCSI: No available channels for HWA\n"); 1075 ncsi_report_link(ndp, false); 1076 return -ENOENT; 1077 } 1078 1079 return ncsi_process_next_channel(ndp); 1080 } 1081 1082 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) 1083 { 1084 struct ncsi_dev *nd = &ndp->ndev; 1085 struct ncsi_package *np; 1086 struct ncsi_channel *nc; 1087 struct ncsi_cmd_arg nca; 1088 unsigned char index; 1089 int ret; 1090 1091 nca.ndp = ndp; 1092 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN; 1093 switch (nd->state) { 1094 case ncsi_dev_state_probe: 1095 nd->state = ncsi_dev_state_probe_deselect; 1096 /* Fall through */ 1097 case ncsi_dev_state_probe_deselect: 1098 ndp->pending_req_num = 8; 1099 1100 /* Deselect all possible packages */ 1101 nca.type = NCSI_PKT_CMD_DP; 1102 nca.channel = NCSI_RESERVED_CHANNEL; 1103 for (index = 0; index < 8; index++) { 1104 nca.package = index; 1105 ret = ncsi_xmit_cmd(&nca); 1106 if (ret) 1107 goto error; 1108 } 1109 1110 nd->state = ncsi_dev_state_probe_package; 1111 break; 1112 case ncsi_dev_state_probe_package: 1113 ndp->pending_req_num = 16; 1114 1115 /* Select all possible packages */ 1116 nca.type = NCSI_PKT_CMD_SP; 1117 nca.bytes[0] = 1; 1118 nca.channel = NCSI_RESERVED_CHANNEL; 1119 for (index = 0; index < 8; index++) { 1120 nca.package = index; 1121 ret = ncsi_xmit_cmd(&nca); 1122 if (ret) 1123 goto error; 1124 } 1125 1126 /* Disable all possible packages */ 1127 nca.type = NCSI_PKT_CMD_DP; 1128 for (index = 0; index < 8; index++) { 1129 nca.package = index; 1130 ret = ncsi_xmit_cmd(&nca); 1131 if (ret) 1132 goto error; 1133 } 1134 1135 nd->state = ncsi_dev_state_probe_channel; 1136 break; 1137 case ncsi_dev_state_probe_channel: 1138 if (!ndp->active_package) 1139 ndp->active_package = list_first_or_null_rcu( 1140 &ndp->packages, struct ncsi_package, node); 1141 else if (list_is_last(&ndp->active_package->node, 1142 &ndp->packages)) 1143 ndp->active_package = NULL; 1144 else 1145 ndp->active_package = list_next_entry( 1146 ndp->active_package, node); 1147 1148 /* All available packages and channels are enumerated. The 1149 * enumeration happens for once when the NCSI interface is 1150 * started. So we need continue to start the interface after 1151 * the enumeration. 1152 * 1153 * We have to choose an active channel before configuring it. 1154 * Note that we possibly don't have active channel in extreme 1155 * situation. 1156 */ 1157 if (!ndp->active_package) { 1158 ndp->flags |= NCSI_DEV_PROBED; 1159 if (ncsi_check_hwa(ndp)) 1160 ncsi_enable_hwa(ndp); 1161 else 1162 ncsi_choose_active_channel(ndp); 1163 return; 1164 } 1165 1166 /* Select the active package */ 1167 ndp->pending_req_num = 1; 1168 nca.type = NCSI_PKT_CMD_SP; 1169 nca.bytes[0] = 1; 1170 nca.package = ndp->active_package->id; 1171 nca.channel = NCSI_RESERVED_CHANNEL; 1172 ret = ncsi_xmit_cmd(&nca); 1173 if (ret) 1174 goto error; 1175 1176 nd->state = ncsi_dev_state_probe_cis; 1177 break; 1178 case ncsi_dev_state_probe_cis: 1179 ndp->pending_req_num = NCSI_RESERVED_CHANNEL; 1180 1181 /* Clear initial state */ 1182 nca.type = NCSI_PKT_CMD_CIS; 1183 nca.package = ndp->active_package->id; 1184 for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) { 1185 nca.channel = index; 1186 ret = ncsi_xmit_cmd(&nca); 1187 if (ret) 1188 goto error; 1189 } 1190 1191 nd->state = ncsi_dev_state_probe_gvi; 1192 break; 1193 case ncsi_dev_state_probe_gvi: 1194 case ncsi_dev_state_probe_gc: 1195 case ncsi_dev_state_probe_gls: 1196 np = ndp->active_package; 1197 ndp->pending_req_num = np->channel_num; 1198 1199 /* Retrieve version, capability or link status */ 1200 if (nd->state == ncsi_dev_state_probe_gvi) 1201 nca.type = NCSI_PKT_CMD_GVI; 1202 else if (nd->state == ncsi_dev_state_probe_gc) 1203 nca.type = NCSI_PKT_CMD_GC; 1204 else 1205 nca.type = NCSI_PKT_CMD_GLS; 1206 1207 nca.package = np->id; 1208 NCSI_FOR_EACH_CHANNEL(np, nc) { 1209 nca.channel = nc->id; 1210 ret = ncsi_xmit_cmd(&nca); 1211 if (ret) 1212 goto error; 1213 } 1214 1215 if (nd->state == ncsi_dev_state_probe_gvi) 1216 nd->state = ncsi_dev_state_probe_gc; 1217 else if (nd->state == ncsi_dev_state_probe_gc) 1218 nd->state = ncsi_dev_state_probe_gls; 1219 else 1220 nd->state = ncsi_dev_state_probe_dp; 1221 break; 1222 case ncsi_dev_state_probe_dp: 1223 ndp->pending_req_num = 1; 1224 1225 /* Deselect the active package */ 1226 nca.type = NCSI_PKT_CMD_DP; 1227 nca.package = ndp->active_package->id; 1228 nca.channel = NCSI_RESERVED_CHANNEL; 1229 ret = ncsi_xmit_cmd(&nca); 1230 if (ret) 1231 goto error; 1232 1233 /* Scan channels in next package */ 1234 nd->state = ncsi_dev_state_probe_channel; 1235 break; 1236 default: 1237 netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n", 1238 nd->state); 1239 } 1240 1241 return; 1242 error: 1243 netdev_err(ndp->ndev.dev, 1244 "NCSI: Failed to transmit cmd 0x%x during probe\n", 1245 nca.type); 1246 ncsi_report_link(ndp, true); 1247 } 1248 1249 static void ncsi_dev_work(struct work_struct *work) 1250 { 1251 struct ncsi_dev_priv *ndp = container_of(work, 1252 struct ncsi_dev_priv, work); 1253 struct ncsi_dev *nd = &ndp->ndev; 1254 1255 switch (nd->state & ncsi_dev_state_major) { 1256 case ncsi_dev_state_probe: 1257 ncsi_probe_channel(ndp); 1258 break; 1259 case ncsi_dev_state_suspend: 1260 ncsi_suspend_channel(ndp); 1261 break; 1262 case ncsi_dev_state_config: 1263 ncsi_configure_channel(ndp); 1264 break; 1265 default: 1266 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n", 1267 nd->state); 1268 } 1269 } 1270 1271 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp) 1272 { 1273 struct ncsi_channel *nc; 1274 int old_state; 1275 unsigned long flags; 1276 1277 spin_lock_irqsave(&ndp->lock, flags); 1278 nc = list_first_or_null_rcu(&ndp->channel_queue, 1279 struct ncsi_channel, link); 1280 if (!nc) { 1281 spin_unlock_irqrestore(&ndp->lock, flags); 1282 goto out; 1283 } 1284 1285 list_del_init(&nc->link); 1286 spin_unlock_irqrestore(&ndp->lock, flags); 1287 1288 spin_lock_irqsave(&nc->lock, flags); 1289 old_state = nc->state; 1290 nc->state = NCSI_CHANNEL_INVISIBLE; 1291 spin_unlock_irqrestore(&nc->lock, flags); 1292 1293 ndp->active_channel = nc; 1294 ndp->active_package = nc->package; 1295 1296 switch (old_state) { 1297 case NCSI_CHANNEL_INACTIVE: 1298 ndp->ndev.state = ncsi_dev_state_config; 1299 netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n", 1300 nc->id); 1301 ncsi_configure_channel(ndp); 1302 break; 1303 case NCSI_CHANNEL_ACTIVE: 1304 ndp->ndev.state = ncsi_dev_state_suspend; 1305 netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n", 1306 nc->id); 1307 ncsi_suspend_channel(ndp); 1308 break; 1309 default: 1310 netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n", 1311 old_state, nc->package->id, nc->id); 1312 ncsi_report_link(ndp, false); 1313 return -EINVAL; 1314 } 1315 1316 return 0; 1317 1318 out: 1319 ndp->active_channel = NULL; 1320 ndp->active_package = NULL; 1321 if (ndp->flags & NCSI_DEV_RESHUFFLE) { 1322 ndp->flags &= ~NCSI_DEV_RESHUFFLE; 1323 return ncsi_choose_active_channel(ndp); 1324 } 1325 1326 ncsi_report_link(ndp, false); 1327 return -ENODEV; 1328 } 1329 1330 #if IS_ENABLED(CONFIG_IPV6) 1331 static int ncsi_inet6addr_event(struct notifier_block *this, 1332 unsigned long event, void *data) 1333 { 1334 struct inet6_ifaddr *ifa = data; 1335 struct net_device *dev = ifa->idev->dev; 1336 struct ncsi_dev *nd = ncsi_find_dev(dev); 1337 struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL; 1338 struct ncsi_package *np; 1339 struct ncsi_channel *nc; 1340 struct ncsi_cmd_arg nca; 1341 bool action; 1342 int ret; 1343 1344 if (!ndp || (ipv6_addr_type(&ifa->addr) & 1345 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK))) 1346 return NOTIFY_OK; 1347 1348 switch (event) { 1349 case NETDEV_UP: 1350 action = (++ndp->inet6_addr_num) == 1; 1351 nca.type = NCSI_PKT_CMD_EGMF; 1352 break; 1353 case NETDEV_DOWN: 1354 action = (--ndp->inet6_addr_num == 0); 1355 nca.type = NCSI_PKT_CMD_DGMF; 1356 break; 1357 default: 1358 return NOTIFY_OK; 1359 } 1360 1361 /* We might not have active channel or packages. The IPv6 1362 * required multicast will be enabled when active channel 1363 * or packages are chosen. 1364 */ 1365 np = ndp->active_package; 1366 nc = ndp->active_channel; 1367 if (!action || !np || !nc) 1368 return NOTIFY_OK; 1369 1370 /* We needn't enable or disable it if the function isn't supported */ 1371 if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC)) 1372 return NOTIFY_OK; 1373 1374 nca.ndp = ndp; 1375 nca.req_flags = 0; 1376 nca.package = np->id; 1377 nca.channel = nc->id; 1378 nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap; 1379 ret = ncsi_xmit_cmd(&nca); 1380 if (ret) { 1381 netdev_warn(dev, "Fail to %s global multicast filter (%d)\n", 1382 (event == NETDEV_UP) ? "enable" : "disable", ret); 1383 return NOTIFY_DONE; 1384 } 1385 1386 return NOTIFY_OK; 1387 } 1388 1389 static struct notifier_block ncsi_inet6addr_notifier = { 1390 .notifier_call = ncsi_inet6addr_event, 1391 }; 1392 #endif /* CONFIG_IPV6 */ 1393 1394 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp) 1395 { 1396 struct ncsi_dev *nd = &ndp->ndev; 1397 struct ncsi_channel *nc; 1398 struct ncsi_package *np; 1399 unsigned long flags; 1400 unsigned int n = 0; 1401 1402 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1403 NCSI_FOR_EACH_CHANNEL(np, nc) { 1404 spin_lock_irqsave(&nc->lock, flags); 1405 1406 /* Channels may be busy, mark dirty instead of 1407 * kicking if; 1408 * a) not ACTIVE (configured) 1409 * b) in the channel_queue (to be configured) 1410 * c) it's ndev is in the config state 1411 */ 1412 if (nc->state != NCSI_CHANNEL_ACTIVE) { 1413 if ((ndp->ndev.state & 0xff00) == 1414 ncsi_dev_state_config || 1415 !list_empty(&nc->link)) { 1416 netdev_dbg(nd->dev, 1417 "NCSI: channel %p marked dirty\n", 1418 nc); 1419 nc->reconfigure_needed = true; 1420 } 1421 spin_unlock_irqrestore(&nc->lock, flags); 1422 continue; 1423 } 1424 1425 spin_unlock_irqrestore(&nc->lock, flags); 1426 1427 ncsi_stop_channel_monitor(nc); 1428 spin_lock_irqsave(&nc->lock, flags); 1429 nc->state = NCSI_CHANNEL_INACTIVE; 1430 spin_unlock_irqrestore(&nc->lock, flags); 1431 1432 spin_lock_irqsave(&ndp->lock, flags); 1433 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 1434 spin_unlock_irqrestore(&ndp->lock, flags); 1435 1436 netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc); 1437 n++; 1438 } 1439 } 1440 1441 return n; 1442 } 1443 1444 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1445 { 1446 struct ncsi_dev_priv *ndp; 1447 unsigned int n_vids = 0; 1448 struct vlan_vid *vlan; 1449 struct ncsi_dev *nd; 1450 bool found = false; 1451 1452 if (vid == 0) 1453 return 0; 1454 1455 nd = ncsi_find_dev(dev); 1456 if (!nd) { 1457 netdev_warn(dev, "NCSI: No net_device?\n"); 1458 return 0; 1459 } 1460 1461 ndp = TO_NCSI_DEV_PRIV(nd); 1462 1463 /* Add the VLAN id to our internal list */ 1464 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { 1465 n_vids++; 1466 if (vlan->vid == vid) { 1467 netdev_dbg(dev, "NCSI: vid %u already registered\n", 1468 vid); 1469 return 0; 1470 } 1471 } 1472 if (n_vids >= NCSI_MAX_VLAN_VIDS) { 1473 netdev_warn(dev, 1474 "tried to add vlan id %u but NCSI max already registered (%u)\n", 1475 vid, NCSI_MAX_VLAN_VIDS); 1476 return -ENOSPC; 1477 } 1478 1479 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 1480 if (!vlan) 1481 return -ENOMEM; 1482 1483 vlan->proto = proto; 1484 vlan->vid = vid; 1485 list_add_rcu(&vlan->list, &ndp->vlan_vids); 1486 1487 netdev_dbg(dev, "NCSI: Added new vid %u\n", vid); 1488 1489 found = ncsi_kick_channels(ndp) != 0; 1490 1491 return found ? ncsi_process_next_channel(ndp) : 0; 1492 } 1493 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid); 1494 1495 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 1496 { 1497 struct vlan_vid *vlan, *tmp; 1498 struct ncsi_dev_priv *ndp; 1499 struct ncsi_dev *nd; 1500 bool found = false; 1501 1502 if (vid == 0) 1503 return 0; 1504 1505 nd = ncsi_find_dev(dev); 1506 if (!nd) { 1507 netdev_warn(dev, "NCSI: no net_device?\n"); 1508 return 0; 1509 } 1510 1511 ndp = TO_NCSI_DEV_PRIV(nd); 1512 1513 /* Remove the VLAN id from our internal list */ 1514 list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list) 1515 if (vlan->vid == vid) { 1516 netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid); 1517 list_del_rcu(&vlan->list); 1518 found = true; 1519 kfree(vlan); 1520 } 1521 1522 if (!found) { 1523 netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid); 1524 return -EINVAL; 1525 } 1526 1527 found = ncsi_kick_channels(ndp) != 0; 1528 1529 return found ? ncsi_process_next_channel(ndp) : 0; 1530 } 1531 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid); 1532 1533 struct ncsi_dev *ncsi_register_dev(struct net_device *dev, 1534 void (*handler)(struct ncsi_dev *ndev)) 1535 { 1536 struct ncsi_dev_priv *ndp; 1537 struct ncsi_dev *nd; 1538 unsigned long flags; 1539 int i; 1540 1541 /* Check if the device has been registered or not */ 1542 nd = ncsi_find_dev(dev); 1543 if (nd) 1544 return nd; 1545 1546 /* Create NCSI device */ 1547 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC); 1548 if (!ndp) 1549 return NULL; 1550 1551 nd = &ndp->ndev; 1552 nd->state = ncsi_dev_state_registered; 1553 nd->dev = dev; 1554 nd->handler = handler; 1555 ndp->pending_req_num = 0; 1556 INIT_LIST_HEAD(&ndp->channel_queue); 1557 INIT_LIST_HEAD(&ndp->vlan_vids); 1558 INIT_WORK(&ndp->work, ncsi_dev_work); 1559 1560 /* Initialize private NCSI device */ 1561 spin_lock_init(&ndp->lock); 1562 INIT_LIST_HEAD(&ndp->packages); 1563 ndp->request_id = NCSI_REQ_START_IDX; 1564 for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) { 1565 ndp->requests[i].id = i; 1566 ndp->requests[i].ndp = ndp; 1567 timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0); 1568 } 1569 1570 spin_lock_irqsave(&ncsi_dev_lock, flags); 1571 #if IS_ENABLED(CONFIG_IPV6) 1572 ndp->inet6_addr_num = 0; 1573 if (list_empty(&ncsi_dev_list)) 1574 register_inet6addr_notifier(&ncsi_inet6addr_notifier); 1575 #endif 1576 list_add_tail_rcu(&ndp->node, &ncsi_dev_list); 1577 spin_unlock_irqrestore(&ncsi_dev_lock, flags); 1578 1579 /* Register NCSI packet Rx handler */ 1580 ndp->ptype.type = cpu_to_be16(ETH_P_NCSI); 1581 ndp->ptype.func = ncsi_rcv_rsp; 1582 ndp->ptype.dev = dev; 1583 dev_add_pack(&ndp->ptype); 1584 1585 /* Set up generic netlink interface */ 1586 ncsi_init_netlink(dev); 1587 1588 return nd; 1589 } 1590 EXPORT_SYMBOL_GPL(ncsi_register_dev); 1591 1592 int ncsi_start_dev(struct ncsi_dev *nd) 1593 { 1594 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd); 1595 int ret; 1596 1597 if (nd->state != ncsi_dev_state_registered && 1598 nd->state != ncsi_dev_state_functional) 1599 return -ENOTTY; 1600 1601 if (!(ndp->flags & NCSI_DEV_PROBED)) { 1602 nd->state = ncsi_dev_state_probe; 1603 schedule_work(&ndp->work); 1604 return 0; 1605 } 1606 1607 if (ndp->flags & NCSI_DEV_HWA) { 1608 netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n"); 1609 ret = ncsi_enable_hwa(ndp); 1610 } else { 1611 ret = ncsi_choose_active_channel(ndp); 1612 } 1613 1614 return ret; 1615 } 1616 EXPORT_SYMBOL_GPL(ncsi_start_dev); 1617 1618 void ncsi_stop_dev(struct ncsi_dev *nd) 1619 { 1620 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd); 1621 struct ncsi_package *np; 1622 struct ncsi_channel *nc; 1623 bool chained; 1624 int old_state; 1625 unsigned long flags; 1626 1627 /* Stop the channel monitor and reset channel's state */ 1628 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1629 NCSI_FOR_EACH_CHANNEL(np, nc) { 1630 ncsi_stop_channel_monitor(nc); 1631 1632 spin_lock_irqsave(&nc->lock, flags); 1633 chained = !list_empty(&nc->link); 1634 old_state = nc->state; 1635 nc->state = NCSI_CHANNEL_INACTIVE; 1636 spin_unlock_irqrestore(&nc->lock, flags); 1637 1638 WARN_ON_ONCE(chained || 1639 old_state == NCSI_CHANNEL_INVISIBLE); 1640 } 1641 } 1642 1643 netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n"); 1644 ncsi_report_link(ndp, true); 1645 } 1646 EXPORT_SYMBOL_GPL(ncsi_stop_dev); 1647 1648 void ncsi_unregister_dev(struct ncsi_dev *nd) 1649 { 1650 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd); 1651 struct ncsi_package *np, *tmp; 1652 unsigned long flags; 1653 1654 dev_remove_pack(&ndp->ptype); 1655 1656 list_for_each_entry_safe(np, tmp, &ndp->packages, node) 1657 ncsi_remove_package(np); 1658 1659 spin_lock_irqsave(&ncsi_dev_lock, flags); 1660 list_del_rcu(&ndp->node); 1661 #if IS_ENABLED(CONFIG_IPV6) 1662 if (list_empty(&ncsi_dev_list)) 1663 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier); 1664 #endif 1665 spin_unlock_irqrestore(&ncsi_dev_lock, flags); 1666 1667 ncsi_unregister_netlink(nd->dev); 1668 1669 kfree(ndp); 1670 } 1671 EXPORT_SYMBOL_GPL(ncsi_unregister_dev); 1672