/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>

#include "internal.h"
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"

/* Global registry of NCSI devices, protected by ncsi_dev_lock. */
LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);

/* Report the aggregate link state to the device's registered handler.
 *
 * The device is marked functional, then nd->link_up is set to 1 if any
 * channel that is ACTIVE and not queued (its nc->link list entry is
 * empty) reports link up in bit 0 of its NCSI_MODE_LINK data[2] word.
 * When @force_down is true the scan is skipped and link-down is
 * reported unconditionally.
 */
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Skip channels that are queued for processing
			 * or not in the ACTIVE state.
			 */
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			/* Bit 0 of the link mode word is the link-up flag;
			 * one channel with link up is enough.
			 */
			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}

/* Per-channel monitor timer callback.
 *
 * Re-armed once per second while enabled.  In the START/RETRY states it
 * sends a Get Link Status (GLS) command; in the WAIT..WAIT_MAX window it
 * just lets the counter advance.  Once the counter runs past WAIT_MAX
 * (the default case) the channel is declared timed out: link is forced
 * down (unless hardware arbitration is in use), the channel is requeued
 * for reconfiguration and processing is kicked.
 */
static void ncsi_channel_monitor(struct timer_list *t)
{
	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel_mode *ncm;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	/* Snapshot the channel state under its lock, then work on the
	 * copies without holding it.
	 */
	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	/* Stop monitoring if it was disabled, or the channel has been
	 * queued for processing in the meantime.
	 */
	if (!enabled || chained) {
		ncsi_stop_channel_monitor(nc);
		return;
	}
	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE) {
		ncsi_stop_channel_monitor(nc);
		return;
	}

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		/* Probe the channel with a Get Link Status command. */
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		/* Still within the grace window for the GLS response. */
		break;
	default:
		/* No response within the wait window: treat as timeout. */
		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
			   nc->id);
		if (!(ndp->flags & NCSI_DEV_HWA)) {
			/* Without hardware arbitration, report link down
			 * and request a channel reshuffle.
			 */
			ncsi_report_link(ndp, true);
			ndp->flags |= NCSI_DEV_RESHUFFLE;
		}

		ncsi_stop_channel_monitor(nc);

		/* Clear the cached link-up bit and hide the channel
		 * while it is being requeued.
		 */
		ncm = &nc->modes[NCSI_MODE_LINK];
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INVISIBLE;
		ncm->data[2] &= ~0x1;
		spin_unlock_irqrestore(&nc->lock, flags);

		/* NOTE(review): nc->state is written here under ndp->lock
		 * rather than nc->lock as elsewhere — confirm intended.
		 */
		spin_lock_irqsave(&ndp->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	/* Advance the monitor counter and re-arm for one second. */
	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

/* Enable the channel monitor and arm its one-second timer.
 * Warns if the monitor was already enabled.
 */
void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->monitor.enabled);
	nc->monitor.enabled = true;
	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

/* Disable the channel monitor and synchronously cancel its timer.
 * A no-op if the monitor is not currently enabled.
 */
void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->monitor.enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->monitor.enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	/* Wait for a concurrently running callback to finish. */
	del_timer_sync(&nc->monitor.timer);
}

/* Look up a channel by id within a package; returns NULL if absent. */
struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
				       unsigned char id)
{
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_CHANNEL(np, nc) {
		if (nc->id == id)
			return nc;
	}

	return NULL;
}

/* Add a channel with the given id to a package.
 *
 * Returns the newly allocated channel, the already-existing channel if
 * one with that id raced in first (the fresh allocation is freed), or
 * NULL on allocation failure.  GFP_ATOMIC is used as this can be called
 * from response-handling context.
 */
struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->monitor.enabled = false;
	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	/* Re-check under the package lock in case another context
	 * created the same channel concurrently.
	 */
	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}

/* Detach a channel from its package, stop its monitor, release its
 * filters and free it.
 */
static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);

	/* Release filters */
	kfree(nc->mac_filter.addrs);
	kfree(nc->vlan_filter.vids);

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}

/* Look up a package by id on a device; returns NULL if absent. */
struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
				       unsigned char id)
{
	struct ncsi_package *np;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (np->id == id)
			return np;
	}

	return NULL;
}

/* Add a package with the given id to a device.
 *
 * Mirrors ncsi_add_channel(): returns the new package, the existing one
 * if creation raced (the fresh allocation is freed), or NULL on
 * allocation failure.
 */
struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);

	/* Re-check under the device lock to resolve creation races. */
	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}

/* Remove a package: tear down all of its channels, unlink it from the
 * device and free it.
 */
void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}

/* Resolve a combined package/channel id (as carried in NCSI packets)
 * into package and channel pointers.  Either output pointer may be
 * NULL if the caller is not interested; either result may be NULL if
 * no match exists.
 */
void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
				   unsigned char id,
				   struct ncsi_package **np,
				   struct ncsi_channel **nc)
{
	struct ncsi_package *p;
	struct ncsi_channel *c;

	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

	if (np)
		*np = p;
	if (nc)
		*nc = c;
}

/* For two consecutive NCSI commands, the packet IDs shouldn't
 * be same. Otherwise, the bogus response might be replied. So
 * the available IDs are allocated in round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
					unsigned int req_flags)
{
	struct ncsi_request *nr = NULL;
	int i, limit = ARRAY_SIZE(ndp->requests);
	unsigned long flags;

	/* Check if there is one available request until the ceiling */
	spin_lock_irqsave(&ndp->lock, flags);
	for (i = ndp->request_id; i < limit; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

	/* Fail back to check from the starting cursor */
	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

found:
	spin_unlock_irqrestore(&ndp->lock, flags);
	return nr;
}

/* Release a request slot: cancel its timeout timer, detach and consume
 * its command/response skbs.  For event-driven requests, schedule the
 * state-machine work once the last pending request completes.
 */
void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* NOTE(review): pending_req_num is decremented outside ndp->lock
	 * here — presumably serialized by the single-threaded state
	 * machine; confirm.
	 */
	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}

/* Find the NCSI device bound to a given net_device, or NULL. */
struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
	struct ncsi_dev_priv *ndp;

	NCSI_FOR_EACH_DEV(ndp) {
		if (ndp->ndev.dev == dev)
			return &ndp->ndev;
	}

	return NULL;
}

/* Per-request timeout timer: release the request unless a response has
 * already arrived (in which case the response handler owns cleanup).
 */
static void ncsi_request_timeout(struct timer_list *t)
{
	struct ncsi_request *nr = from_timer(nr, t, timer);
	struct ncsi_dev_priv *ndp = nr->ndp;
	unsigned long flags;

	/* If the request already had associated response,
	 * let the response handler to release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Release the request */
	ncsi_free_request(nr);
}

/* Suspend state machine for the active channel.
 *
 * Driven repeatedly from the workqueue as responses come in; each case
 * issues the next command in the sequence SP -> (optional GLS sweep) ->
 * DCNT -> DC -> DP and advances nd->state.  On transmit failure the
 * device is dropped back to the functional state.
 */
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		/* Fall through */
	case ncsi_dev_state_suspend_select:
		ndp->pending_req_num = 1;

		/* Select the package; byte 0 selects hardware arbitration. */
		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* To retrieve the last link states of channels in current
		 * package when current active channel needs fail over to
		 * another one. It means we will possibly select another
		 * channel as next active one. The link states of channels
		 * are most important factor of the selection. So we need
		 * accurate link states. Unfortunately, the link states on
		 * inactive channels can't be updated with LSC AEN in time.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		/* Refresh link state of every channel in the package. */
		ndp->pending_req_num = np->channel_num;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;

		nd->state = ncsi_dev_state_suspend_dcnt;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		/* Disable network TX on the active channel. */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		/* Disable the channel itself. */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_deselect:
		/* Deselect the package. */
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		/* Mark the channel inactive and move on to the next one. */
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		ncsi_process_next_channel(ndp);

		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}

/* Check the VLAN filter bitmap for a set filter, and construct a
 * "Set VLAN Filter - Disable" packet if found.
 */
static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
			 struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	unsigned long flags;
	void *bitmap;
	int index;
	u16 vid;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);
	index = find_next_bit(bitmap, ncf->n_vids, 0);
	if (index >= ncf->n_vids) {
		/* No filter currently set: nothing to clear. */
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}
	vid = ncf->vids[index];

	clear_bit(index, bitmap);
	ncf->vids[index] = 0;
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x00;
	return 0;
}

/* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable"
 * packet.
 */
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
		       struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	struct vlan_vid *vlan = NULL;
	unsigned long flags;
	int i, index;
	void *bitmap;
	u16 vid;

	if (list_empty(&ndp->vlan_vids))
		return -1;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);

	/* Pick the first requested VLAN id not already programmed into
	 * the channel's filter table (vid is zeroed when a match is
	 * found, so a non-zero vid after the loop means "outstanding").
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		vid = vlan->vid;
		for (i = 0; i < ncf->n_vids; i++)
			if (ncf->vids[i] == vid) {
				vid = 0;
				break;
			}
		if (vid)
			break;
	}
	rcu_read_unlock();

	if (!vid) {
		/* No VLAN ID is not set */
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	/* Claim a free slot in the filter bitmap. */
	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
	if (index < 0 || index >= ncf->n_vids) {
		netdev_err(ndp->ndev.dev,
			   "Channel %u already has all VLAN filters set\n",
			   nc->id);
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	ncf->vids[index] = vid;
	set_bit(index, bitmap);
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x01;

	return 0;
}

/* Configuration state machine for the active channel.
 *
 * Driven from the workqueue: SP -> CIS -> clear VLAN filters -> set
 * VLAN filters -> EV/DV -> SMA -> EBF -> (EGMF if IPv6 multicast is
 * wanted and supported) -> ECNT -> EC -> (AE if supported) -> GLS ->
 * done.  On completion the channel is marked ACTIVE (or INACTIVE if
 * link is down), the hot channel is recorded and monitoring starts.
 * Any transmit failure reports link down.
 */
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_channel *hot_nc = NULL;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_SP\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_CIS\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_clear_vids;
		break;
	case ncsi_dev_state_config_clear_vids:
	case ncsi_dev_state_config_svf:
	case ncsi_dev_state_config_ev:
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
#if IS_ENABLED(CONFIG_IPV6)
	case ncsi_dev_state_config_egmf:
#endif
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Clear any active filters on the channel before setting */
		if (nd->state == ncsi_dev_state_config_clear_vids) {
			ret = clear_one_vid(ndp, nc, &nca);
			if (ret) {
				/* All filters cleared: move on and
				 * re-schedule the work ourselves since
				 * no command goes out.
				 */
				nd->state = ncsi_dev_state_config_svf;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_clear_vids;
		/* Add known VLAN tags to the filter */
		} else if (nd->state == ncsi_dev_state_config_svf) {
			ret = set_one_vid(ndp, nc, &nca);
			if (ret) {
				/* All outstanding VLAN ids programmed. */
				nd->state = ncsi_dev_state_config_ev;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_svf;
		/* Enable/Disable the VLAN filter */
		} else if (nd->state == ncsi_dev_state_config_ev) {
			if (list_empty(&ndp->vlan_vids)) {
				nca.type = NCSI_PKT_CMD_DV;
			} else {
				nca.type = NCSI_PKT_CMD_EV;
				nca.bytes[3] = NCSI_CAP_VLAN_NO;
			}
			nd->state = ncsi_dev_state_config_sma;
		} else if (nd->state == ncsi_dev_state_config_sma) {
		/* Use first entry in unicast filter table. Note that
		 * the MAC filter table starts from entry 1 instead of
		 * 0.
		 */
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[6] = 0x1;
			nca.bytes[7] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			/* Enable broadcast filtering. */
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#if IS_ENABLED(CONFIG_IPV6)
			/* Detour via EGMF when IPv6 addresses exist and
			 * the channel supports global multicast filtering.
			 */
			if (ndp->inet6_addr_num > 0 &&
			    (nc->caps[NCSI_CAP_GENERIC].cap &
			     NCSI_CAP_GENERIC_MC))
				nd->state = ncsi_dev_state_config_egmf;
			else
				nd->state = ncsi_dev_state_config_ecnt;
		} else if (nd->state == ncsi_dev_state_config_egmf) {
			nca.type = NCSI_PKT_CMD_EGMF;
			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#endif /* CONFIG_IPV6 */
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD %x\n",
				   nca.type);
			goto error;
		}
		break;
	case ncsi_dev_state_config_done:
		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
			      "NCSI: channel %u config done\n", nc->id);
		spin_lock_irqsave(&nc->lock, flags);
		if (nc->reconfigure_needed) {
			/* This channel's configuration has been updated
			 * part-way during the config state - start the
			 * channel configuration over
			 */
			nc->reconfigure_needed = false;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_printk(KERN_DEBUG, dev,
				      "Dirty NCSI channel state reset\n");
			ncsi_process_next_channel(ndp);
			break;
		}

		/* Channel becomes ACTIVE only if it reported link up. */
		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
			hot_nc = nc;
			nc->state = NCSI_CHANNEL_ACTIVE;
		} else {
			hot_nc = NULL;
			nc->state = NCSI_CHANNEL_INACTIVE;
			netdev_warn(ndp->ndev.dev,
				    "NCSI: channel %u link down after config\n",
				    nc->id);
		}
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Update the hot channel */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->hot_channel = hot_nc;
		spin_unlock_irqrestore(&ndp->lock, flags);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
			     nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}

/* Pick the next channel to configure and queue it for processing.
 *
 * A forced package/channel (if configured) wins regardless of link.
 * Otherwise the search prefers, in increasing priority: any eligible
 * INACTIVE unqueued channel, the previous hot channel, and finally a
 * channel reporting link up (which ends the search immediately).
 * Returns the result of ncsi_process_next_channel(), or -ENODEV if no
 * candidate exists.
 */
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np, *force_package;
	struct ncsi_channel *nc, *found, *hot_nc, *force_channel;
	struct ncsi_channel_mode *ncm;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	force_channel = ndp->force_channel;
	force_package = ndp->force_package;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Force a specific channel whether or not it has link if we have been
	 * configured to do so
	 */
	if (force_package && force_channel) {
		found = force_channel;
		ncm = &found->modes[NCSI_MODE_LINK];
		if (!(ncm->data[2] & 0x1))
			netdev_info(ndp->ndev.dev,
				    "NCSI: Channel %u forced, but it is link down\n",
				    found->id);
		goto out;
	}

	/* The search is done once an inactive channel with up
	 * link is found.
	 */
	found = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (ndp->force_package && np != ndp->force_package)
			continue;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			if (!found)
				found = nc;

			if (nc == hot_nc)
				found = nc;

			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				spin_unlock_irqrestore(&nc->lock, flags);
				found = nc;
				goto out;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

	if (!found) {
		netdev_warn(ndp->ndev.dev,
			    "NCSI: No channel found with link\n");
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

	ncm = &found->modes[NCSI_MODE_LINK];
	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
		      "NCSI: Channel %u added to queue (link %s)\n",
		      found->id, ncm->data[2] & 0x1 ? "up" : "down");

out:
	spin_lock_irqsave(&ndp->lock, flags);
	list_add_tail_rcu(&found->link, &ndp->channel_queue);
	spin_unlock_irqrestore(&ndp->lock, flags);

	return ncsi_process_next_channel(ndp);
}

/* Decide whether hardware arbitration (HWA) can be used and update
 * NCSI_DEV_HWA accordingly.  Returns true only if at least one channel
 * exists and every channel advertises HWA support.
 */
static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned int cap;
	bool has_channel = false;

	/* The hardware arbitration is disabled if any one channel
	 * doesn't support explicitly.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			has_channel = true;

			cap = nc->caps[NCSI_CAP_GENERIC].cap;
			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
				ndp->flags &= ~NCSI_DEV_HWA;
				return false;
			}
		}
	}

	if (has_channel) {
		ndp->flags |= NCSI_DEV_HWA;
		return true;
	}

	ndp->flags &= ~NCSI_DEV_HWA;
	return false;
}

/* Enter hardware-arbitration mode: queue every channel for processing
 * and start working through them.  Returns -ENOENT if there are no
 * channels at all.
 */
static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* Move all available channels to processing queue */
	spin_lock_irqsave(&ndp->lock, flags);
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
				     !list_empty(&nc->link));
			ncsi_stop_channel_monitor(nc);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		}
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* We can have no channels in extremely case */
	if (list_empty(&ndp->channel_queue)) {
		netdev_err(ndp->ndev.dev,
			   "NCSI: No available channels for HWA\n");
		ncsi_report_link(ndp, false);
		return -ENOENT;
	}

	return ncsi_process_next_channel(ndp);
}

/* Probe (enumeration) state machine.
 *
 * Walks DP-all -> SP/DP-all -> per-package (SP, CIS per channel, GVI,
 * GC, GLS per channel, DP) until every package has been enumerated,
 * then either enables HWA or chooses an active channel.  On transmit
 * failure, reports link down.
 */
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		/* Fall through */
	case ncsi_dev_state_probe_deselect:
		/* One DP per possible package id (0..7). */
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		/* 8 SP + 8 DP responses expected. */
		ndp->pending_req_num = 16;

		/* Select all possible packages */
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		/* Disable all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		/* Advance to the next discovered package (or wrap to NULL
		 * when the last one has been handled).
		 */
		if (!ndp->active_package)
			ndp->active_package = list_first_or_null_rcu(
				&ndp->packages, struct ncsi_package, node);
		else if (list_is_last(&ndp->active_package->node,
				      &ndp->packages))
			ndp->active_package = NULL;
		else
			ndp->active_package = list_next_entry(
				ndp->active_package, node);

		/* All available packages and channels are enumerated. The
		 * enumeration happens for once when the NCSI interface is
		 * started. So we need continue to start the interface after
		 * the enumeration.
		 *
		 * We have to choose an active channel before configuring it.
		 * Note that we possibly don't have active channel in extreme
		 * situation.
		 */
		if (!ndp->active_package) {
			ndp->flags |= NCSI_DEV_PROBED;
			if (ncsi_check_hwa(ndp))
				ncsi_enable_hwa(ndp);
			else
				ncsi_choose_active_channel(ndp);
			return;
		}

		/* Select the active package */
		ndp->pending_req_num = 1;
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->active_package->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_cis:
		/* CIS on every possible channel id to discover them. */
		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = ndp->active_package->id;
		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
			nca.channel = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = np->channel_num;

		/* Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		if (nd->state == ncsi_dev_state_probe_gvi)
			nd->state = ncsi_dev_state_probe_gc;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nd->state = ncsi_dev_state_probe_gls;
		else
			nd->state = ncsi_dev_state_probe_dp;
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the active package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->active_package->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Scan channels in next package */
		nd->state = ncsi_dev_state_probe_channel;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}

/* Workqueue entry point: dispatch to the state machine selected by the
 * major part of the device state.
 */
static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
						 struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}

/* Pop the next channel from the processing queue and start the
 * appropriate state machine on it: configure it if it was INACTIVE,
 * suspend it if it was ACTIVE.  With an empty queue, either kick a
 * reshuffle (if requested) or report the final link state.
 * Returns 0 on success or a negative errno.
 */
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Hide the channel while it is being processed. */
	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
			    nc->id);
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
			    nc->id);
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   old_state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
		      "NCSI: No more channels to process\n");
	ncsi_report_link(ndp, false);
	return -ENODEV;
}

#if IS_ENABLED(CONFIG_IPV6)
/* inet6 address notifier: enable the global multicast filter (EGMF)
 * when the first IPv6 address appears on an NCSI-managed interface,
 * and disable it (DGMF) when the last one goes away.  Link-local and
 * loopback addresses are ignored.
 */
static int ncsi_inet6addr_event(struct notifier_block *this,
				unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *dev = ifa->idev->dev;
	struct ncsi_dev *nd = ncsi_find_dev(dev);
	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	bool action;
	int ret;

	if (!ndp || (ipv6_addr_type(&ifa->addr) &
	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UP:
		/* Only act on the 0 -> 1 transition. */
		action = (++ndp->inet6_addr_num) == 1;
		nca.type = NCSI_PKT_CMD_EGMF;
		break;
	case NETDEV_DOWN:
		/* Only act on the 1 -> 0 transition. */
		action = (--ndp->inet6_addr_num == 0);
		nca.type = NCSI_PKT_CMD_DGMF;
		break;
	default:
		return NOTIFY_OK;
	}

	/* We might not have active channel or packages. The IPv6
	 * required multicast will be enabled when active channel
	 * or packages are chosen.
	 */
	np = ndp->active_package;
	nc = ndp->active_channel;
	if (!action || !np || !nc)
		return NOTIFY_OK;

	/* We needn't enable or disable it if the function isn't supported */
	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
		return NOTIFY_OK;

	nca.ndp = ndp;
	nca.req_flags = 0;
	nca.package = np->id;
	nca.channel = nc->id;
	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
	ret = ncsi_xmit_cmd(&nca);
	if (ret) {
		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
			    (event == NETDEV_UP) ? "enable" : "disable", ret);
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block ncsi_inet6addr_notifier = {
	.notifier_call = ncsi_inet6addr_event,
};
#endif /* CONFIG_IPV6 */

/* Force reconfiguration of all channels (definition continues beyond
 * this view; documented only as far as visible).
 */
static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	unsigned long flags;
	unsigned int n = 0;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels may be busy, mark dirty instead of
			 * kicking if;
			 * a) not ACTIVE (configured)
			 * b) in the channel_queue (to be configured)
			 * c) it's ndev is in the config state
			 */
			if (nc->state != NCSI_CHANNEL_ACTIVE) {
				if ((ndp->ndev.state & 0xff00) ==
				    ncsi_dev_state_config ||
				    !list_empty(&nc->link)) {
					netdev_printk(KERN_DEBUG, nd->dev,
						      "NCSI: channel %p marked dirty\n",
						      nc);
					nc->reconfigure_needed = true;
				}
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			spin_unlock_irqrestore(&nc->lock, flags);

			ncsi_stop_channel_monitor(nc);
			spin_lock_irqsave(&nc->lock, flags);
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock,
flags); 1338 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 1339 spin_unlock_irqrestore(&ndp->lock, flags); 1340 1341 netdev_printk(KERN_DEBUG, nd->dev, 1342 "NCSI: kicked channel %p\n", nc); 1343 n++; 1344 } 1345 } 1346 1347 return n; 1348 } 1349 1350 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1351 { 1352 struct ncsi_dev_priv *ndp; 1353 unsigned int n_vids = 0; 1354 struct vlan_vid *vlan; 1355 struct ncsi_dev *nd; 1356 bool found = false; 1357 1358 if (vid == 0) 1359 return 0; 1360 1361 nd = ncsi_find_dev(dev); 1362 if (!nd) { 1363 netdev_warn(dev, "NCSI: No net_device?\n"); 1364 return 0; 1365 } 1366 1367 ndp = TO_NCSI_DEV_PRIV(nd); 1368 1369 /* Add the VLAN id to our internal list */ 1370 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { 1371 n_vids++; 1372 if (vlan->vid == vid) { 1373 netdev_printk(KERN_DEBUG, dev, 1374 "NCSI: vid %u already registered\n", vid); 1375 return 0; 1376 } 1377 } 1378 if (n_vids >= NCSI_MAX_VLAN_VIDS) { 1379 netdev_warn(dev, 1380 "tried to add vlan id %u but NCSI max already registered (%u)\n", 1381 vid, NCSI_MAX_VLAN_VIDS); 1382 return -ENOSPC; 1383 } 1384 1385 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 1386 if (!vlan) 1387 return -ENOMEM; 1388 1389 vlan->proto = proto; 1390 vlan->vid = vid; 1391 list_add_rcu(&vlan->list, &ndp->vlan_vids); 1392 1393 netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid); 1394 1395 found = ncsi_kick_channels(ndp) != 0; 1396 1397 return found ? 
ncsi_process_next_channel(ndp) : 0; 1398 } 1399 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid); 1400 1401 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 1402 { 1403 struct vlan_vid *vlan, *tmp; 1404 struct ncsi_dev_priv *ndp; 1405 struct ncsi_dev *nd; 1406 bool found = false; 1407 1408 if (vid == 0) 1409 return 0; 1410 1411 nd = ncsi_find_dev(dev); 1412 if (!nd) { 1413 netdev_warn(dev, "NCSI: no net_device?\n"); 1414 return 0; 1415 } 1416 1417 ndp = TO_NCSI_DEV_PRIV(nd); 1418 1419 /* Remove the VLAN id from our internal list */ 1420 list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list) 1421 if (vlan->vid == vid) { 1422 netdev_printk(KERN_DEBUG, dev, 1423 "NCSI: vid %u found, removing\n", vid); 1424 list_del_rcu(&vlan->list); 1425 found = true; 1426 kfree(vlan); 1427 } 1428 1429 if (!found) { 1430 netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid); 1431 return -EINVAL; 1432 } 1433 1434 found = ncsi_kick_channels(ndp) != 0; 1435 1436 return found ? ncsi_process_next_channel(ndp) : 0; 1437 } 1438 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid); 1439 1440 struct ncsi_dev *ncsi_register_dev(struct net_device *dev, 1441 void (*handler)(struct ncsi_dev *ndev)) 1442 { 1443 struct ncsi_dev_priv *ndp; 1444 struct ncsi_dev *nd; 1445 unsigned long flags; 1446 int i; 1447 1448 /* Check if the device has been registered or not */ 1449 nd = ncsi_find_dev(dev); 1450 if (nd) 1451 return nd; 1452 1453 /* Create NCSI device */ 1454 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC); 1455 if (!ndp) 1456 return NULL; 1457 1458 nd = &ndp->ndev; 1459 nd->state = ncsi_dev_state_registered; 1460 nd->dev = dev; 1461 nd->handler = handler; 1462 ndp->pending_req_num = 0; 1463 INIT_LIST_HEAD(&ndp->channel_queue); 1464 INIT_LIST_HEAD(&ndp->vlan_vids); 1465 INIT_WORK(&ndp->work, ncsi_dev_work); 1466 1467 /* Initialize private NCSI device */ 1468 spin_lock_init(&ndp->lock); 1469 INIT_LIST_HEAD(&ndp->packages); 1470 ndp->request_id = NCSI_REQ_START_IDX; 1471 for (i = 0; i < 
ARRAY_SIZE(ndp->requests); i++) { 1472 ndp->requests[i].id = i; 1473 ndp->requests[i].ndp = ndp; 1474 timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0); 1475 } 1476 1477 spin_lock_irqsave(&ncsi_dev_lock, flags); 1478 #if IS_ENABLED(CONFIG_IPV6) 1479 ndp->inet6_addr_num = 0; 1480 if (list_empty(&ncsi_dev_list)) 1481 register_inet6addr_notifier(&ncsi_inet6addr_notifier); 1482 #endif 1483 list_add_tail_rcu(&ndp->node, &ncsi_dev_list); 1484 spin_unlock_irqrestore(&ncsi_dev_lock, flags); 1485 1486 /* Register NCSI packet Rx handler */ 1487 ndp->ptype.type = cpu_to_be16(ETH_P_NCSI); 1488 ndp->ptype.func = ncsi_rcv_rsp; 1489 ndp->ptype.dev = dev; 1490 dev_add_pack(&ndp->ptype); 1491 1492 /* Set up generic netlink interface */ 1493 ncsi_init_netlink(dev); 1494 1495 return nd; 1496 } 1497 EXPORT_SYMBOL_GPL(ncsi_register_dev); 1498 1499 int ncsi_start_dev(struct ncsi_dev *nd) 1500 { 1501 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd); 1502 int ret; 1503 1504 if (nd->state != ncsi_dev_state_registered && 1505 nd->state != ncsi_dev_state_functional) 1506 return -ENOTTY; 1507 1508 if (!(ndp->flags & NCSI_DEV_PROBED)) { 1509 nd->state = ncsi_dev_state_probe; 1510 schedule_work(&ndp->work); 1511 return 0; 1512 } 1513 1514 if (ndp->flags & NCSI_DEV_HWA) { 1515 netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n"); 1516 ret = ncsi_enable_hwa(ndp); 1517 } else { 1518 ret = ncsi_choose_active_channel(ndp); 1519 } 1520 1521 return ret; 1522 } 1523 EXPORT_SYMBOL_GPL(ncsi_start_dev); 1524 1525 void ncsi_stop_dev(struct ncsi_dev *nd) 1526 { 1527 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd); 1528 struct ncsi_package *np; 1529 struct ncsi_channel *nc; 1530 bool chained; 1531 int old_state; 1532 unsigned long flags; 1533 1534 /* Stop the channel monitor and reset channel's state */ 1535 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1536 NCSI_FOR_EACH_CHANNEL(np, nc) { 1537 ncsi_stop_channel_monitor(nc); 1538 1539 spin_lock_irqsave(&nc->lock, flags); 1540 chained = 
!list_empty(&nc->link); 1541 old_state = nc->state; 1542 nc->state = NCSI_CHANNEL_INACTIVE; 1543 spin_unlock_irqrestore(&nc->lock, flags); 1544 1545 WARN_ON_ONCE(chained || 1546 old_state == NCSI_CHANNEL_INVISIBLE); 1547 } 1548 } 1549 1550 netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n"); 1551 ncsi_report_link(ndp, true); 1552 } 1553 EXPORT_SYMBOL_GPL(ncsi_stop_dev); 1554 1555 void ncsi_unregister_dev(struct ncsi_dev *nd) 1556 { 1557 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd); 1558 struct ncsi_package *np, *tmp; 1559 unsigned long flags; 1560 1561 dev_remove_pack(&ndp->ptype); 1562 1563 list_for_each_entry_safe(np, tmp, &ndp->packages, node) 1564 ncsi_remove_package(np); 1565 1566 spin_lock_irqsave(&ncsi_dev_lock, flags); 1567 list_del_rcu(&ndp->node); 1568 #if IS_ENABLED(CONFIG_IPV6) 1569 if (list_empty(&ncsi_dev_list)) 1570 unregister_inet6addr_notifier(&ncsi_inet6addr_notifier); 1571 #endif 1572 spin_unlock_irqrestore(&ncsi_dev_lock, flags); 1573 1574 ncsi_unregister_netlink(nd->dev); 1575 1576 kfree(ndp); 1577 } 1578 EXPORT_SYMBOL_GPL(ncsi_unregister_dev); 1579