/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>

#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>

#include "internal.h"
#include "ncsi-pkt.h"

LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);

static inline int ncsi_filter_size(int table)
{
        int sizes[] = { 2, 6, 6, 6 };

        BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
        if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
                return -EINVAL;

        return sizes[table];
}

int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
{
        struct ncsi_channel_filter *ncf;
        void *bitmap;
        int index, size;
        unsigned long flags;

        ncf = nc->filters[table];
        if (!ncf)
                return -ENXIO;

        size = ncsi_filter_size(table);
        if (size < 0)
                return size;

        spin_lock_irqsave(&nc->lock, flags);
        bitmap = (void *)&ncf->bitmap;
        index = -1;
        while ((index = find_next_bit(bitmap, ncf->total, index + 1))
               < ncf->total) {
                if (!memcmp(ncf->data + size * index, data, size)) {
                        spin_unlock_irqrestore(&nc->lock, flags);
                        return index;
                }
        }
        spin_unlock_irqrestore(&nc->lock, flags);

        return -ENOENT;
}

int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
{
        struct ncsi_channel_filter *ncf;
        int index, size;
        void *bitmap;
        unsigned long flags;

        size = ncsi_filter_size(table);
        if (size < 0)
                return size;

        index = ncsi_find_filter(nc, table, data);
        if (index >= 0)
                return index;

        ncf = nc->filters[table];
        if (!ncf)
                return -ENODEV;

        spin_lock_irqsave(&nc->lock, flags);
        bitmap = (void *)&ncf->bitmap;
        do {
                index = find_next_zero_bit(bitmap, ncf->total, 0);
                if (index >= ncf->total) {
                        spin_unlock_irqrestore(&nc->lock, flags);
                        return -ENOSPC;
                }
        } while (test_and_set_bit(index, bitmap));

        memcpy(ncf->data + size * index, data, size);
        spin_unlock_irqrestore(&nc->lock, flags);

        return index;
}

int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
{
        struct ncsi_channel_filter *ncf;
        int size;
        void *bitmap;
        unsigned long flags;

        size = ncsi_filter_size(table);
        if (size < 0)
                return size;

        ncf = nc->filters[table];
        if (!ncf || index >= ncf->total)
                return -ENODEV;

        spin_lock_irqsave(&nc->lock, flags);
        bitmap = (void *)&ncf->bitmap;
        if (test_and_clear_bit(index, bitmap))
                memset(ncf->data + size * index, 0, size);
        spin_unlock_irqrestore(&nc->lock, flags);

        return 0;
}

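/*
 * Link reporting and channel monitoring.
 *
 * ncsi_report_link() tells the driver that registered this NCSI device
 * whether any active, idle channel currently reports link up, then calls
 * the driver's handler.
 *
 * ncsi_channel_monitor() is the per-channel timer callback: it sends a
 * Get Link Status (GLS) command roughly once per second and, if too many
 * polls elapse without a response, takes the channel down and requeues it
 * for reconfiguration, setting NCSI_DEV_RESHUFFLE when hardware
 * arbitration isn't in use.
 */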
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
        struct ncsi_dev *nd = &ndp->ndev;
        struct ncsi_package *np;
        struct ncsi_channel *nc;
        unsigned long flags;

        nd->state = ncsi_dev_state_functional;
        if (force_down) {
                nd->link_up = 0;
                goto report;
        }

        nd->link_up = 0;
        NCSI_FOR_EACH_PACKAGE(ndp, np) {
                NCSI_FOR_EACH_CHANNEL(np, nc) {
                        spin_lock_irqsave(&nc->lock, flags);

                        if (!list_empty(&nc->link) ||
                            nc->state != NCSI_CHANNEL_ACTIVE) {
                                spin_unlock_irqrestore(&nc->lock, flags);
                                continue;
                        }

                        if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
                                spin_unlock_irqrestore(&nc->lock, flags);
                                nd->link_up = 1;
                                goto report;
                        }

                        spin_unlock_irqrestore(&nc->lock, flags);
                }
        }

report:
        nd->handler(nd);
}

static void ncsi_channel_monitor(unsigned long data)
{
        struct ncsi_channel *nc = (struct ncsi_channel *)data;
        struct ncsi_package *np = nc->package;
        struct ncsi_dev_priv *ndp = np->ndp;
        struct ncsi_cmd_arg nca;
        bool enabled, chained;
        unsigned int monitor_state;
        unsigned long flags;
        int state, ret;

        spin_lock_irqsave(&nc->lock, flags);
        state = nc->state;
        chained = !list_empty(&nc->link);
        enabled = nc->monitor.enabled;
        monitor_state = nc->monitor.state;
        spin_unlock_irqrestore(&nc->lock, flags);

        if (!enabled || chained)
                return;
        if (state != NCSI_CHANNEL_INACTIVE &&
            state != NCSI_CHANNEL_ACTIVE)
                return;

        switch (monitor_state) {
        case NCSI_CHANNEL_MONITOR_START:
        case NCSI_CHANNEL_MONITOR_RETRY:
                nca.ndp = ndp;
                nca.package = np->id;
                nca.channel = nc->id;
                nca.type = NCSI_PKT_CMD_GLS;
                nca.req_flags = 0;
                ret = ncsi_xmit_cmd(&nca);
                if (ret) {
                        netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
                                   ret);
                        return;
                }

                break;
        case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
                break;
        default:
                if (!(ndp->flags & NCSI_DEV_HWA) &&
                    state == NCSI_CHANNEL_ACTIVE) {
                        ncsi_report_link(ndp, true);
                        ndp->flags |= NCSI_DEV_RESHUFFLE;
                }

                spin_lock_irqsave(&nc->lock, flags);
                nc->state = NCSI_CHANNEL_INVISIBLE;
                spin_unlock_irqrestore(&nc->lock, flags);

                spin_lock_irqsave(&ndp->lock, flags);
                nc->state = NCSI_CHANNEL_INACTIVE;
                list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                spin_unlock_irqrestore(&ndp->lock, flags);
                ncsi_process_next_channel(ndp);
                return;
        }

        spin_lock_irqsave(&nc->lock, flags);
        nc->monitor.state++;
        spin_unlock_irqrestore(&nc->lock, flags);
        mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
        unsigned long flags;

        spin_lock_irqsave(&nc->lock, flags);
        WARN_ON_ONCE(nc->monitor.enabled);
        nc->monitor.enabled = true;
        nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
        spin_unlock_irqrestore(&nc->lock, flags);

        mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
        unsigned long flags;

        spin_lock_irqsave(&nc->lock, flags);
        if (!nc->monitor.enabled) {
                spin_unlock_irqrestore(&nc->lock, flags);
                return;
        }
        nc->monitor.enabled = false;
        spin_unlock_irqrestore(&nc->lock, flags);

        del_timer_sync(&nc->monitor.timer);
}

struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
                                       unsigned char id)
{
        struct ncsi_channel *nc;

        NCSI_FOR_EACH_CHANNEL(np, nc) {
                if (nc->id == id)
                        return nc;
        }

        return NULL;
}

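/*
 * Packages and channels are created on demand with GFP_ATOMIC so the add
 * helpers can run in atomic context. They are also idempotent: if another
 * path has already created an object with the same ID, the new allocation
 * is freed and the existing object is returned instead.
 */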
struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
        struct ncsi_channel *nc, *tmp;
        int index;
        unsigned long flags;

        nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
        if (!nc)
                return NULL;

        nc->id = id;
        nc->package = np;
        nc->state = NCSI_CHANNEL_INACTIVE;
        nc->monitor.enabled = false;
        setup_timer(&nc->monitor.timer,
                    ncsi_channel_monitor, (unsigned long)nc);
        spin_lock_init(&nc->lock);
        INIT_LIST_HEAD(&nc->link);
        for (index = 0; index < NCSI_CAP_MAX; index++)
                nc->caps[index].index = index;
        for (index = 0; index < NCSI_MODE_MAX; index++)
                nc->modes[index].index = index;

        spin_lock_irqsave(&np->lock, flags);
        tmp = ncsi_find_channel(np, id);
        if (tmp) {
                spin_unlock_irqrestore(&np->lock, flags);
                kfree(nc);
                return tmp;
        }

        list_add_tail_rcu(&nc->node, &np->channels);
        np->channel_num++;
        spin_unlock_irqrestore(&np->lock, flags);

        return nc;
}

static void ncsi_remove_channel(struct ncsi_channel *nc)
{
        struct ncsi_package *np = nc->package;
        struct ncsi_channel_filter *ncf;
        unsigned long flags;
        int i;

        /* Release filters */
        spin_lock_irqsave(&nc->lock, flags);
        for (i = 0; i < NCSI_FILTER_MAX; i++) {
                ncf = nc->filters[i];
                if (!ncf)
                        continue;

                nc->filters[i] = NULL;
                kfree(ncf);
        }

        nc->state = NCSI_CHANNEL_INACTIVE;
        spin_unlock_irqrestore(&nc->lock, flags);
        ncsi_stop_channel_monitor(nc);

        /* Remove and free channel */
        spin_lock_irqsave(&np->lock, flags);
        list_del_rcu(&nc->node);
        np->channel_num--;
        spin_unlock_irqrestore(&np->lock, flags);

        kfree(nc);
}

struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
                                       unsigned char id)
{
        struct ncsi_package *np;

        NCSI_FOR_EACH_PACKAGE(ndp, np) {
                if (np->id == id)
                        return np;
        }

        return NULL;
}

struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
                                      unsigned char id)
{
        struct ncsi_package *np, *tmp;
        unsigned long flags;

        np = kzalloc(sizeof(*np), GFP_ATOMIC);
        if (!np)
                return NULL;

        np->id = id;
        np->ndp = ndp;
        spin_lock_init(&np->lock);
        INIT_LIST_HEAD(&np->channels);

        spin_lock_irqsave(&ndp->lock, flags);
        tmp = ncsi_find_package(ndp, id);
        if (tmp) {
                spin_unlock_irqrestore(&ndp->lock, flags);
                kfree(np);
                return tmp;
        }

        list_add_tail_rcu(&np->node, &ndp->packages);
        ndp->package_num++;
        spin_unlock_irqrestore(&ndp->lock, flags);

        return np;
}

void ncsi_remove_package(struct ncsi_package *np)
{
        struct ncsi_dev_priv *ndp = np->ndp;
        struct ncsi_channel *nc, *tmp;
        unsigned long flags;

        /* Release all child channels */
        list_for_each_entry_safe(nc, tmp, &np->channels, node)
                ncsi_remove_channel(nc);

        /* Remove and free package */
        spin_lock_irqsave(&ndp->lock, flags);
        list_del_rcu(&np->node);
        ndp->package_num--;
        spin_unlock_irqrestore(&ndp->lock, flags);

        kfree(np);
}

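/*
 * The channel identifier carried in NCSI packets encodes both the package
 * and the channel: NCSI_PACKAGE_INDEX() and NCSI_CHANNEL_INDEX() (defined
 * in internal.h) split it so both objects can be looked up in one call.
 * Either output pointer may be NULL if the caller only needs one of them.
 */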
void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
                                   unsigned char id,
                                   struct ncsi_package **np,
                                   struct ncsi_channel **nc)
{
        struct ncsi_package *p;
        struct ncsi_channel *c;

        p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
        c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

        if (np)
                *np = p;
        if (nc)
                *nc = c;
}

/* For two consecutive NCSI commands, the packet IDs shouldn't be the
 * same. Otherwise, a bogus response might be matched to the wrong
 * command. So the available IDs are allocated in round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
                                        unsigned int req_flags)
{
        struct ncsi_request *nr = NULL;
        int i, limit = ARRAY_SIZE(ndp->requests);
        unsigned long flags;

        /* Check if there is an available request up to the ceiling */
        spin_lock_irqsave(&ndp->lock, flags);
        for (i = ndp->request_id; i < limit; i++) {
                if (ndp->requests[i].used)
                        continue;

                nr = &ndp->requests[i];
                nr->used = true;
                nr->flags = req_flags;
                ndp->request_id = i + 1;
                goto found;
        }

        /* Fall back to searching from the starting cursor */
        for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
                if (ndp->requests[i].used)
                        continue;

                nr = &ndp->requests[i];
                nr->used = true;
                nr->flags = req_flags;
                ndp->request_id = i + 1;
                goto found;
        }

found:
        spin_unlock_irqrestore(&ndp->lock, flags);
        return nr;
}

void ncsi_free_request(struct ncsi_request *nr)
{
        struct ncsi_dev_priv *ndp = nr->ndp;
        struct sk_buff *cmd, *rsp;
        unsigned long flags;
        bool driven;

        if (nr->enabled) {
                nr->enabled = false;
                del_timer_sync(&nr->timer);
        }

        spin_lock_irqsave(&ndp->lock, flags);
        cmd = nr->cmd;
        rsp = nr->rsp;
        nr->cmd = NULL;
        nr->rsp = NULL;
        nr->used = false;
        driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
        spin_unlock_irqrestore(&ndp->lock, flags);

        if (driven && cmd && --ndp->pending_req_num == 0)
                schedule_work(&ndp->work);

        /* Release command and response */
        consume_skb(cmd);
        consume_skb(rsp);
}

struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
        struct ncsi_dev_priv *ndp;

        NCSI_FOR_EACH_DEV(ndp) {
                if (ndp->ndev.dev == dev)
                        return &ndp->ndev;
        }

        return NULL;
}

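/*
 * Each outstanding request carries a timer. If it expires before a
 * response has been attached to the request, the request is released
 * here; if a response is already being handled, the response handler is
 * left to release it.
 */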
static void ncsi_request_timeout(unsigned long data)
{
        struct ncsi_request *nr = (struct ncsi_request *)data;
        struct ncsi_dev_priv *ndp = nr->ndp;
        unsigned long flags;

        /* If the request already has an associated response,
         * let the response handler release it.
         */
        spin_lock_irqsave(&ndp->lock, flags);
        nr->enabled = false;
        if (nr->rsp || !nr->cmd) {
                spin_unlock_irqrestore(&ndp->lock, flags);
                return;
        }
        spin_unlock_irqrestore(&ndp->lock, flags);

        /* Release the request */
        ncsi_free_request(nr);
}

/* Suspend the active channel: SP (select package), DCNT (disable channel
 * network TX), DC (disable channel) and DP (deselect package) are sent in
 * turn; once done, the channel is marked inactive and the next queued
 * channel is processed.
 */
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
        struct ncsi_dev *nd = &ndp->ndev;
        struct ncsi_package *np = ndp->active_package;
        struct ncsi_channel *nc = ndp->active_channel;
        struct ncsi_cmd_arg nca;
        unsigned long flags;
        int ret;

        nca.ndp = ndp;
        nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
        switch (nd->state) {
        case ncsi_dev_state_suspend:
                nd->state = ncsi_dev_state_suspend_select;
                /* Fall through */
        case ncsi_dev_state_suspend_select:
        case ncsi_dev_state_suspend_dcnt:
        case ncsi_dev_state_suspend_dc:
        case ncsi_dev_state_suspend_deselect:
                ndp->pending_req_num = 1;

                np = ndp->active_package;
                nc = ndp->active_channel;
                nca.package = np->id;
                if (nd->state == ncsi_dev_state_suspend_select) {
                        nca.type = NCSI_PKT_CMD_SP;
                        nca.channel = NCSI_RESERVED_CHANNEL;
                        if (ndp->flags & NCSI_DEV_HWA)
                                nca.bytes[0] = 0;
                        else
                                nca.bytes[0] = 1;
                        nd->state = ncsi_dev_state_suspend_dcnt;
                } else if (nd->state == ncsi_dev_state_suspend_dcnt) {
                        nca.type = NCSI_PKT_CMD_DCNT;
                        nca.channel = nc->id;
                        nd->state = ncsi_dev_state_suspend_dc;
                } else if (nd->state == ncsi_dev_state_suspend_dc) {
                        nca.type = NCSI_PKT_CMD_DC;
                        nca.channel = nc->id;
                        nca.bytes[0] = 1;
                        nd->state = ncsi_dev_state_suspend_deselect;
                } else if (nd->state == ncsi_dev_state_suspend_deselect) {
                        nca.type = NCSI_PKT_CMD_DP;
                        nca.channel = NCSI_RESERVED_CHANNEL;
                        nd->state = ncsi_dev_state_suspend_done;
                }

                ret = ncsi_xmit_cmd(&nca);
                if (ret) {
                        nd->state = ncsi_dev_state_functional;
                        return;
                }

                break;
        case ncsi_dev_state_suspend_done:
                spin_lock_irqsave(&nc->lock, flags);
                nc->state = NCSI_CHANNEL_INACTIVE;
                spin_unlock_irqrestore(&nc->lock, flags);
                ncsi_process_next_channel(ndp);

                break;
        default:
                netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
                            nd->state);
        }
}

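/*
 * Channel configuration state machine. The active channel is brought up
 * with this command sequence: SP (select package), CIS (clear initial
 * state), SMA (set MAC address), EBF (enable broadcast filter), optionally
 * EGMF (enable global multicast filter, IPv6 only), ECNT (enable channel
 * network TX), EC (enable channel), optionally AE (enable AEN) and finally
 * GLS (get link status). Each step transmits one event-driven command;
 * when its response is freed, the work item is rescheduled for the next
 * step.
 */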
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
        struct ncsi_dev *nd = &ndp->ndev;
        struct net_device *dev = nd->dev;
        struct ncsi_package *np = ndp->active_package;
        struct ncsi_channel *nc = ndp->active_channel;
        struct ncsi_cmd_arg nca;
        unsigned char index;
        unsigned long flags;
        int ret;

        nca.ndp = ndp;
        nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
        switch (nd->state) {
        case ncsi_dev_state_config:
        case ncsi_dev_state_config_sp:
                ndp->pending_req_num = 1;

                /* Select the specific package */
                nca.type = NCSI_PKT_CMD_SP;
                if (ndp->flags & NCSI_DEV_HWA)
                        nca.bytes[0] = 0;
                else
                        nca.bytes[0] = 1;
                nca.package = np->id;
                nca.channel = NCSI_RESERVED_CHANNEL;
                ret = ncsi_xmit_cmd(&nca);
                if (ret)
                        goto error;

                nd->state = ncsi_dev_state_config_cis;
                break;
        case ncsi_dev_state_config_cis:
                ndp->pending_req_num = 1;

                /* Clear initial state */
                nca.type = NCSI_PKT_CMD_CIS;
                nca.package = np->id;
                nca.channel = nc->id;
                ret = ncsi_xmit_cmd(&nca);
                if (ret)
                        goto error;

                nd->state = ncsi_dev_state_config_sma;
                break;
        case ncsi_dev_state_config_sma:
        case ncsi_dev_state_config_ebf:
#if IS_ENABLED(CONFIG_IPV6)
        case ncsi_dev_state_config_egmf:
#endif
        case ncsi_dev_state_config_ecnt:
        case ncsi_dev_state_config_ec:
        case ncsi_dev_state_config_ae:
        case ncsi_dev_state_config_gls:
                ndp->pending_req_num = 1;

                nca.package = np->id;
                nca.channel = nc->id;

                /* Use first entry in unicast filter table. Note that
                 * the MAC filter table starts from entry 1 instead of
                 * 0.
                 */
                if (nd->state == ncsi_dev_state_config_sma) {
                        nca.type = NCSI_PKT_CMD_SMA;
                        for (index = 0; index < 6; index++)
                                nca.bytes[index] = dev->dev_addr[index];
                        nca.bytes[6] = 0x1;
                        nca.bytes[7] = 0x1;
                        nd->state = ncsi_dev_state_config_ebf;
                } else if (nd->state == ncsi_dev_state_config_ebf) {
                        nca.type = NCSI_PKT_CMD_EBF;
                        nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
                        nd->state = ncsi_dev_state_config_ecnt;
#if IS_ENABLED(CONFIG_IPV6)
                        if (ndp->inet6_addr_num > 0 &&
                            (nc->caps[NCSI_CAP_GENERIC].cap &
                             NCSI_CAP_GENERIC_MC))
                                nd->state = ncsi_dev_state_config_egmf;
                        else
                                nd->state = ncsi_dev_state_config_ecnt;
                } else if (nd->state == ncsi_dev_state_config_egmf) {
                        nca.type = NCSI_PKT_CMD_EGMF;
                        nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
                        nd->state = ncsi_dev_state_config_ecnt;
#endif /* CONFIG_IPV6 */
                } else if (nd->state == ncsi_dev_state_config_ecnt) {
                        nca.type = NCSI_PKT_CMD_ECNT;
                        nd->state = ncsi_dev_state_config_ec;
                } else if (nd->state == ncsi_dev_state_config_ec) {
                        /* Enable AEN if it's supported */
                        nca.type = NCSI_PKT_CMD_EC;
                        nd->state = ncsi_dev_state_config_ae;
                        if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
                                nd->state = ncsi_dev_state_config_gls;
                } else if (nd->state == ncsi_dev_state_config_ae) {
                        nca.type = NCSI_PKT_CMD_AE;
                        nca.bytes[0] = 0;
                        nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
                        nd->state = ncsi_dev_state_config_gls;
                } else if (nd->state == ncsi_dev_state_config_gls) {
                        nca.type = NCSI_PKT_CMD_GLS;
                        nd->state = ncsi_dev_state_config_done;
                }

                ret = ncsi_xmit_cmd(&nca);
                if (ret)
                        goto error;
                break;
        case ncsi_dev_state_config_done:
                spin_lock_irqsave(&nc->lock, flags);
                if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
                        nc->state = NCSI_CHANNEL_ACTIVE;
                else
                        nc->state = NCSI_CHANNEL_INACTIVE;
                spin_unlock_irqrestore(&nc->lock, flags);

                ncsi_start_channel_monitor(nc);
                ncsi_process_next_channel(ndp);
                break;
        default:
                netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
                            nd->state);
        }

        return;

error:
        ncsi_report_link(ndp, true);
}

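/*
 * Pick the next channel to bring up: prefer an inactive channel that
 * already reports link up, otherwise fall back to the first inactive
 * channel found. The chosen channel is appended to the processing queue
 * and handed to ncsi_process_next_channel().
 */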
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
        struct ncsi_package *np;
        struct ncsi_channel *nc, *found;
        struct ncsi_channel_mode *ncm;
        unsigned long flags;

        /* The search stops once an inactive channel with link up
         * is found.
         */
        found = NULL;
        NCSI_FOR_EACH_PACKAGE(ndp, np) {
                NCSI_FOR_EACH_CHANNEL(np, nc) {
                        spin_lock_irqsave(&nc->lock, flags);

                        if (!list_empty(&nc->link) ||
                            nc->state != NCSI_CHANNEL_INACTIVE) {
                                spin_unlock_irqrestore(&nc->lock, flags);
                                continue;
                        }

                        if (!found)
                                found = nc;

                        ncm = &nc->modes[NCSI_MODE_LINK];
                        if (ncm->data[2] & 0x1) {
                                spin_unlock_irqrestore(&nc->lock, flags);
                                found = nc;
                                goto out;
                        }

                        spin_unlock_irqrestore(&nc->lock, flags);
                }
        }

        if (!found) {
                ncsi_report_link(ndp, true);
                return -ENODEV;
        }

out:
        spin_lock_irqsave(&ndp->lock, flags);
        list_add_tail_rcu(&found->link, &ndp->channel_queue);
        spin_unlock_irqrestore(&ndp->lock, flags);

        return ncsi_process_next_channel(ndp);
}

static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
        struct ncsi_package *np;
        struct ncsi_channel *nc;
        unsigned int cap;

        /* Hardware arbitration is disabled if any channel doesn't
         * explicitly advertise support for it.
         */
        NCSI_FOR_EACH_PACKAGE(ndp, np) {
                NCSI_FOR_EACH_CHANNEL(np, nc) {
                        cap = nc->caps[NCSI_CAP_GENERIC].cap;
                        if (!(cap & NCSI_CAP_GENERIC_HWA) ||
                            (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
                            NCSI_CAP_GENERIC_HWA_SUPPORT) {
                                ndp->flags &= ~NCSI_DEV_HWA;
                                return false;
                        }
                }
        }

        ndp->flags |= NCSI_DEV_HWA;
        return true;
}

static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
{
        struct ncsi_package *np;
        struct ncsi_channel *nc;
        unsigned long flags;

        /* Move all available channels to the processing queue */
        spin_lock_irqsave(&ndp->lock, flags);
        NCSI_FOR_EACH_PACKAGE(ndp, np) {
                NCSI_FOR_EACH_CHANNEL(np, nc) {
                        WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
                                     !list_empty(&nc->link));
                        ncsi_stop_channel_monitor(nc);
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                }
        }
        spin_unlock_irqrestore(&ndp->lock, flags);

        /* We may have no channels in extreme cases */
        if (list_empty(&ndp->channel_queue)) {
                ncsi_report_link(ndp, false);
                return -ENOENT;
        }

        return ncsi_process_next_channel(ndp);
}

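/*
 * Probe state machine, run once per NCSI device. It deselects all eight
 * possible packages, then selects each package in turn to enumerate it:
 * clear initial state on every possible channel, query version (GVI),
 * capabilities (GC) and link status (GLS) for each channel discovered on
 * the package, and deselect the package before moving to the next one.
 * When every package has been visited, NCSI_DEV_PROBED is set and either
 * hardware arbitration is enabled or a single active channel is chosen.
 */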
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
        struct ncsi_dev *nd = &ndp->ndev;
        struct ncsi_package *np;
        struct ncsi_channel *nc;
        struct ncsi_cmd_arg nca;
        unsigned char index;
        int ret;

        nca.ndp = ndp;
        nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
        switch (nd->state) {
        case ncsi_dev_state_probe:
                nd->state = ncsi_dev_state_probe_deselect;
                /* Fall through */
        case ncsi_dev_state_probe_deselect:
                ndp->pending_req_num = 8;

                /* Deselect all possible packages */
                nca.type = NCSI_PKT_CMD_DP;
                nca.channel = NCSI_RESERVED_CHANNEL;
                for (index = 0; index < 8; index++) {
                        nca.package = index;
                        ret = ncsi_xmit_cmd(&nca);
                        if (ret)
                                goto error;
                }

                nd->state = ncsi_dev_state_probe_package;
                break;
        case ncsi_dev_state_probe_package:
                ndp->pending_req_num = 16;

                /* Select all possible packages */
                nca.type = NCSI_PKT_CMD_SP;
                nca.bytes[0] = 1;
                nca.channel = NCSI_RESERVED_CHANNEL;
                for (index = 0; index < 8; index++) {
                        nca.package = index;
                        ret = ncsi_xmit_cmd(&nca);
                        if (ret)
                                goto error;
                }

                /* Deselect all possible packages */
                nca.type = NCSI_PKT_CMD_DP;
                for (index = 0; index < 8; index++) {
                        nca.package = index;
                        ret = ncsi_xmit_cmd(&nca);
                        if (ret)
                                goto error;
                }

                nd->state = ncsi_dev_state_probe_channel;
                break;
        case ncsi_dev_state_probe_channel:
                if (!ndp->active_package)
                        ndp->active_package = list_first_or_null_rcu(
                                &ndp->packages, struct ncsi_package, node);
                else if (list_is_last(&ndp->active_package->node,
                                      &ndp->packages))
                        ndp->active_package = NULL;
                else
                        ndp->active_package = list_next_entry(
                                ndp->active_package, node);

                /* All available packages and channels have been enumerated.
                 * The enumeration happens only once, when the NCSI interface
                 * is started, so we need to continue bringing up the
                 * interface afterwards.
                 *
                 * An active channel has to be chosen before it can be
                 * configured. Note that we possibly have no active channel
                 * in extreme situations.
                 */
                if (!ndp->active_package) {
                        ndp->flags |= NCSI_DEV_PROBED;
                        if (ncsi_check_hwa(ndp))
                                ncsi_enable_hwa(ndp);
                        else
                                ncsi_choose_active_channel(ndp);
                        return;
                }

                /* Select the active package */
                ndp->pending_req_num = 1;
                nca.type = NCSI_PKT_CMD_SP;
                nca.bytes[0] = 1;
                nca.package = ndp->active_package->id;
                nca.channel = NCSI_RESERVED_CHANNEL;
                ret = ncsi_xmit_cmd(&nca);
                if (ret)
                        goto error;

                nd->state = ncsi_dev_state_probe_cis;
                break;
        case ncsi_dev_state_probe_cis:
                ndp->pending_req_num = NCSI_RESERVED_CHANNEL;

                /* Clear initial state */
                nca.type = NCSI_PKT_CMD_CIS;
                nca.package = ndp->active_package->id;
                for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
                        nca.channel = index;
                        ret = ncsi_xmit_cmd(&nca);
                        if (ret)
                                goto error;
                }

                nd->state = ncsi_dev_state_probe_gvi;
                break;
        case ncsi_dev_state_probe_gvi:
        case ncsi_dev_state_probe_gc:
        case ncsi_dev_state_probe_gls:
                np = ndp->active_package;
                ndp->pending_req_num = np->channel_num;

                /* Retrieve version, capability or link status */
                if (nd->state == ncsi_dev_state_probe_gvi)
                        nca.type = NCSI_PKT_CMD_GVI;
                else if (nd->state == ncsi_dev_state_probe_gc)
                        nca.type = NCSI_PKT_CMD_GC;
                else
                        nca.type = NCSI_PKT_CMD_GLS;

                nca.package = np->id;
                NCSI_FOR_EACH_CHANNEL(np, nc) {
                        nca.channel = nc->id;
                        ret = ncsi_xmit_cmd(&nca);
                        if (ret)
                                goto error;
                }

                if (nd->state == ncsi_dev_state_probe_gvi)
                        nd->state = ncsi_dev_state_probe_gc;
                else if (nd->state == ncsi_dev_state_probe_gc)
                        nd->state = ncsi_dev_state_probe_gls;
                else
                        nd->state = ncsi_dev_state_probe_dp;
                break;
        case ncsi_dev_state_probe_dp:
                ndp->pending_req_num = 1;

                /* Deselect the active package */
                nca.type = NCSI_PKT_CMD_DP;
                nca.package = ndp->active_package->id;
                nca.channel = NCSI_RESERVED_CHANNEL;
                ret = ncsi_xmit_cmd(&nca);
                if (ret)
                        goto error;

                /* Scan channels in next package */
                nd->state = ncsi_dev_state_probe_channel;
                break;
        default:
                netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
                            nd->state);
        }

        return;
error:
        ncsi_report_link(ndp, true);
}

static void ncsi_dev_work(struct work_struct *work)
{
        struct ncsi_dev_priv *ndp = container_of(work,
                                                 struct ncsi_dev_priv, work);
        struct ncsi_dev *nd = &ndp->ndev;

        switch (nd->state & ncsi_dev_state_major) {
        case ncsi_dev_state_probe:
                ncsi_probe_channel(ndp);
                break;
        case ncsi_dev_state_suspend:
                ncsi_suspend_channel(ndp);
                break;
        case ncsi_dev_state_config:
                ncsi_configure_channel(ndp);
                break;
        default:
                netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
                            nd->state);
        }
}

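/*
 * Pop the next channel from ndp->channel_queue and start the appropriate
 * state machine for it: configuration if it was inactive, suspension if
 * it was active. With an empty queue, either re-run channel selection
 * (when NCSI_DEV_RESHUFFLE is set) or report the final link state.
 */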
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
        struct ncsi_channel *nc;
        int old_state;
        unsigned long flags;

        spin_lock_irqsave(&ndp->lock, flags);
        nc = list_first_or_null_rcu(&ndp->channel_queue,
                                    struct ncsi_channel, link);
        if (!nc) {
                spin_unlock_irqrestore(&ndp->lock, flags);
                goto out;
        }

        list_del_init(&nc->link);
        spin_unlock_irqrestore(&ndp->lock, flags);

        spin_lock_irqsave(&nc->lock, flags);
        old_state = nc->state;
        nc->state = NCSI_CHANNEL_INVISIBLE;
        spin_unlock_irqrestore(&nc->lock, flags);

        ndp->active_channel = nc;
        ndp->active_package = nc->package;

        switch (old_state) {
        case NCSI_CHANNEL_INACTIVE:
                ndp->ndev.state = ncsi_dev_state_config;
                ncsi_configure_channel(ndp);
                break;
        case NCSI_CHANNEL_ACTIVE:
                ndp->ndev.state = ncsi_dev_state_suspend;
                ncsi_suspend_channel(ndp);
                break;
        default:
                netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
                           old_state, nc->package->id, nc->id);
                ncsi_report_link(ndp, false);
                return -EINVAL;
        }

        return 0;

out:
        ndp->active_channel = NULL;
        ndp->active_package = NULL;
        if (ndp->flags & NCSI_DEV_RESHUFFLE) {
                ndp->flags &= ~NCSI_DEV_RESHUFFLE;
                return ncsi_choose_active_channel(ndp);
        }

        ncsi_report_link(ndp, false);
        return -ENODEV;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ncsi_inet6addr_event(struct notifier_block *this,
                                unsigned long event, void *data)
{
        struct inet6_ifaddr *ifa = data;
        struct net_device *dev = ifa->idev->dev;
        struct ncsi_dev *nd = ncsi_find_dev(dev);
        struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
        struct ncsi_package *np;
        struct ncsi_channel *nc;
        struct ncsi_cmd_arg nca;
        bool action;
        int ret;

        if (!ndp || (ipv6_addr_type(&ifa->addr) &
            (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
                return NOTIFY_OK;

        switch (event) {
        case NETDEV_UP:
                action = (++ndp->inet6_addr_num) == 1;
                nca.type = NCSI_PKT_CMD_EGMF;
                break;
        case NETDEV_DOWN:
                action = (--ndp->inet6_addr_num == 0);
                nca.type = NCSI_PKT_CMD_DGMF;
                break;
        default:
                return NOTIFY_OK;
        }

        /* We might not have an active package or channel yet. The
         * multicast filtering required for IPv6 will be enabled when
         * the active package and channel are chosen.
         */
        np = ndp->active_package;
        nc = ndp->active_channel;
        if (!action || !np || !nc)
                return NOTIFY_OK;

        /* There is no need to enable or disable it if the function
         * isn't supported.
         */
        if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
                return NOTIFY_OK;

        nca.ndp = ndp;
        nca.req_flags = 0;
        nca.package = np->id;
        nca.channel = nc->id;
        nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
        ret = ncsi_xmit_cmd(&nca);
        if (ret) {
                netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
                            (event == NETDEV_UP) ? "enable" : "disable", ret);
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static struct notifier_block ncsi_inet6addr_notifier = {
        .notifier_call = ncsi_inet6addr_event,
};
#endif /* CONFIG_IPV6 */

/* Register a net_device with the NCSI stack and set up its private state:
 * request table, work item, the ETH_P_NCSI packet handler and, for the
 * first device, the IPv6 address notifier. The typical driver-side flow
 * (only the ncsi_* calls come from this file) is:
 *
 *        nd = ncsi_register_dev(netdev, handler);
 *        ncsi_start_dev(nd);             e.g. from ndo_open
 *        ncsi_stop_dev(nd);              e.g. from ndo_stop
 *        ncsi_unregister_dev(nd);        on removal
 *
 * The handler is called from ncsi_report_link() after nd->state and
 * nd->link_up have been updated.
 */
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
                                   void (*handler)(struct ncsi_dev *ndev))
{
        struct ncsi_dev_priv *ndp;
        struct ncsi_dev *nd;
        unsigned long flags;
        int i;

        /* Check if the device has been registered or not */
        nd = ncsi_find_dev(dev);
        if (nd)
                return nd;

        /* Create NCSI device */
        ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
        if (!ndp)
                return NULL;

        nd = &ndp->ndev;
        nd->state = ncsi_dev_state_registered;
        nd->dev = dev;
        nd->handler = handler;
        ndp->pending_req_num = 0;
        INIT_LIST_HEAD(&ndp->channel_queue);
        INIT_WORK(&ndp->work, ncsi_dev_work);

        /* Initialize private NCSI device */
        spin_lock_init(&ndp->lock);
        INIT_LIST_HEAD(&ndp->packages);
        ndp->request_id = NCSI_REQ_START_IDX;
        for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
                ndp->requests[i].id = i;
                ndp->requests[i].ndp = ndp;
                setup_timer(&ndp->requests[i].timer,
                            ncsi_request_timeout,
                            (unsigned long)&ndp->requests[i]);
        }

        spin_lock_irqsave(&ncsi_dev_lock, flags);
#if IS_ENABLED(CONFIG_IPV6)
        ndp->inet6_addr_num = 0;
        if (list_empty(&ncsi_dev_list))
                register_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
        list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
        spin_unlock_irqrestore(&ncsi_dev_lock, flags);

        /* Register NCSI packet Rx handler */
        ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
        ndp->ptype.func = ncsi_rcv_rsp;
        ndp->ptype.dev = dev;
        dev_add_pack(&ndp->ptype);

        return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);

int ncsi_start_dev(struct ncsi_dev *nd)
{
        struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
        int ret;

        if (nd->state != ncsi_dev_state_registered &&
            nd->state != ncsi_dev_state_functional)
                return -ENOTTY;

        if (!(ndp->flags & NCSI_DEV_PROBED)) {
                nd->state = ncsi_dev_state_probe;
                schedule_work(&ndp->work);
                return 0;
        }

        if (ndp->flags & NCSI_DEV_HWA)
                ret = ncsi_enable_hwa(ndp);
        else
                ret = ncsi_choose_active_channel(ndp);

        return ret;
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);

void ncsi_stop_dev(struct ncsi_dev *nd)
{
        struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
        struct ncsi_package *np;
        struct ncsi_channel *nc;
        bool chained;
        int old_state;
        unsigned long flags;

        /* Stop the channel monitor and reset each channel's state */
        NCSI_FOR_EACH_PACKAGE(ndp, np) {
                NCSI_FOR_EACH_CHANNEL(np, nc) {
                        ncsi_stop_channel_monitor(nc);

                        spin_lock_irqsave(&nc->lock, flags);
                        chained = !list_empty(&nc->link);
                        old_state = nc->state;
                        nc->state = NCSI_CHANNEL_INACTIVE;
                        spin_unlock_irqrestore(&nc->lock, flags);

                        WARN_ON_ONCE(chained ||
                                     old_state == NCSI_CHANNEL_INVISIBLE);
                }
        }

        ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);

/* Undo ncsi_register_dev(): remove the packet handler, free all packages
 * and channels, take the device off the global list and, when the list
 * becomes empty, unregister the IPv6 address notifier.
 */
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
        struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
        struct ncsi_package *np, *tmp;
        unsigned long flags;

        dev_remove_pack(&ndp->ptype);

        list_for_each_entry_safe(np, tmp, &ndp->packages, node)
                ncsi_remove_package(np);

        spin_lock_irqsave(&ncsi_dev_lock, flags);
        list_del_rcu(&ndp->node);
#if IS_ENABLED(CONFIG_IPV6)
        if (list_empty(&ncsi_dev_list))
                unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
        spin_unlock_irqrestore(&ncsi_dev_lock, flags);

        kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);