// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES	32

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in message
 * @flags:		options part of the request
 * @tun_info.ipv4:		dest IPv4 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_neigh - neighbour/route entry on the NFP
 * @dst_ipv4:	destination IPv4 address
 * @src_ipv4:	source IPv4 address
 * @dst_addr:	destination MAC address
 * @src_addr:	source MAC address
 * @port_id:	NFP port to output packet on - associated with source IPv4
 */
struct nfp_tun_neigh {
	__be32 dst_ipv4;
	__be32 src_ipv4;
	u8 dst_addr[ETH_ALEN];
	u8 src_addr[ETH_ALEN];
	__be32 port_id;
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
 * @ipv4_addr:	destination of route
 * @list:	list pointer
 */
struct nfp_ipv4_route_entry {
	__be32 ipv4_addr;
	struct list_head list;
};

#define NFP_FL_IPV4_ADDRS_MAX	32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
	NFP_TUNNEL_MAC_OFFLOAD_ADD = 0,
	NFP_TUNNEL_MAC_OFFLOAD_DEL = 1,
	NFP_TUNNEL_MAC_OFFLOAD_MOD = 2,
};

#define NFP_MAX_MAC_INDEX	0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:	Hashtable entry
 * @addr:	Offloaded MAC address
 * @index:	Offloaded index for given MAC address
 * @ref_count:	Number of devs using this MAC address
 * @repr_list:	List of reprs sharing this MAC address
 */
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
};

static const struct rhashtable_params offloaded_macs_params = {
	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
	.key_len	= ETH_ALEN,
	.automatic_shrinking	= true,
};
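
/* Firmware periodically reports which tunnels have carried traffic; for each
 * reported destination, refresh the used timestamp of the corresponding
 * kernel neighbour entry so that in-use offloaded routes are not aged out.
 */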
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
}

static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct sk_buff *skb;
	unsigned char *msg;

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->tun.neigh_off_lock);
			return true;
		}
	}
	spin_unlock_bh(&priv->tun.neigh_off_lock);
	return false;
}
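
/* neigh_off_list caches the destination IPs for which a neighbour entry has
 * been written to the firmware, so netevent notifications can be filtered to
 * routes the NFP is actually using.
 */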
static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			spin_unlock_bh(&priv->tun.neigh_off_lock);
			return;
		}
	}
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_bh(&priv->tun.neigh_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
		return;
	}

	entry->ipv4_addr = ipv4_addr;
	list_add_tail(&entry->list, &priv->tun.neigh_off_list);
	spin_unlock_bh(&priv->tun.neigh_off_lock);
}

static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *entry;
	struct list_head *ptr, *storage;

	spin_lock_bh(&priv->tun.neigh_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
		if (entry->ipv4_addr == ipv4_addr) {
			list_del(&entry->list);
			kfree(entry);
			break;
		}
	}
	spin_unlock_bh(&priv->tun.neigh_off_lock);
}

static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
	struct nfp_tun_neigh payload;
	u32 port_id;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
	if (!port_id)
		return;

	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
	payload.dst_ipv4 = flow->daddr;

	/* If entry has expired send dst IP with all other fields 0. */
	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		goto send_msg;
	}

	/* Have a valid neighbour so populate rest of entry. */
	payload.src_ipv4 = flow->saddr;
	ether_addr_copy(payload.src_addr, netdev->dev_addr);
	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
	payload.port_id = cpu_to_be32(port_id);
	/* Add destination of new route to NFP cache. */
	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);

send_msg:
	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
				 sizeof(struct nfp_tun_neigh),
				 (unsigned char *)&payload, flag);
}
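
/* React to neighbour table updates and redirects for representor netdevs:
 * entries for routes already offloaded to the NFP are refreshed via
 * nfp_tun_write_neigh(); an invalid neighbour is sent with only its
 * destination IP populated.
 */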
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct nfp_app *app;
	struct rtable *rt;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	flow.daddr = *(__be32 *)n->primary_key;

	/* Only concerned with route changes for representors. */
	if (!nfp_netdev_is_nfp_repr(n->dev))
		return NOTIFY_DONE;

	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
	app = app_priv->app;

	/* Only concerned with changes to routes already added to NFP. */
	if (!nfp_tun_has_route(app, flow.daddr))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup to populate flow data. */
	rt = ip_route_output_key(dev_net(n->dev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		return NOTIFY_DONE;

	ip_rt_put(rt);
#else
	return NOTIFY_DONE;
#endif

	flow.flowi4_proto = IPPROTO_UDP;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);

	return NOTIFY_OK;
}

void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto fail_rcu_unlock;
	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
	neigh_release(n);
	rcu_read_unlock();
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
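
/* Tunnel endpoint IPv4 addresses are reference counted on ipv4_off_list; any
 * change results in the full list being re-sent to the firmware as a
 * NFP_FLOWER_CMSG_TYPE_TUN_IPS message.
 */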
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->tun.ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->tun.ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
	struct nfp_tun_mac_addr_offload payload;

	memset(&payload, 0, sizeof(payload));

	if (del)
		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

	/* FW supports multiple MACs per cmsg but restrict to single. */
	payload.count = cpu_to_be16(1);
	payload.index = cpu_to_be16(idx);
	ether_addr_copy(payload.addr, mac);

	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
					sizeof(struct nfp_tun_mac_addr_offload),
					&payload, GFP_KERNEL);
}
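
/* Helpers for the 16-bit offloaded MAC index: the low byte carries the port
 * type and the bits above it carry either the physical port id or an
 * IDA-allocated id (for "global" indexes shared by several netdevs), i.e.
 * (port << 8) | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT for a physical port.
 */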
static bool nfp_tunnel_port_is_phy_repr(int port)
{
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
		return true;

	return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
	return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
					   struct net_device *netdev, bool mod)
{
	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;

		/* If modifying MAC, remove repr from old list first. */
		if (mod)
			list_del(&repr_priv->mac_list);

		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
	}

	entry->ref_count++;
}

static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  int port, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	int ida_idx = NFP_MAX_MAC_INDEX, err;
	struct nfp_tun_offloaded_mac *entry;
	u16 nfp_mac_idx = 0;

	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
		nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
		return 0;
	}

	/* Assign a global index if non-repr or MAC address is now shared. */
	if (entry || !port) {
		ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
					 NFP_MAX_MAC_INDEX, GFP_KERNEL);
		if (ida_idx < 0)
			return ida_idx;

		nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
	} else {
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
	}

	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_free_ida;
		}

		ether_addr_copy(entry->addr, netdev->dev_addr);
		INIT_LIST_HEAD(&entry->repr_list);

		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
					   &entry->ht_node,
					   offloaded_macs_params)) {
			err = -ENOMEM;
			goto err_free_entry;
		}
	}

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry->index = nfp_mac_idx;
	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

	return 0;

err_remove_hash:
	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
			       offloaded_macs_params);
err_free_entry:
	kfree(entry);
err_free_ida:
	if (ida_idx != NFP_MAX_MAC_INDEX)
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);

	return err;
}
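
/* Drop one reference on an offloaded MAC. If exactly one repr is left using
 * it, the entry is reverted to that repr's physical port index and the global
 * IDA id is released; once the last user is gone the entry is removed from
 * the hashtable and a delete message is sent to the firmware.
 */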
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  u8 *mac, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_tun_offloaded_mac *entry;
	struct nfp_repr *repr;
	int ida_idx;

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;
	/* If del is part of a mod then mac_list is still in use elsewhere. */
	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		u16 nfp_mac_idx;
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
		entry->index = nfp_mac_idx;
		return 0;
	}

	if (entry->ref_count)
		return 0;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
					    &entry->ht_node,
					    offloaded_macs_params));
	/* If MAC has global ID then extract and free the ida entry. */
	if (nfp_tunnel_is_mac_idx_global(entry->index)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
	}

	kfree(entry);

	return __nfp_tunnel_offload_mac(app, mac, 0, true);
}
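
/* Central dispatch for MAC offload: physical-port representors and other
 * offloadable netdevs get their address added, deleted or modified on the
 * firmware; the offloaded state is tracked in the (repr or non-repr) private
 * data so deletes and modifies are only attempted after a successful add.
 */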
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
		       enum nfp_flower_mac_offload_cmd cmd)
{
	struct nfp_flower_non_repr_priv *nr_priv = NULL;
	bool non_repr = false, *mac_offloaded;
	u8 *off_mac = NULL;
	int err, port = 0;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return 0;

		repr_priv = repr->app_priv;
		mac_offloaded = &repr_priv->mac_offloaded;
		off_mac = &repr_priv->offloaded_mac_addr[0];
		port = nfp_repr_get_port_id(netdev);
		if (!nfp_tunnel_port_is_phy_repr(port))
			return 0;
	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
		if (!nr_priv)
			return -ENOMEM;

		mac_offloaded = &nr_priv->mac_offloaded;
		off_mac = &nr_priv->offloaded_mac_addr[0];
		non_repr = true;
	} else {
		return 0;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	default:
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}

int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr)
{
	int err;

	if (event == NETDEV_DOWN) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_UP) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEADDR) {
		/* Only offload addr change if netdev is already up. */
		if (!(netdev->flags & IFF_UP))
			return NOTIFY_OK;

		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
					     netdev_name(netdev));
	}
	return NOTIFY_OK;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	/* Initialise rhash for MAC offload tracking. */
	err = rhashtable_init(&priv->tun.offloaded_macs,
			      &offloaded_macs_params);
	if (err)
		return err;

	ida_init(&priv->tun.mac_off_ids);

	/* Initialise priv data for IPv4 offloading. */
	mutex_init(&priv->tun.ipv4_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);

	/* Initialise priv data for neighbour offloading. */
	spin_lock_init(&priv->tun.neigh_off_lock);
	INIT_LIST_HEAD(&priv->tun.neigh_off_list);
	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netevent_notifier(&priv->tun.neigh_nb);
	if (err) {
		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
					    nfp_check_rhashtable_empty, NULL);
		return err;
	}

	return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_route_entry *route_entry;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netevent_notifier(&priv->tun.neigh_nb);

	ida_destroy(&priv->tun.mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	/* Free any memory that may be occupied by the route list. */
	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
					 list);
		list_del(&route_entry->list);
		kfree(route_entry);
	}

	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);
}