// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES		32

#define NFP_TUN_PRE_TUN_RULE_LIMIT	32
#define NFP_TUN_PRE_TUN_RULE_DEL	BIT(0)
#define NFP_TUN_PRE_TUN_IDX_BIT		BIT(3)
#define NFP_TUN_PRE_TUN_IPV6_BIT	BIT(7)

/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:	options for the rule offload
 * @port_idx:	index of destination MAC address for the rule
 * @vlan_tci:	VLAN info associated with MAC
 * @host_ctx_id:	stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
	__be32 flags;
	__be16 port_idx;
	__be16 vlan_tci;
	__be32 host_ctx_id;
};

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:	sequence number of the message
 * @count:	number of tunnels reported in message
 * @flags:	options part of the request
 * @tun_info.ipv4:	dest IPv4 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:	reserved for future use
 * @tun_info:	tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:	sequence number of the message
 * @count:	number of tunnels reported in message
 * @flags:	options part of the request
 * @tun_info.ipv6:	dest IPv6 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:	reserved for future use
 * @tun_info:	tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns_v6 {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info_v6 {
		struct in6_addr ipv6;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:	destination ipv4 address for route
 * @reserved:	reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv6_addr:	destination ipv6 address for route
 */
struct nfp_tun_req_route_ipv6 {
	__be32 ingress_port;
	struct in6_addr ipv6_addr;
};

/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:	list pointer
 * @ip_add:	destination of route - can be IPv4 or IPv6
 */
struct nfp_offloaded_route {
	struct list_head list;
	u8 ip_add[];
};

#define NFP_FL_IPV4_ADDRS_MAX	32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

#define NFP_FL_IPV6_ADDRS_MAX	4

/**
 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv6_addr:	array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */
struct nfp_tun_ipv6_addr {
	__be32 count;
	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
	NFP_TUNNEL_MAC_OFFLOAD_ADD =	0,
	NFP_TUNNEL_MAC_OFFLOAD_DEL =	1,
	NFP_TUNNEL_MAC_OFFLOAD_MOD =	2,
};

#define NFP_MAX_MAC_INDEX	0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:	Hashtable entry
 * @addr:	Offloaded MAC address
 * @index:	Offloaded index for given MAC address
 * @ref_count:	Number of devs using this MAC address
 * @repr_list:	List of reprs sharing this MAC address
 * @bridge_count:	Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
	int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
	.key_len	= ETH_ALEN,
	.automatic_shrinking	= true,
};

void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
}

void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct nfp_tun_active_tuns_v6 *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	void *ipv6_add;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_IPV6_ADDRS_MAX) {
		nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv6_add = &payload->tun_info[i].ipv6;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
#endif
}

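/* Build a tunnel config cmsg of the given type, copy in the payload and
 * send it to the firmware. If the firmware has not advertised decap v2
 * support, the neighbour extension data at the end of TUN_NEIGH(_V6)
 * payloads is trimmed before sending.
 */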
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct nfp_flower_priv *priv = app->priv;
	struct sk_buff *skb;
	unsigned char *msg;

	if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) &&
	    (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
	     mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
		plen -= sizeof(struct nfp_tun_neigh_ext);

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

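/* Link a pre-tunnel rule with a matching neighbour entry. The neighbour's
 * extension fields take on the rule's host context and VLAN info, but only
 * if the IP version and local/remote MAC addresses match and the neighbour
 * is not already linked to a flow (e.g. in bonding setups sharing a MAC).
 */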
static void
nfp_tun_mutual_link(struct nfp_predt_entry *predt,
		    struct nfp_neigh_entry *neigh)
{
	struct nfp_fl_payload *flow_pay = predt->flow_pay;
	struct nfp_tun_neigh_ext *ext;
	struct nfp_tun_neigh *common;

	if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
		return;

	/* In the case of bonding it is possible that there might already
	 * be a flow linked (as the MAC address gets shared). If a flow
	 * is already linked just return.
	 */
	if (neigh->flow)
		return;

	common = neigh->is_ipv6 ?
		 &((struct nfp_tun_neigh_v6 *)neigh->payload)->common :
		 &((struct nfp_tun_neigh_v4 *)neigh->payload)->common;
	ext = neigh->is_ipv6 ?
	      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
	      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;

	if (memcmp(flow_pay->pre_tun_rule.loc_mac,
		   common->src_addr, ETH_ALEN) ||
	    memcmp(flow_pay->pre_tun_rule.rem_mac,
		   common->dst_addr, ETH_ALEN))
		return;

	list_add(&neigh->list_head, &predt->nn_list);
	neigh->flow = predt;
	ext->host_ctx = flow_pay->meta.host_ctx_id;
	ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci;
	ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid;
}

static void
nfp_tun_link_predt_entries(struct nfp_app *app,
			   struct nfp_neigh_entry *nn_entry)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_predt_entry *predt, *tmp;

	list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) {
		nfp_tun_mutual_link(predt, nn_entry);
	}
}

void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
					struct nfp_predt_entry *predt)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *nn_entry;
	struct rhashtable_iter iter;
	size_t neigh_size;
	u8 type;

	rhashtable_walk_enter(&priv->neigh_table, &iter);
	rhashtable_walk_start(&iter);
	while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(nn_entry))
			continue;
		nfp_tun_mutual_link(predt, nn_entry);
		neigh_size = nn_entry->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					   NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void nfp_tun_cleanup_nn_entries(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *neigh;
	struct nfp_tun_neigh_ext *ext;
	struct rhashtable_iter iter;
	size_t neigh_size;
	u8 type;

	rhashtable_walk_enter(&priv->neigh_table, &iter);
	rhashtable_walk_start(&iter);
	while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(neigh))
			continue;
		ext = neigh->is_ipv6 ?
		      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
		      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);

		neigh_size = neigh->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
					 GFP_ATOMIC);

		rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node,
				       neigh_table_params);
		if (neigh->flow)
			list_del(&neigh->list_head);
		kfree(neigh);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
					  struct nfp_predt_entry *predt)
{
	struct nfp_neigh_entry *neigh, *tmp;
	struct nfp_tun_neigh_ext *ext;
	size_t neigh_size;
	u8 type;

	list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) {
		ext = neigh->is_ipv6 ?
		      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
		      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
		neigh->flow = NULL;
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);
		list_del(&neigh->list_head);
		neigh_size = neigh->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
					 GFP_ATOMIC);
	}
}

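/* Offload or remove a neighbour entry on the NFP. A valid neighbour that is
 * not yet cached gets a new hashtable entry, is linked against any matching
 * pre-tunnel rules and is sent to the firmware. An invalidated neighbour is
 * reported with only the destination address set and its cached entry is
 * removed.
 */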
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    void *flow, struct neighbour *neigh, bool is_ipv6)
{
	bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
	size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
			    sizeof(struct nfp_tun_neigh_v4);
	unsigned long cookie = (unsigned long)neigh;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *nn_entry;
	u32 port_id;
	u8 mtype;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
	if (!port_id)
		return;

	spin_lock_bh(&priv->predt_lock);
	nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
					  neigh_table_params);
	if (!nn_entry && !neigh_invalid) {
		struct nfp_tun_neigh_ext *ext;
		struct nfp_tun_neigh *common;

		nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
				   GFP_ATOMIC);
		if (!nn_entry)
			goto err;

		nn_entry->payload = (char *)&nn_entry[1];
		nn_entry->neigh_cookie = cookie;
		nn_entry->is_ipv6 = is_ipv6;
		nn_entry->flow = NULL;
		if (is_ipv6) {
			struct flowi6 *flowi6 = (struct flowi6 *)flow;
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			payload->src_ipv6 = flowi6->saddr;
			payload->dst_ipv6 = flowi6->daddr;
			common = &payload->common;
			ext = &payload->ext;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct flowi4 *flowi4 = (struct flowi4 *)flow;
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			payload->src_ipv4 = flowi4->saddr;
			payload->dst_ipv4 = flowi4->daddr;
			common = &payload->common;
			ext = &payload->ext;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);
		ether_addr_copy(common->src_addr, netdev->dev_addr);
		neigh_ha_snapshot(common->dst_addr, neigh, netdev);
		common->port_id = cpu_to_be32(port_id);

		if (rhashtable_insert_fast(&priv->neigh_table,
					   &nn_entry->ht_node,
					   neigh_table_params))
			goto err;

		nfp_tun_link_predt_entries(app, nn_entry);
		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	} else if (nn_entry && neigh_invalid) {
		if (is_ipv6) {
			struct flowi6 *flowi6 = (struct flowi6 *)flow;
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			memset(payload, 0, sizeof(struct nfp_tun_neigh_v6));
			payload->dst_ipv6 = flowi6->daddr;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct flowi4 *flowi4 = (struct flowi4 *)flow;
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			memset(payload, 0, sizeof(struct nfp_tun_neigh_v4));
			payload->dst_ipv4 = flowi4->daddr;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		rhashtable_remove_fast(&priv->neigh_table,
				       &nn_entry->ht_node,
				       neigh_table_params);

		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);

		if (nn_entry->flow)
			list_del(&nn_entry->list_head);
		kfree(nn_entry);
	}

	spin_unlock_bh(&priv->predt_lock);
	return;

err:
	kfree(nn_entry);
	spin_unlock_bh(&priv->predt_lock);
	nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
}

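/* netevent notifier callback: on neighbour updates or redirects for repr or
 * offloadable internal ports, perform a route lookup for valid neighbours
 * (to fill in the source address) and update the offloaded neighbour state
 * via nfp_tun_write_neigh().
 */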
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct flowi4 flow4 = {};
	struct flowi6 flow6 = {};
	struct neighbour *n;
	struct nfp_app *app;
	bool neigh_invalid;
	bool ipv6 = false;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	if (n->tbl->family == AF_INET6)
		ipv6 = true;

	neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;

	if (ipv6)
		flow6.daddr = *(struct in6_addr *)n->primary_key;
	else
		flow4.daddr = *(__be32 *)n->primary_key;

	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
	app = app_priv->app;

	if (!nfp_netdev_is_nfp_repr(n->dev) &&
	    !nfp_flower_internal_port_can_offload(app, n->dev))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	if (ipv6) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!neigh_invalid) {
			struct dst_entry *dst;
			/* Use ipv6_dst_lookup_flow to populate flow6->saddr
			 * and other fields. This information is only needed
			 * for new entries, lookup can be skipped when an entry
			 * gets invalidated - as only the daddr is needed for
			 * deleting.
			 */
			dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
						  &flow6, NULL);
			if (IS_ERR(dst))
				return NOTIFY_DONE;

			dst_release(dst);
		}
		nfp_tun_write_neigh(n->dev, app, &flow6, n, true);
#else
		return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
	} else {
		if (!neigh_invalid) {
			struct rtable *rt;
			/* Use ip_route_output_key to populate flow4->saddr and
			 * other fields. This information is only needed for
			 * new entries, lookup can be skipped when an entry
			 * gets invalidated - as only the daddr is needed for
			 * deleting.
			 */
			rt = ip_route_output_key(dev_net(n->dev), &flow4);
			err = PTR_ERR_OR_ZERO(rt);
			if (err)
				return NOTIFY_DONE;

			ip_rt_put(rt);
		}
		nfp_tun_write_neigh(n->dev, app, &flow4, n, false);
	}
#else
	return NOTIFY_DONE;
#endif /* CONFIG_INET */

	return NOTIFY_OK;
}

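/* Handle an IPv4 route lookup request from the firmware: resolve the
 * requested destination in the ingress port's namespace and offload the
 * resulting neighbour entry. An IPv6 variant follows below.
 */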
void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto fail_rcu_unlock;
	nfp_tun_write_neigh(n->dev, app, &flow, n, false);
	neigh_release(n);
	rcu_read_unlock();
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv6 *payload;
	struct net_device *netdev;
	struct flowi6 flow = {};
	struct dst_entry *dst;
	struct neighbour *n;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv6_addr;
	flow.flowi6_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
					      NULL);
	if (IS_ERR(dst))
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	n = dst_neigh_lookup(dst, &flow.daddr);
	dst_release(dst);
	if (!n)
		goto fail_rcu_unlock;

	nfp_tun_write_neigh(n->dev, app, &flow, n, true);
	neigh_release(n);
	rcu_read_unlock();
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}

static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->tun.ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->tun.ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

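/* Tunnel endpoint IPv4 addresses are tracked on a reference counted list;
 * the full list is rewritten to the firmware whenever an address is added
 * or released.
 */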
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

static void nfp_tun_write_ipv6_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;
	struct nfp_tun_ipv6_addr payload;
	int count = 0;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
			nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
			break;
		}
		payload.ipv6_addr[count++] = entry->ipv6_addr;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);
	payload.count = cpu_to_be32(count);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
				 sizeof(struct nfp_tun_ipv6_addr),
				 &payload, GFP_KERNEL);
}

struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;

	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
		if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv6_off_lock);
			return entry;
		}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv6_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return NULL;
	}
	entry->ipv6_addr = *ipv6;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
	mutex_unlock(&priv->tun.ipv6_off_lock);

	nfp_tun_write_ipv6_list(app);

	return entry;
}

void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
{
	struct nfp_flower_priv *priv = app->priv;
	bool freed = false;

	mutex_lock(&priv->tun.ipv6_off_lock);
	if (!--entry->ref_count) {
		list_del(&entry->list);
		kfree(entry);
		freed = true;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);

	if (freed)
		nfp_tun_write_ipv6_list(app);
}

static int
__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
	struct nfp_tun_mac_addr_offload payload;

	memset(&payload, 0, sizeof(payload));

	if (del)
		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

	/* FW supports multiple MACs per cmsg but restrict to single. */
	payload.count = cpu_to_be16(1);
	payload.index = cpu_to_be16(idx);
	ether_addr_copy(payload.addr, mac);

	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
					sizeof(struct nfp_tun_mac_addr_offload),
					&payload, GFP_KERNEL);
}

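/* Offloaded MAC indexes carry the cmsg port type in the lower byte and
 * either the physical port id or an ida-allocated global id in the upper
 * bits. The helpers below convert between port ids, ida ids and the index
 * format carried in the MAC offload cmsg.
 */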
static bool nfp_tunnel_port_is_phy_repr(int port)
{
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
		return true;

	return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
	return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
					   struct net_device *netdev, bool mod)
{
	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;

		/* If modifying MAC, remove repr from old list first. */
		if (mod)
			list_del(&repr_priv->mac_list);

		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
	} else if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count++;
	}

	entry->ref_count++;
}

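/* Offload a (possibly shared) MAC address for the given netdev. Reprs on
 * physical ports reuse the port based index; non-repr devices and MACs
 * shared between ports get a globally allocated index, and bridge devices
 * additionally set the pre-tunnel rule bit in the index.
 */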
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  int port, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_offloaded_mac *entry;
	int ida_idx = -1, err;
	u16 nfp_mac_idx = 0;

	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
		if (entry->bridge_count ||
		    !nfp_flower_is_supported_bridge(netdev)) {
			nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
								   netdev, mod);
			return 0;
		}

		/* MAC is global but matches need to go to pre_tun table. */
		nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
	}

	if (!nfp_mac_idx) {
		/* Assign a global index if non-repr or MAC is now shared. */
		if (entry || !port) {
			ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
						NFP_MAX_MAC_INDEX, GFP_KERNEL);
			if (ida_idx < 0)
				return ida_idx;

			nfp_mac_idx =
				nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

			if (nfp_flower_is_supported_bridge(netdev))
				nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

		} else {
			nfp_mac_idx =
				nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		}
	}

	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_free_ida;
		}

		ether_addr_copy(entry->addr, netdev->dev_addr);
		INIT_LIST_HEAD(&entry->repr_list);

		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
					   &entry->ht_node,
					   offloaded_macs_params)) {
			err = -ENOMEM;
			goto err_free_entry;
		}
	}

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry->index = nfp_mac_idx;
	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

	return 0;

err_remove_hash:
	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
			       offloaded_macs_params);
err_free_entry:
	kfree(entry);
err_free_ida:
	if (ida_idx != -1)
		ida_free(&priv->tun.mac_off_ids, ida_idx);

	return err;
}

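/* Drop a reference to an offloaded MAC address. When the last bridge or
 * all-but-one repr users go away the index is downgraded (pre-tunnel bit
 * cleared or global id replaced by a port based one and the ida id freed);
 * once completely unused the MAC is deleted from the firmware.
 */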
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  const u8 *mac, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_tun_offloaded_mac *entry;
	struct nfp_repr *repr;
	u16 nfp_mac_idx;
	int ida_idx;

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;
	/* If del is part of a mod then mac_list is still in use elsewhere. */
	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count--;

		if (!entry->bridge_count && entry->ref_count) {
			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
						     false)) {
				nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
						     netdev_name(netdev));
				return 0;
			}

			entry->index = nfp_mac_idx;
			return 0;
		}
	}

	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
		entry->index = nfp_mac_idx;
		return 0;
	}

	if (entry->ref_count)
		return 0;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
					    &entry->ht_node,
					    offloaded_macs_params));

	if (nfp_flower_is_supported_bridge(netdev))
		nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
	else
		nfp_mac_idx = entry->index;

	/* If MAC has global ID then extract and free the ida entry. */
	if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
	}

	kfree(entry);

	return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

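/* Apply a MAC offload command (add/del/mod) for a repr or supported
 * non-repr netdev, keeping the cached offload state in the device's
 * private data in sync with what has been sent to the firmware.
 */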
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
		       enum nfp_flower_mac_offload_cmd cmd)
{
	struct nfp_flower_non_repr_priv *nr_priv = NULL;
	bool non_repr = false, *mac_offloaded;
	u8 *off_mac = NULL;
	int err, port = 0;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return 0;

		repr_priv = repr->app_priv;
		if (repr_priv->on_bridge)
			return 0;

		mac_offloaded = &repr_priv->mac_offloaded;
		off_mac = &repr_priv->offloaded_mac_addr[0];
		port = nfp_repr_get_port_id(netdev);
		if (!nfp_tunnel_port_is_phy_repr(port))
			return 0;
	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
		if (!nr_priv)
			return -ENOMEM;

		mac_offloaded = &nr_priv->mac_offloaded;
		off_mac = &nr_priv->offloaded_mac_addr[0];
		non_repr = true;
	} else {
		return 0;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	default:
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}

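/* netdev notifier handler: translate UP/DOWN/CHANGEADDR/CHANGEUPPER events
 * into MAC offload add/delete/modify commands so the offloaded MAC entries
 * track netdev state.
 */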
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr)
{
	int err;

	if (event == NETDEV_DOWN) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_UP) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEADDR) {
		/* Only offload addr change if netdev is already up. */
		if (!(netdev->flags & IFF_UP))
			return NOTIFY_OK;

		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEUPPER) {
		/* If a repr is attached to a bridge then tunnel packets
		 * entering the physical port are directed through the bridge
		 * datapath and cannot be directly detunneled. Therefore,
		 * associated offloaded MACs and indexes should not be used
		 * by fw for detunneling.
		 */
		struct netdev_notifier_changeupper_info *info = ptr;
		struct net_device *upper = info->upper_dev;
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		if (!nfp_netdev_is_nfp_repr(netdev) ||
		    !nfp_flower_is_supported_bridge(upper))
			return NOTIFY_OK;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return NOTIFY_OK;

		repr_priv = repr->app_priv;

		if (info->linking) {
			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_DEL))
				nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
						     netdev_name(netdev));
			repr_priv->on_bridge = true;
		} else {
			repr_priv->on_bridge = false;

			if (!(netdev->flags & IFF_UP))
				return NOTIFY_OK;

			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_ADD))
				nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
						     netdev_name(netdev));
		}
	}
	return NOTIFY_OK;
}

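/* Offload a pre-tunnel rule for a flow: look up the MAC index of the
 * internal egress device, set or clear the IPv6 bit in it and send the
 * rule to the firmware. The index and VLAN are cached on the flow so the
 * rule can be deleted even if the device has gone away by then.
 */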
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
				 struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_offloaded_mac *mac_entry;
	struct nfp_flower_meta_tci *key_meta;
	struct nfp_tun_pre_tun_rule payload;
	struct net_device *internal_dev;
	int err;

	if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
		return -ENOSPC;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	internal_dev = flow->pre_tun_rule.dev;
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.host_ctx_id = flow->meta.host_ctx_id;

	/* Lookup MAC index for the pre-tunnel rule egress device.
	 * Note that because the device is always an internal port, it will
	 * have a constant global index so does not need to be tracked.
	 */
	mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
						     internal_dev->dev_addr);
	if (!mac_entry)
		return -ENOENT;

	/* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
	 * set/clear for port_idx.
	 */
	key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
		mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
	else
		mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;

	payload.port_idx = cpu_to_be16(mac_entry->index);

	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
	flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
	flow->pre_tun_rule.port_idx = payload.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt++;

	return 0;
}

int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
				     struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_pre_tun_rule payload;
	u32 tmp_flags = 0;
	int err;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
	payload.flags = cpu_to_be32(tmp_flags);
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.port_idx = flow->pre_tun_rule.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt--;

	return 0;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	/* Initialise rhash for MAC offload tracking. */
	err = rhashtable_init(&priv->tun.offloaded_macs,
			      &offloaded_macs_params);
	if (err)
		return err;

	ida_init(&priv->tun.mac_off_ids);

	/* Initialise priv data for IPv4/v6 offloading. */
	mutex_init(&priv->tun.ipv4_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
	mutex_init(&priv->tun.ipv6_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

	/* Initialise priv data for neighbour offloading. */
	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netevent_notifier(&priv->tun.neigh_nb);
	if (err) {
		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
					    nfp_check_rhashtable_empty, NULL);
		return err;
	}

	return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netevent_notifier(&priv->tun.neigh_nb);

	ida_destroy(&priv->tun.mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	mutex_destroy(&priv->tun.ipv6_off_lock);

	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);

	nfp_tun_cleanup_nn_entries(app);
}