// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES               32

#define NFP_TUN_PRE_TUN_RULE_LIMIT	32
#define NFP_TUN_PRE_TUN_RULE_DEL	BIT(0)
#define NFP_TUN_PRE_TUN_IDX_BIT		BIT(3)
#define NFP_TUN_PRE_TUN_IPV6_BIT	BIT(7)

/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:		options for the rule offload
 * @port_idx:		index of destination MAC address for the rule
 * @vlan_tci:		VLAN info associated with MAC
 * @host_ctx_id:	stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
	__be32 flags;
	__be16 port_idx;
	__be16 vlan_tci;
	__be32 host_ctx_id;
};

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in message
 * @flags:		options part of the request
 * @tun_info.ipv4:		dest IPv4 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:		sequence number of the message
 * @count:		number of tunnels reported in message
 * @flags:		options part of the request
 * @tun_info.ipv6:		dest IPv6 address of active route
 * @tun_info.egress_port:	port the encapsulated packet egressed
 * @tun_info.extra:		reserved for future use
 * @tun_info:		tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns_v6 {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info_v6 {
		struct in6_addr ipv6;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv6_addr:		destination ipv6 address for route
 */
struct nfp_tun_req_route_ipv6 {
	__be32 ingress_port;
	struct in6_addr ipv6_addr;
};

/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:	list pointer
 * @ip_add:	destination of route - can be IPv4 or IPv6
 */
struct nfp_offloaded_route {
	struct list_head list;
	u8 ip_add[];
};

#define NFP_FL_IPV4_ADDRS_MAX           32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

#define NFP_FL_IPV6_ADDRS_MAX           4

/**
 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv6_addr:	array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */
struct nfp_tun_ipv6_addr {
	__be32 count;
	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};

enum nfp_flower_mac_offload_cmd {
	NFP_TUNNEL_MAC_OFFLOAD_ADD =		0,
	NFP_TUNNEL_MAC_OFFLOAD_DEL =		1,
	NFP_TUNNEL_MAC_OFFLOAD_MOD =		2,
};

#define NFP_MAX_MAC_INDEX       0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:		Hashtable entry
 * @addr:		Offloaded MAC address
 * @index:		Offloaded index for given MAC address
 * @ref_count:		Number of devs using this MAC address
 * @repr_list:		List of reprs sharing this MAC address
 * @bridge_count:	Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
	int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
	.key_len	= ETH_ALEN,
	.automatic_shrinking	= true,
};

void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
}

void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct nfp_tun_active_tuns_v6 *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	void *ipv6_add;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_IPV6_ADDRS_MAX) {
		nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv6_add = &payload->tun_info[i].ipv6;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
#endif
}

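/* Allocate and send a tunnel config control message of @plen bytes carrying
 * @pdata. For neighbour messages the trailing ext section is omitted when
 * the firmware does not advertise NFP_FL_FEATS_DECAP_V2.
 */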
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct nfp_flower_priv *priv = app->priv;
	struct sk_buff *skb;
	unsigned char *msg;

	if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) &&
	    (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
	     mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
		plen -= sizeof(struct nfp_tun_neigh_ext);

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}

static void
nfp_tun_mutual_link(struct nfp_predt_entry *predt,
		    struct nfp_neigh_entry *neigh)
{
	struct nfp_fl_payload *flow_pay = predt->flow_pay;
	struct nfp_tun_neigh_ext *ext;
	struct nfp_tun_neigh *common;

	if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
		return;

	/* In the case of bonding it is possible that there might already
	 * be a flow linked (as the MAC address gets shared). If a flow
	 * is already linked just return.
	 */
	if (neigh->flow)
		return;

	common = neigh->is_ipv6 ?
		 &((struct nfp_tun_neigh_v6 *)neigh->payload)->common :
		 &((struct nfp_tun_neigh_v4 *)neigh->payload)->common;
	ext = neigh->is_ipv6 ?
	      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
	      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;

	if (memcmp(flow_pay->pre_tun_rule.loc_mac,
		   common->src_addr, ETH_ALEN) ||
	    memcmp(flow_pay->pre_tun_rule.rem_mac,
		   common->dst_addr, ETH_ALEN))
		return;

	list_add(&neigh->list_head, &predt->nn_list);
	neigh->flow = predt;
	ext->host_ctx = flow_pay->meta.host_ctx_id;
	ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci;
	ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid;
}

static void
nfp_tun_link_predt_entries(struct nfp_app *app,
			   struct nfp_neigh_entry *nn_entry)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_predt_entry *predt, *tmp;

	list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) {
		nfp_tun_mutual_link(predt, nn_entry);
	}
}

void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
					struct nfp_predt_entry *predt)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *nn_entry;
	struct rhashtable_iter iter;
	size_t neigh_size;
	u8 type;

	rhashtable_walk_enter(&priv->neigh_table, &iter);
	rhashtable_walk_start(&iter);
	while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(nn_entry))
			continue;
		nfp_tun_mutual_link(predt, nn_entry);
		neigh_size = nn_entry->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					   NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void nfp_tun_cleanup_nn_entries(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *neigh;
	struct nfp_tun_neigh_ext *ext;
	struct rhashtable_iter iter;
	size_t neigh_size;
	u8 type;

	rhashtable_walk_enter(&priv->neigh_table, &iter);
	rhashtable_walk_start(&iter);
	while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(neigh))
			continue;
		ext = neigh->is_ipv6 ?
		      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
		      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);

		neigh_size = neigh->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
					 GFP_ATOMIC);

		rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node,
				       neigh_table_params);
		if (neigh->flow)
			list_del(&neigh->list_head);
		kfree(neigh);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
					  struct nfp_predt_entry *predt)
{
	struct nfp_neigh_entry *neigh, *tmp;
	struct nfp_tun_neigh_ext *ext;
	size_t neigh_size;
	u8 type;

	list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) {
		ext = neigh->is_ipv6 ?
		      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
		      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
		neigh->flow = NULL;
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);
		list_del(&neigh->list_head);
		neigh_size = neigh->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
					 GFP_ATOMIC);
	}
}

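/* Add, update or invalidate the firmware neighbour entry for @neigh.
 * A new entry is written when the neighbour is valid and not yet known,
 * an existing entry is zeroed (keeping only the destination address) and
 * removed when the neighbour becomes invalid, and a valid known entry is
 * only re-sent when @override is set.
 */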
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    void *flow, struct neighbour *neigh, bool is_ipv6,
		    bool override)
{
	bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
	size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
			    sizeof(struct nfp_tun_neigh_v4);
	unsigned long cookie = (unsigned long)neigh;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *nn_entry;
	u32 port_id;
	u8 mtype;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
	if (!port_id)
		return;

	spin_lock_bh(&priv->predt_lock);
	nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
					  neigh_table_params);
	if (!nn_entry && !neigh_invalid) {
		struct nfp_tun_neigh_ext *ext;
		struct nfp_tun_neigh *common;

		nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
				   GFP_ATOMIC);
		if (!nn_entry)
			goto err;

		nn_entry->payload = (char *)&nn_entry[1];
		nn_entry->neigh_cookie = cookie;
		nn_entry->is_ipv6 = is_ipv6;
		nn_entry->flow = NULL;
		if (is_ipv6) {
			struct flowi6 *flowi6 = (struct flowi6 *)flow;
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			payload->src_ipv6 = flowi6->saddr;
			payload->dst_ipv6 = flowi6->daddr;
			common = &payload->common;
			ext = &payload->ext;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct flowi4 *flowi4 = (struct flowi4 *)flow;
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			payload->src_ipv4 = flowi4->saddr;
			payload->dst_ipv4 = flowi4->daddr;
			common = &payload->common;
			ext = &payload->ext;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);
		ether_addr_copy(common->src_addr, netdev->dev_addr);
		neigh_ha_snapshot(common->dst_addr, neigh, netdev);
		common->port_id = cpu_to_be32(port_id);

		if (rhashtable_insert_fast(&priv->neigh_table,
					   &nn_entry->ht_node,
					   neigh_table_params))
			goto err;

		nfp_tun_link_predt_entries(app, nn_entry);
		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	} else if (nn_entry && neigh_invalid) {
		if (is_ipv6) {
			struct flowi6 *flowi6 = (struct flowi6 *)flow;
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			memset(payload, 0, sizeof(struct nfp_tun_neigh_v6));
			payload->dst_ipv6 = flowi6->daddr;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct flowi4 *flowi4 = (struct flowi4 *)flow;
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			memset(payload, 0, sizeof(struct nfp_tun_neigh_v4));
			payload->dst_ipv4 = flowi4->daddr;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		rhashtable_remove_fast(&priv->neigh_table,
				       &nn_entry->ht_node,
				       neigh_table_params);

		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);

		if (nn_entry->flow)
			list_del(&nn_entry->list_head);
		kfree(nn_entry);
	} else if (nn_entry && !neigh_invalid && override) {
		mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
				  NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_tun_link_predt_entries(app, nn_entry);
		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	}

	spin_unlock_bh(&priv->predt_lock);
	return;

err:
	kfree(nn_entry);
	spin_unlock_bh(&priv->predt_lock);
	nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
}

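/* Netevent notifier: react to neighbour updates and redirects on reprs and
 * offloadable internal ports. For valid neighbours a route lookup fills in
 * the source address before the entry is pushed to firmware; invalid
 * neighbours only need the destination address for removal.
 */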
static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct neighbour *n;
	struct nfp_app *app;
	bool neigh_invalid;
	int err;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}

	neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;

	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
	app = app_priv->app;

	if (!nfp_netdev_is_nfp_repr(n->dev) &&
	    !nfp_flower_internal_port_can_offload(app, n->dev))
		return NOTIFY_DONE;

#if IS_ENABLED(CONFIG_INET)
	if (n->tbl->family == AF_INET6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct flowi6 flow6 = {};

		flow6.daddr = *(struct in6_addr *)n->primary_key;
		if (!neigh_invalid) {
			struct dst_entry *dst;
			/* Use ipv6_dst_lookup_flow to populate flow6->saddr
			 * and other fields. This information is only needed
			 * for new entries, lookup can be skipped when an entry
			 * gets invalidated - as only the daddr is needed for
			 * deleting.
			 */
			dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
						  &flow6, NULL);
			if (IS_ERR(dst))
				return NOTIFY_DONE;

			dst_release(dst);
		}
		nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
#else
		return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
	} else {
		struct flowi4 flow4 = {};

		flow4.daddr = *(__be32 *)n->primary_key;
		if (!neigh_invalid) {
			struct rtable *rt;
			/* Use ip_route_output_key to populate flow4->saddr and
			 * other fields. This information is only needed for
			 * new entries, lookup can be skipped when an entry
			 * gets invalidated - as only the daddr is needed for
			 * deleting.
			 */
			rt = ip_route_output_key(dev_net(n->dev), &flow4);
			err = PTR_ERR_OR_ZERO(rt);
			if (err)
				return NOTIFY_DONE;

			ip_rt_put(rt);
		}
		nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
	}
#else
	return NOTIFY_DONE;
#endif /* CONFIG_INET */

	return NOTIFY_OK;
}

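/* Firmware has requested a route/neighbour lookup for a tunnel destination.
 * Resolve the route in the namespace of the ingress port and answer with a
 * TUN_NEIGH control message via nfp_tun_write_neigh().
 */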
void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto fail_rcu_unlock;
	nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
	neigh_release(n);
	rcu_read_unlock();
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv6 *payload;
	struct net_device *netdev;
	struct flowi6 flow = {};
	struct dst_entry *dst;
	struct neighbour *n;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;

	flow.daddr = payload->ipv6_addr;
	flow.flowi6_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
					      NULL);
	if (IS_ERR(dst))
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	n = dst_neigh_lookup(dst, &flow.daddr);
	dst_release(dst);
	if (!n)
		goto fail_rcu_unlock;

	nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
	neigh_release(n);
	rcu_read_unlock();
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}

static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->tun.ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->tun.ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

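/* Tunnel endpoint IPv4 addresses are kept in a reference counted list;
 * any add or delete re-sends the complete list to the firmware.
 */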
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

static void nfp_tun_write_ipv6_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;
	struct nfp_tun_ipv6_addr payload;
	int count = 0;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
			nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
			break;
		}
		payload.ipv6_addr[count++] = entry->ipv6_addr;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);
	payload.count = cpu_to_be32(count);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
				 sizeof(struct nfp_tun_ipv6_addr),
				 &payload, GFP_KERNEL);
}

struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;

	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
		if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv6_off_lock);
			return entry;
		}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv6_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return NULL;
	}
	entry->ipv6_addr = *ipv6;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
	mutex_unlock(&priv->tun.ipv6_off_lock);

	nfp_tun_write_ipv6_list(app);

	return entry;
}

void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
{
	struct nfp_flower_priv *priv = app->priv;
	bool freed = false;

	mutex_lock(&priv->tun.ipv6_off_lock);
	if (!--entry->ref_count) {
		list_del(&entry->list);
		kfree(entry);
		freed = true;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);

	if (freed)
		nfp_tun_write_ipv6_list(app);
}

static int
__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
	struct nfp_tun_mac_addr_offload payload;

	memset(&payload, 0, sizeof(payload));

	if (del)
		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

	/* FW supports multiple MACs per cmsg but restrict to single. */
	payload.count = cpu_to_be16(1);
	payload.index = cpu_to_be16(idx);
	ether_addr_copy(payload.addr, mac);

	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
					sizeof(struct nfp_tun_mac_addr_offload),
					&payload, GFP_KERNEL);
}

static bool nfp_tunnel_port_is_phy_repr(int port)
{
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
		return true;

	return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
	return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
					   struct net_device *netdev, bool mod)
{
	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;

		/* If modifying MAC, remove repr from old list first. */
		if (mod)
			list_del(&repr_priv->mac_list);

		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
	} else if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count++;
	}

	entry->ref_count++;
}

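/* Offload the MAC address of @netdev, reusing and reference counting an
 * existing entry when the address is already offloaded. Physical port reprs
 * use an index derived from their port id, while shared or non-repr
 * addresses are assigned a global index from the MAC ida; bridge devices
 * additionally set the pre-tunnel rule bit in the index.
 */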
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  int port, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_offloaded_mac *entry;
	int ida_idx = -1, err;
	u16 nfp_mac_idx = 0;

	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
		if (entry->bridge_count ||
		    !nfp_flower_is_supported_bridge(netdev)) {
			nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
								   netdev, mod);
			return 0;
		}

		/* MAC is global but matches need to go to pre_tun table. */
		nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
	}

	if (!nfp_mac_idx) {
		/* Assign a global index if non-repr or MAC is now shared. */
		if (entry || !port) {
			ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
						NFP_MAX_MAC_INDEX, GFP_KERNEL);
			if (ida_idx < 0)
				return ida_idx;

			nfp_mac_idx =
				nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

			if (nfp_flower_is_supported_bridge(netdev))
				nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

		} else {
			nfp_mac_idx =
				nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		}
	}

	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_free_ida;
		}

		ether_addr_copy(entry->addr, netdev->dev_addr);
		INIT_LIST_HEAD(&entry->repr_list);

		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
					   &entry->ht_node,
					   offloaded_macs_params)) {
			err = -ENOMEM;
			goto err_free_entry;
		}
	}

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry->index = nfp_mac_idx;
	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

	return 0;

err_remove_hash:
	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
			       offloaded_macs_params);
err_free_entry:
	kfree(entry);
err_free_ida:
	if (ida_idx != -1)
		ida_free(&priv->tun.mac_off_ids, ida_idx);

	return err;
}

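/* Release a reference to an offloaded MAC address. Handles falling back
 * from a bridge index, demoting a global index to a port based one when
 * only a single repr still uses the address, and deleting the offload
 * entirely once the reference count drops to zero.
 */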
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  const u8 *mac, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_tun_offloaded_mac *entry;
	struct nfp_repr *repr;
	u16 nfp_mac_idx;
	int ida_idx;

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;
	/* If del is part of a mod then mac_list is still in use elsewhere. */
	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count--;

		if (!entry->bridge_count && entry->ref_count) {
			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
						     false)) {
				nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
						     netdev_name(netdev));
				return 0;
			}

			entry->index = nfp_mac_idx;
			return 0;
		}
	}

	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
		entry->index = nfp_mac_idx;
		return 0;
	}

	if (entry->ref_count)
		return 0;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
					    &entry->ht_node,
					    offloaded_macs_params));

	if (nfp_flower_is_supported_bridge(netdev))
		nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
	else
		nfp_mac_idx = entry->index;

	/* If MAC has global ID then extract and free the ida entry. */
	if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
	}

	kfree(entry);

	return __nfp_tunnel_offload_mac(app, mac, 0, true);
}

static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
		       enum nfp_flower_mac_offload_cmd cmd)
{
	struct nfp_flower_non_repr_priv *nr_priv = NULL;
	bool non_repr = false, *mac_offloaded;
	u8 *off_mac = NULL;
	int err, port = 0;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return 0;

		repr_priv = repr->app_priv;
		if (repr_priv->on_bridge)
			return 0;

		mac_offloaded = &repr_priv->mac_offloaded;
		off_mac = &repr_priv->offloaded_mac_addr[0];
		port = nfp_repr_get_port_id(netdev);
		if (!nfp_tunnel_port_is_phy_repr(port))
			return 0;
	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
		if (!nr_priv)
			return -ENOMEM;

		mac_offloaded = &nr_priv->mac_offloaded;
		off_mac = &nr_priv->offloaded_mac_addr[0];
		non_repr = true;
	} else {
		return 0;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	default:
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}

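/* Netdev notifier: keep offloaded MAC addresses in sync with netdev state
 * (UP, DOWN, CHANGEADDR) and stop using repr MACs for detunneling while the
 * repr is attached to a bridge.
 */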
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr)
{
	int err;

	if (event == NETDEV_DOWN) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_UP) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEADDR) {
		/* Only offload addr change if netdev is already up. */
		if (!(netdev->flags & IFF_UP))
			return NOTIFY_OK;

		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEUPPER) {
		/* If a repr is attached to a bridge then tunnel packets
		 * entering the physical port are directed through the bridge
		 * datapath and cannot be directly detunneled. Therefore,
		 * associated offloaded MACs and indexes should not be used
		 * by fw for detunneling.
		 */
		struct netdev_notifier_changeupper_info *info = ptr;
		struct net_device *upper = info->upper_dev;
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		if (!nfp_netdev_is_nfp_repr(netdev) ||
		    !nfp_flower_is_supported_bridge(upper))
			return NOTIFY_OK;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return NOTIFY_OK;

		repr_priv = repr->app_priv;

		if (info->linking) {
			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_DEL))
				nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
						     netdev_name(netdev));
			repr_priv->on_bridge = true;
		} else {
			repr_priv->on_bridge = false;

			if (!(netdev->flags & IFF_UP))
				return NOTIFY_OK;

			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_ADD))
				nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
						     netdev_name(netdev));
		}
	}
	return NOTIFY_OK;
}

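/* Offload a pre-tunnel rule: look up the MAC index of the internal egress
 * device, flag IPv6 matches in that index, and send the rule to firmware.
 * The index and VLAN are cached in @flow so the rule can be deleted later
 * without the netdev.
 */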
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
				 struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_offloaded_mac *mac_entry;
	struct nfp_flower_meta_tci *key_meta;
	struct nfp_tun_pre_tun_rule payload;
	struct net_device *internal_dev;
	int err;

	if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
		return -ENOSPC;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	internal_dev = flow->pre_tun_rule.dev;
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.host_ctx_id = flow->meta.host_ctx_id;

	/* Lookup MAC index for the pre-tunnel rule egress device.
	 * Note that because the device is always an internal port, it will
	 * have a constant global index so does not need to be tracked.
	 */
	mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
						     internal_dev->dev_addr);
	if (!mac_entry)
		return -ENOENT;

	/* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
	 * set/clear for port_idx.
	 */
	key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
		mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
	else
		mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;

	payload.port_idx = cpu_to_be16(mac_entry->index);

	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
	flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
	flow->pre_tun_rule.port_idx = payload.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt++;

	return 0;
}

int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
				     struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_pre_tun_rule payload;
	u32 tmp_flags = 0;
	int err;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
	payload.flags = cpu_to_be32(tmp_flags);
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.port_idx = flow->pre_tun_rule.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt--;

	return 0;
}

int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	/* Initialise rhash for MAC offload tracking. */
	err = rhashtable_init(&priv->tun.offloaded_macs,
			      &offloaded_macs_params);
	if (err)
		return err;

	ida_init(&priv->tun.mac_off_ids);

	/* Initialise priv data for IPv4/v6 offloading. */
	mutex_init(&priv->tun.ipv4_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
	mutex_init(&priv->tun.ipv6_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

	/* Initialise priv data for neighbour offloading. */
	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netevent_notifier(&priv->tun.neigh_nb);
	if (err) {
		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
					    nfp_check_rhashtable_empty, NULL);
		return err;
	}

	return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netevent_notifier(&priv->tun.neigh_nb);

	ida_destroy(&priv->tun.mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	mutex_destroy(&priv->tun.ipv6_off_lock);

	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);

	nfp_tun_cleanup_nn_entries(app);
}