1 /* 2 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like 3 * implementation 4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> 5 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 */ 12 13 #include <linux/kernel.h> 14 #include <linux/types.h> 15 #include <linux/spinlock.h> 16 #include <linux/hashtable.h> 17 #include <linux/crc32.h> 18 #include <linux/netdevice.h> 19 #include <linux/inetdevice.h> 20 #include <linux/if_vlan.h> 21 #include <linux/if_bridge.h> 22 #include <net/neighbour.h> 23 #include <net/switchdev.h> 24 #include <net/ip_fib.h> 25 #include <net/arp.h> 26 27 #include "rocker.h" 28 #include "rocker_tlv.h" 29 30 struct ofdpa_flow_tbl_key { 31 u32 priority; 32 enum rocker_of_dpa_table_id tbl_id; 33 union { 34 struct { 35 u32 in_pport; 36 u32 in_pport_mask; 37 enum rocker_of_dpa_table_id goto_tbl; 38 } ig_port; 39 struct { 40 u32 in_pport; 41 __be16 vlan_id; 42 __be16 vlan_id_mask; 43 enum rocker_of_dpa_table_id goto_tbl; 44 bool untagged; 45 __be16 new_vlan_id; 46 } vlan; 47 struct { 48 u32 in_pport; 49 u32 in_pport_mask; 50 __be16 eth_type; 51 u8 eth_dst[ETH_ALEN]; 52 u8 eth_dst_mask[ETH_ALEN]; 53 __be16 vlan_id; 54 __be16 vlan_id_mask; 55 enum rocker_of_dpa_table_id goto_tbl; 56 bool copy_to_cpu; 57 } term_mac; 58 struct { 59 __be16 eth_type; 60 __be32 dst4; 61 __be32 dst4_mask; 62 enum rocker_of_dpa_table_id goto_tbl; 63 u32 group_id; 64 } ucast_routing; 65 struct { 66 u8 eth_dst[ETH_ALEN]; 67 u8 eth_dst_mask[ETH_ALEN]; 68 int has_eth_dst; 69 int has_eth_dst_mask; 70 __be16 vlan_id; 71 u32 tunnel_id; 72 enum rocker_of_dpa_table_id goto_tbl; 73 u32 group_id; 74 bool copy_to_cpu; 75 } bridge; 76 struct { 77 u32 in_pport; 78 u32 in_pport_mask; 79 u8 
eth_src[ETH_ALEN]; 80 u8 eth_src_mask[ETH_ALEN]; 81 u8 eth_dst[ETH_ALEN]; 82 u8 eth_dst_mask[ETH_ALEN]; 83 __be16 eth_type; 84 __be16 vlan_id; 85 __be16 vlan_id_mask; 86 u8 ip_proto; 87 u8 ip_proto_mask; 88 u8 ip_tos; 89 u8 ip_tos_mask; 90 u32 group_id; 91 } acl; 92 }; 93 }; 94 95 struct ofdpa_flow_tbl_entry { 96 struct hlist_node entry; 97 u32 cmd; 98 u64 cookie; 99 struct ofdpa_flow_tbl_key key; 100 size_t key_len; 101 u32 key_crc32; /* key */ 102 struct fib_info *fi; 103 }; 104 105 struct ofdpa_group_tbl_entry { 106 struct hlist_node entry; 107 u32 cmd; 108 u32 group_id; /* key */ 109 u16 group_count; 110 u32 *group_ids; 111 union { 112 struct { 113 u8 pop_vlan; 114 } l2_interface; 115 struct { 116 u8 eth_src[ETH_ALEN]; 117 u8 eth_dst[ETH_ALEN]; 118 __be16 vlan_id; 119 u32 group_id; 120 } l2_rewrite; 121 struct { 122 u8 eth_src[ETH_ALEN]; 123 u8 eth_dst[ETH_ALEN]; 124 __be16 vlan_id; 125 bool ttl_check; 126 u32 group_id; 127 } l3_unicast; 128 }; 129 }; 130 131 struct ofdpa_fdb_tbl_entry { 132 struct hlist_node entry; 133 u32 key_crc32; /* key */ 134 bool learned; 135 unsigned long touched; 136 struct ofdpa_fdb_tbl_key { 137 struct ofdpa_port *ofdpa_port; 138 u8 addr[ETH_ALEN]; 139 __be16 vlan_id; 140 } key; 141 }; 142 143 struct ofdpa_internal_vlan_tbl_entry { 144 struct hlist_node entry; 145 int ifindex; /* key */ 146 u32 ref_count; 147 __be16 vlan_id; 148 }; 149 150 struct ofdpa_neigh_tbl_entry { 151 struct hlist_node entry; 152 __be32 ip_addr; /* key */ 153 struct net_device *dev; 154 u32 ref_count; 155 u32 index; 156 u8 eth_dst[ETH_ALEN]; 157 bool ttl_check; 158 }; 159 160 enum { 161 OFDPA_CTRL_LINK_LOCAL_MCAST, 162 OFDPA_CTRL_LOCAL_ARP, 163 OFDPA_CTRL_IPV4_MCAST, 164 OFDPA_CTRL_IPV6_MCAST, 165 OFDPA_CTRL_DFLT_BRIDGING, 166 OFDPA_CTRL_DFLT_OVS, 167 OFDPA_CTRL_MAX, 168 }; 169 170 #define OFDPA_INTERNAL_VLAN_ID_BASE 0x0f00 171 #define OFDPA_N_INTERNAL_VLANS 255 172 #define OFDPA_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) 173 #define 
OFDPA_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS) 174 #define OFDPA_UNTAGGED_VID 0 175 176 struct ofdpa { 177 struct rocker *rocker; 178 DECLARE_HASHTABLE(flow_tbl, 16); 179 spinlock_t flow_tbl_lock; /* for flow tbl accesses */ 180 u64 flow_tbl_next_cookie; 181 DECLARE_HASHTABLE(group_tbl, 16); 182 spinlock_t group_tbl_lock; /* for group tbl accesses */ 183 struct timer_list fdb_cleanup_timer; 184 DECLARE_HASHTABLE(fdb_tbl, 16); 185 spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ 186 unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN]; 187 DECLARE_HASHTABLE(internal_vlan_tbl, 8); 188 spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ 189 DECLARE_HASHTABLE(neigh_tbl, 16); 190 spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ 191 u32 neigh_tbl_next_index; 192 unsigned long ageing_time; 193 bool fib_aborted; 194 }; 195 196 struct ofdpa_port { 197 struct ofdpa *ofdpa; 198 struct rocker_port *rocker_port; 199 struct net_device *dev; 200 u32 pport; 201 struct net_device *bridge_dev; 202 __be16 internal_vlan_id; 203 int stp_state; 204 u32 brport_flags; 205 unsigned long ageing_time; 206 bool ctrls[OFDPA_CTRL_MAX]; 207 unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN]; 208 }; 209 210 static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 211 static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 212 static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; 213 static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; 214 static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; 215 static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; 216 static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; 217 static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; 218 static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; 219 220 /* Rocker 
priority levels for flow table entries. Higher 221 * priority match takes precedence over lower priority match. 222 */ 223 224 enum { 225 OFDPA_PRIORITY_UNKNOWN = 0, 226 OFDPA_PRIORITY_IG_PORT = 1, 227 OFDPA_PRIORITY_VLAN = 1, 228 OFDPA_PRIORITY_TERM_MAC_UCAST = 0, 229 OFDPA_PRIORITY_TERM_MAC_MCAST = 1, 230 OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, 231 OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, 232 OFDPA_PRIORITY_BRIDGING_VLAN = 3, 233 OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, 234 OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, 235 OFDPA_PRIORITY_BRIDGING_TENANT = 3, 236 OFDPA_PRIORITY_ACL_CTRL = 3, 237 OFDPA_PRIORITY_ACL_NORMAL = 2, 238 OFDPA_PRIORITY_ACL_DFLT = 1, 239 }; 240 241 static bool ofdpa_vlan_id_is_internal(__be16 vlan_id) 242 { 243 u16 start = OFDPA_INTERNAL_VLAN_ID_BASE; 244 u16 end = 0xffe; 245 u16 _vlan_id = ntohs(vlan_id); 246 247 return (_vlan_id >= start && _vlan_id <= end); 248 } 249 250 static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port, 251 u16 vid, bool *pop_vlan) 252 { 253 __be16 vlan_id; 254 255 if (pop_vlan) 256 *pop_vlan = false; 257 vlan_id = htons(vid); 258 if (!vlan_id) { 259 vlan_id = ofdpa_port->internal_vlan_id; 260 if (pop_vlan) 261 *pop_vlan = true; 262 } 263 264 return vlan_id; 265 } 266 267 static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port, 268 __be16 vlan_id) 269 { 270 if (ofdpa_vlan_id_is_internal(vlan_id)) 271 return 0; 272 273 return ntohs(vlan_id); 274 } 275 276 static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port, 277 const char *kind) 278 { 279 return ofdpa_port->bridge_dev && 280 !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind); 281 } 282 283 static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port) 284 { 285 return ofdpa_port_is_slave(ofdpa_port, "bridge"); 286 } 287 288 static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port) 289 { 290 return ofdpa_port_is_slave(ofdpa_port, "openvswitch"); 291 } 292 293 #define 
OFDPA_OP_FLAG_REMOVE BIT(0) 294 #define OFDPA_OP_FLAG_NOWAIT BIT(1) 295 #define OFDPA_OP_FLAG_LEARNED BIT(2) 296 #define OFDPA_OP_FLAG_REFRESH BIT(3) 297 298 static bool ofdpa_flags_nowait(int flags) 299 { 300 return flags & OFDPA_OP_FLAG_NOWAIT; 301 } 302 303 /************************************************************* 304 * Flow, group, FDB, internal VLAN and neigh command prepares 305 *************************************************************/ 306 307 static int 308 ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, 309 const struct ofdpa_flow_tbl_entry *entry) 310 { 311 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, 312 entry->key.ig_port.in_pport)) 313 return -EMSGSIZE; 314 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, 315 entry->key.ig_port.in_pport_mask)) 316 return -EMSGSIZE; 317 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, 318 entry->key.ig_port.goto_tbl)) 319 return -EMSGSIZE; 320 321 return 0; 322 } 323 324 static int 325 ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, 326 const struct ofdpa_flow_tbl_entry *entry) 327 { 328 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, 329 entry->key.vlan.in_pport)) 330 return -EMSGSIZE; 331 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, 332 entry->key.vlan.vlan_id)) 333 return -EMSGSIZE; 334 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, 335 entry->key.vlan.vlan_id_mask)) 336 return -EMSGSIZE; 337 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, 338 entry->key.vlan.goto_tbl)) 339 return -EMSGSIZE; 340 if (entry->key.vlan.untagged && 341 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, 342 entry->key.vlan.new_vlan_id)) 343 return -EMSGSIZE; 344 345 return 0; 346 } 347 348 static int 349 ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, 350 const struct ofdpa_flow_tbl_entry *entry) 351 { 352 if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_IN_PPORT, 353 entry->key.term_mac.in_pport)) 354 return -EMSGSIZE; 355 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, 356 entry->key.term_mac.in_pport_mask)) 357 return -EMSGSIZE; 358 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, 359 entry->key.term_mac.eth_type)) 360 return -EMSGSIZE; 361 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, 362 ETH_ALEN, entry->key.term_mac.eth_dst)) 363 return -EMSGSIZE; 364 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, 365 ETH_ALEN, entry->key.term_mac.eth_dst_mask)) 366 return -EMSGSIZE; 367 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, 368 entry->key.term_mac.vlan_id)) 369 return -EMSGSIZE; 370 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, 371 entry->key.term_mac.vlan_id_mask)) 372 return -EMSGSIZE; 373 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, 374 entry->key.term_mac.goto_tbl)) 375 return -EMSGSIZE; 376 if (entry->key.term_mac.copy_to_cpu && 377 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, 378 entry->key.term_mac.copy_to_cpu)) 379 return -EMSGSIZE; 380 381 return 0; 382 } 383 384 static int 385 ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, 386 const struct ofdpa_flow_tbl_entry *entry) 387 { 388 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, 389 entry->key.ucast_routing.eth_type)) 390 return -EMSGSIZE; 391 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, 392 entry->key.ucast_routing.dst4)) 393 return -EMSGSIZE; 394 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, 395 entry->key.ucast_routing.dst4_mask)) 396 return -EMSGSIZE; 397 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, 398 entry->key.ucast_routing.goto_tbl)) 399 return -EMSGSIZE; 400 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, 401 entry->key.ucast_routing.group_id)) 402 return -EMSGSIZE; 403 404 return 0; 
405 } 406 407 static int 408 ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, 409 const struct ofdpa_flow_tbl_entry *entry) 410 { 411 if (entry->key.bridge.has_eth_dst && 412 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, 413 ETH_ALEN, entry->key.bridge.eth_dst)) 414 return -EMSGSIZE; 415 if (entry->key.bridge.has_eth_dst_mask && 416 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, 417 ETH_ALEN, entry->key.bridge.eth_dst_mask)) 418 return -EMSGSIZE; 419 if (entry->key.bridge.vlan_id && 420 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, 421 entry->key.bridge.vlan_id)) 422 return -EMSGSIZE; 423 if (entry->key.bridge.tunnel_id && 424 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, 425 entry->key.bridge.tunnel_id)) 426 return -EMSGSIZE; 427 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, 428 entry->key.bridge.goto_tbl)) 429 return -EMSGSIZE; 430 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, 431 entry->key.bridge.group_id)) 432 return -EMSGSIZE; 433 if (entry->key.bridge.copy_to_cpu && 434 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, 435 entry->key.bridge.copy_to_cpu)) 436 return -EMSGSIZE; 437 438 return 0; 439 } 440 441 static int 442 ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, 443 const struct ofdpa_flow_tbl_entry *entry) 444 { 445 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, 446 entry->key.acl.in_pport)) 447 return -EMSGSIZE; 448 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, 449 entry->key.acl.in_pport_mask)) 450 return -EMSGSIZE; 451 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, 452 ETH_ALEN, entry->key.acl.eth_src)) 453 return -EMSGSIZE; 454 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, 455 ETH_ALEN, entry->key.acl.eth_src_mask)) 456 return -EMSGSIZE; 457 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, 458 ETH_ALEN, entry->key.acl.eth_dst)) 459 return -EMSGSIZE; 460 
if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, 461 ETH_ALEN, entry->key.acl.eth_dst_mask)) 462 return -EMSGSIZE; 463 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, 464 entry->key.acl.eth_type)) 465 return -EMSGSIZE; 466 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, 467 entry->key.acl.vlan_id)) 468 return -EMSGSIZE; 469 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, 470 entry->key.acl.vlan_id_mask)) 471 return -EMSGSIZE; 472 473 switch (ntohs(entry->key.acl.eth_type)) { 474 case ETH_P_IP: 475 case ETH_P_IPV6: 476 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, 477 entry->key.acl.ip_proto)) 478 return -EMSGSIZE; 479 if (rocker_tlv_put_u8(desc_info, 480 ROCKER_TLV_OF_DPA_IP_PROTO_MASK, 481 entry->key.acl.ip_proto_mask)) 482 return -EMSGSIZE; 483 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, 484 entry->key.acl.ip_tos & 0x3f)) 485 return -EMSGSIZE; 486 if (rocker_tlv_put_u8(desc_info, 487 ROCKER_TLV_OF_DPA_IP_DSCP_MASK, 488 entry->key.acl.ip_tos_mask & 0x3f)) 489 return -EMSGSIZE; 490 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, 491 (entry->key.acl.ip_tos & 0xc0) >> 6)) 492 return -EMSGSIZE; 493 if (rocker_tlv_put_u8(desc_info, 494 ROCKER_TLV_OF_DPA_IP_ECN_MASK, 495 (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) 496 return -EMSGSIZE; 497 break; 498 } 499 500 if (entry->key.acl.group_id != ROCKER_GROUP_NONE && 501 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, 502 entry->key.acl.group_id)) 503 return -EMSGSIZE; 504 505 return 0; 506 } 507 508 static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port, 509 struct rocker_desc_info *desc_info, 510 void *priv) 511 { 512 const struct ofdpa_flow_tbl_entry *entry = priv; 513 struct rocker_tlv *cmd_info; 514 int err = 0; 515 516 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) 517 return -EMSGSIZE; 518 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 519 if 
(!cmd_info) 520 return -EMSGSIZE; 521 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, 522 entry->key.tbl_id)) 523 return -EMSGSIZE; 524 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, 525 entry->key.priority)) 526 return -EMSGSIZE; 527 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) 528 return -EMSGSIZE; 529 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, 530 entry->cookie)) 531 return -EMSGSIZE; 532 533 switch (entry->key.tbl_id) { 534 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: 535 err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry); 536 break; 537 case ROCKER_OF_DPA_TABLE_ID_VLAN: 538 err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry); 539 break; 540 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: 541 err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry); 542 break; 543 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: 544 err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry); 545 break; 546 case ROCKER_OF_DPA_TABLE_ID_BRIDGING: 547 err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry); 548 break; 549 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: 550 err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry); 551 break; 552 default: 553 err = -ENOTSUPP; 554 break; 555 } 556 557 if (err) 558 return err; 559 560 rocker_tlv_nest_end(desc_info, cmd_info); 561 562 return 0; 563 } 564 565 static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port, 566 struct rocker_desc_info *desc_info, 567 void *priv) 568 { 569 const struct ofdpa_flow_tbl_entry *entry = priv; 570 struct rocker_tlv *cmd_info; 571 572 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) 573 return -EMSGSIZE; 574 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 575 if (!cmd_info) 576 return -EMSGSIZE; 577 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, 578 entry->cookie)) 579 return -EMSGSIZE; 580 rocker_tlv_nest_end(desc_info, cmd_info); 581 582 return 0; 583 } 584 585 static int 586 
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, 587 struct ofdpa_group_tbl_entry *entry) 588 { 589 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, 590 ROCKER_GROUP_PORT_GET(entry->group_id))) 591 return -EMSGSIZE; 592 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, 593 entry->l2_interface.pop_vlan)) 594 return -EMSGSIZE; 595 596 return 0; 597 } 598 599 static int 600 ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, 601 const struct ofdpa_group_tbl_entry *entry) 602 { 603 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, 604 entry->l2_rewrite.group_id)) 605 return -EMSGSIZE; 606 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && 607 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, 608 ETH_ALEN, entry->l2_rewrite.eth_src)) 609 return -EMSGSIZE; 610 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && 611 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, 612 ETH_ALEN, entry->l2_rewrite.eth_dst)) 613 return -EMSGSIZE; 614 if (entry->l2_rewrite.vlan_id && 615 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, 616 entry->l2_rewrite.vlan_id)) 617 return -EMSGSIZE; 618 619 return 0; 620 } 621 622 static int 623 ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, 624 const struct ofdpa_group_tbl_entry *entry) 625 { 626 int i; 627 struct rocker_tlv *group_ids; 628 629 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, 630 entry->group_count)) 631 return -EMSGSIZE; 632 633 group_ids = rocker_tlv_nest_start(desc_info, 634 ROCKER_TLV_OF_DPA_GROUP_IDS); 635 if (!group_ids) 636 return -EMSGSIZE; 637 638 for (i = 0; i < entry->group_count; i++) 639 /* Note TLV array is 1-based */ 640 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) 641 return -EMSGSIZE; 642 643 rocker_tlv_nest_end(desc_info, group_ids); 644 645 return 0; 646 } 647 648 static int 649 ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, 650 
const struct ofdpa_group_tbl_entry *entry) 651 { 652 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && 653 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, 654 ETH_ALEN, entry->l3_unicast.eth_src)) 655 return -EMSGSIZE; 656 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && 657 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, 658 ETH_ALEN, entry->l3_unicast.eth_dst)) 659 return -EMSGSIZE; 660 if (entry->l3_unicast.vlan_id && 661 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, 662 entry->l3_unicast.vlan_id)) 663 return -EMSGSIZE; 664 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, 665 entry->l3_unicast.ttl_check)) 666 return -EMSGSIZE; 667 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, 668 entry->l3_unicast.group_id)) 669 return -EMSGSIZE; 670 671 return 0; 672 } 673 674 static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port, 675 struct rocker_desc_info *desc_info, 676 void *priv) 677 { 678 struct ofdpa_group_tbl_entry *entry = priv; 679 struct rocker_tlv *cmd_info; 680 int err = 0; 681 682 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) 683 return -EMSGSIZE; 684 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 685 if (!cmd_info) 686 return -EMSGSIZE; 687 688 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, 689 entry->group_id)) 690 return -EMSGSIZE; 691 692 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { 693 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: 694 err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry); 695 break; 696 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: 697 err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry); 698 break; 699 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: 700 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: 701 err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry); 702 break; 703 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: 704 err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry); 705 break; 706 default: 707 
err = -ENOTSUPP; 708 break; 709 } 710 711 if (err) 712 return err; 713 714 rocker_tlv_nest_end(desc_info, cmd_info); 715 716 return 0; 717 } 718 719 static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port, 720 struct rocker_desc_info *desc_info, 721 void *priv) 722 { 723 const struct ofdpa_group_tbl_entry *entry = priv; 724 struct rocker_tlv *cmd_info; 725 726 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) 727 return -EMSGSIZE; 728 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); 729 if (!cmd_info) 730 return -EMSGSIZE; 731 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, 732 entry->group_id)) 733 return -EMSGSIZE; 734 rocker_tlv_nest_end(desc_info, cmd_info); 735 736 return 0; 737 } 738 739 /*************************************************** 740 * Flow, group, FDB, internal VLAN and neigh tables 741 ***************************************************/ 742 743 static struct ofdpa_flow_tbl_entry * 744 ofdpa_flow_tbl_find(const struct ofdpa *ofdpa, 745 const struct ofdpa_flow_tbl_entry *match) 746 { 747 struct ofdpa_flow_tbl_entry *found; 748 size_t key_len = match->key_len ? match->key_len : sizeof(found->key); 749 750 hash_for_each_possible(ofdpa->flow_tbl, found, 751 entry, match->key_crc32) { 752 if (memcmp(&found->key, &match->key, key_len) == 0) 753 return found; 754 } 755 756 return NULL; 757 } 758 759 static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port, 760 int flags, struct ofdpa_flow_tbl_entry *match) 761 { 762 struct ofdpa *ofdpa = ofdpa_port->ofdpa; 763 struct ofdpa_flow_tbl_entry *found; 764 size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); 765 unsigned long lock_flags; 766 767 match->key_crc32 = crc32(~0, &match->key, key_len); 768 769 spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags); 770 771 found = ofdpa_flow_tbl_find(ofdpa, match); 772 773 if (found) { 774 match->cookie = found->cookie; 775 hash_del(&found->entry); 776 kfree(found); 777 found = match; 778 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; 779 } else { 780 found = match; 781 found->cookie = ofdpa->flow_tbl_next_cookie++; 782 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; 783 } 784 785 hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32); 786 spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags); 787 788 return rocker_cmd_exec(ofdpa_port->rocker_port, 789 ofdpa_flags_nowait(flags), 790 ofdpa_cmd_flow_tbl_add, 791 found, NULL, NULL); 792 return 0; 793 } 794 795 static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port, 796 int flags, struct ofdpa_flow_tbl_entry *match) 797 { 798 struct ofdpa *ofdpa = ofdpa_port->ofdpa; 799 struct ofdpa_flow_tbl_entry *found; 800 size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); 801 unsigned long lock_flags; 802 int err = 0; 803 804 match->key_crc32 = crc32(~0, &match->key, key_len); 805 806 spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags); 807 808 found = ofdpa_flow_tbl_find(ofdpa, match); 809 810 if (found) { 811 hash_del(&found->entry); 812 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; 813 } 814 815 spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags); 816 817 kfree(match); 818 819 if (found) { 820 err = rocker_cmd_exec(ofdpa_port->rocker_port, 821 ofdpa_flags_nowait(flags), 822 ofdpa_cmd_flow_tbl_del, 823 found, NULL, NULL); 824 kfree(found); 825 } 826 827 return err; 828 } 829 830 static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags, 831 struct ofdpa_flow_tbl_entry *entry) 832 { 833 if (flags & OFDPA_OP_FLAG_REMOVE) 834 return ofdpa_flow_tbl_del(ofdpa_port, flags, entry); 835 else 836 return ofdpa_flow_tbl_add(ofdpa_port, flags, entry); 837 } 838 839 static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags, 840 u32 in_pport, u32 in_pport_mask, 841 enum rocker_of_dpa_table_id goto_tbl) 842 { 843 struct ofdpa_flow_tbl_entry *entry; 844 845 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 846 if (!entry) 847 return -ENOMEM; 848 849 entry->key.priority = OFDPA_PRIORITY_IG_PORT; 850 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; 851 entry->key.ig_port.in_pport = in_pport; 852 entry->key.ig_port.in_pport_mask = in_pport_mask; 853 entry->key.ig_port.goto_tbl = goto_tbl; 854 855 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry); 856 } 857 858 static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port, 859 int flags, 860 u32 in_pport, __be16 vlan_id, 861 __be16 vlan_id_mask, 862 enum rocker_of_dpa_table_id goto_tbl, 863 bool untagged, __be16 new_vlan_id) 864 { 865 struct ofdpa_flow_tbl_entry *entry; 866 867 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 868 if (!entry) 869 return -ENOMEM; 870 871 entry->key.priority = OFDPA_PRIORITY_VLAN; 872 
entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; 873 entry->key.vlan.in_pport = in_pport; 874 entry->key.vlan.vlan_id = vlan_id; 875 entry->key.vlan.vlan_id_mask = vlan_id_mask; 876 entry->key.vlan.goto_tbl = goto_tbl; 877 878 entry->key.vlan.untagged = untagged; 879 entry->key.vlan.new_vlan_id = new_vlan_id; 880 881 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry); 882 } 883 884 static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port, 885 u32 in_pport, u32 in_pport_mask, 886 __be16 eth_type, const u8 *eth_dst, 887 const u8 *eth_dst_mask, __be16 vlan_id, 888 __be16 vlan_id_mask, bool copy_to_cpu, 889 int flags) 890 { 891 struct ofdpa_flow_tbl_entry *entry; 892 893 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 894 if (!entry) 895 return -ENOMEM; 896 897 if (is_multicast_ether_addr(eth_dst)) { 898 entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST; 899 entry->key.term_mac.goto_tbl = 900 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; 901 } else { 902 entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST; 903 entry->key.term_mac.goto_tbl = 904 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; 905 } 906 907 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; 908 entry->key.term_mac.in_pport = in_pport; 909 entry->key.term_mac.in_pport_mask = in_pport_mask; 910 entry->key.term_mac.eth_type = eth_type; 911 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); 912 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); 913 entry->key.term_mac.vlan_id = vlan_id; 914 entry->key.term_mac.vlan_id_mask = vlan_id_mask; 915 entry->key.term_mac.copy_to_cpu = copy_to_cpu; 916 917 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry); 918 } 919 920 static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port, 921 int flags, const u8 *eth_dst, 922 const u8 *eth_dst_mask, __be16 vlan_id, 923 u32 tunnel_id, 924 enum rocker_of_dpa_table_id goto_tbl, 925 u32 group_id, bool copy_to_cpu) 926 { 927 struct ofdpa_flow_tbl_entry *entry; 928 u32 priority; 929 bool vlan_bridging = 
!!vlan_id; 930 bool dflt = !eth_dst || (eth_dst && eth_dst_mask); 931 bool wild = false; 932 933 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 934 if (!entry) 935 return -ENOMEM; 936 937 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; 938 939 if (eth_dst) { 940 entry->key.bridge.has_eth_dst = 1; 941 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); 942 } 943 if (eth_dst_mask) { 944 entry->key.bridge.has_eth_dst_mask = 1; 945 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); 946 if (!ether_addr_equal(eth_dst_mask, ff_mac)) 947 wild = true; 948 } 949 950 priority = OFDPA_PRIORITY_UNKNOWN; 951 if (vlan_bridging && dflt && wild) 952 priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD; 953 else if (vlan_bridging && dflt && !wild) 954 priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; 955 else if (vlan_bridging && !dflt) 956 priority = OFDPA_PRIORITY_BRIDGING_VLAN; 957 else if (!vlan_bridging && dflt && wild) 958 priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD; 959 else if (!vlan_bridging && dflt && !wild) 960 priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; 961 else if (!vlan_bridging && !dflt) 962 priority = OFDPA_PRIORITY_BRIDGING_TENANT; 963 964 entry->key.priority = priority; 965 entry->key.bridge.vlan_id = vlan_id; 966 entry->key.bridge.tunnel_id = tunnel_id; 967 entry->key.bridge.goto_tbl = goto_tbl; 968 entry->key.bridge.group_id = group_id; 969 entry->key.bridge.copy_to_cpu = copy_to_cpu; 970 971 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry); 972 } 973 974 static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port, 975 __be16 eth_type, __be32 dst, 976 __be32 dst_mask, u32 priority, 977 enum rocker_of_dpa_table_id goto_tbl, 978 u32 group_id, struct fib_info *fi, 979 int flags) 980 { 981 struct ofdpa_flow_tbl_entry *entry; 982 983 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 984 if (!entry) 985 return -ENOMEM; 986 987 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; 988 entry->key.priority = priority; 989 
entry->key.ucast_routing.eth_type = eth_type; 990 entry->key.ucast_routing.dst4 = dst; 991 entry->key.ucast_routing.dst4_mask = dst_mask; 992 entry->key.ucast_routing.goto_tbl = goto_tbl; 993 entry->key.ucast_routing.group_id = group_id; 994 entry->key_len = offsetof(struct ofdpa_flow_tbl_key, 995 ucast_routing.group_id); 996 entry->fi = fi; 997 998 return ofdpa_flow_tbl_do(ofdpa_port, flags, entry); 999 } 1000 1001 static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags, 1002 u32 in_pport, u32 in_pport_mask, 1003 const u8 *eth_src, const u8 *eth_src_mask, 1004 const u8 *eth_dst, const u8 *eth_dst_mask, 1005 __be16 eth_type, __be16 vlan_id, 1006 __be16 vlan_id_mask, u8 ip_proto, 1007 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, 1008 u32 group_id) 1009 { 1010 u32 priority; 1011 struct ofdpa_flow_tbl_entry *entry; 1012 1013 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1014 if (!entry) 1015 return -ENOMEM; 1016 1017 priority = OFDPA_PRIORITY_ACL_NORMAL; 1018 if (eth_dst && eth_dst_mask) { 1019 if (ether_addr_equal(eth_dst_mask, mcast_mac)) 1020 priority = OFDPA_PRIORITY_ACL_DFLT; 1021 else if (is_link_local_ether_addr(eth_dst)) 1022 priority = OFDPA_PRIORITY_ACL_CTRL; 1023 } 1024 1025 entry->key.priority = priority; 1026 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; 1027 entry->key.acl.in_pport = in_pport; 1028 entry->key.acl.in_pport_mask = in_pport_mask; 1029 1030 if (eth_src) 1031 ether_addr_copy(entry->key.acl.eth_src, eth_src); 1032 if (eth_src_mask) 1033 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); 1034 if (eth_dst) 1035 ether_addr_copy(entry->key.acl.eth_dst, eth_dst); 1036 if (eth_dst_mask) 1037 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); 1038 1039 entry->key.acl.eth_type = eth_type; 1040 entry->key.acl.vlan_id = vlan_id; 1041 entry->key.acl.vlan_id_mask = vlan_id_mask; 1042 entry->key.acl.ip_proto = ip_proto; 1043 entry->key.acl.ip_proto_mask = ip_proto_mask; 1044 entry->key.acl.ip_tos = ip_tos; 1045 
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	/* entry ownership passes to ofdpa_flow_tbl_do() */
	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}

/* Look up a group-table entry by group_id.  Caller must hold
 * ofdpa->group_tbl_lock.
 */
static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

/* Free a group-table entry, including the group_ids array owned only by
 * the fan-out group types (L2 flood/mcast).
 */
static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
{
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		kfree(entry->group_ids);
		break;
	default:
		break;
	}
	kfree(entry);
}

/* Add (or modify, if the group_id already exists) a group-table entry and
 * push the corresponding ADD/MOD command to the device.  Ownership of
 * @match passes to the table.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* replace the existing entry with @match and MOD the device */
		hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_group_tbl_add,
			       found, NULL, NULL);
}

/* Remove a group-table entry (if present) and push the DEL command to the
 * device.  @match is consumed (freed) in all cases.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}

/* Dispatch a group-table op to add or del based on OFDPA_OP_FLAG_REMOVE.
 * Takes ownership of @entry either way.
 */
static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
			      struct ofdpa_group_tbl_entry *entry)
{
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
	else
		return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
}

/* Add/remove an L2 interface group for (@vlan_id, @out_pport); @pop_vlan
 * selects whether the VLAN tag is stripped on egress.
 */
static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    int flags, __be16 vlan_id,
				    u32 out_pport, int pop_vlan)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}

/* Add/remove a fan-out group referencing @group_count member group IDs
 * (copied; caller keeps ownership of @group_ids).
 */
static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
				  int flags, u8 group_count,
				  const u32 *group_ids, u32 group_id)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
	if (!entry->group_ids) {
		kfree(entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}

/* L2 flood group is a plain fan-out group; @vlan_id is implied by the
 * caller-constructed @group_id.
 */
static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
				int flags, __be16 vlan_id,
				u8 group_count, const u32 *group_ids,
				u32 group_id)
{
	return ofdpa_group_l2_fan_out(ofdpa_port, flags,
				      group_count, group_ids,
				      group_id);
}

/* Add/remove an L3 unicast group (@index): rewrites src/dst MAC and VLAN,
 * optionally checks TTL, and chains to the L2 interface group for
 * (@vlan_id, @pport).
 */
static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
				  u32 index, const u8 *src_mac, const u8 *dst_mac,
				  __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
}

/* Look up a neighbor entry by IPv4 address.  Caller must hold
 * ofdpa->neigh_tbl_lock.
 */
static struct ofdpa_neigh_tbl_entry *
ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
{
	struct ofdpa_neigh_tbl_entry *found;

	hash_for_each_possible(ofdpa->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

/* Insert a neighbor entry, assigning it the next L3 unicast group index
 * and taking an initial reference.  Caller must hold neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

/* Drop a reference; unlink and free on last put.  Caller must hold
 * neigh_tbl_lock.
 */
static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
{
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		kfree(entry);
	}
}

static void
ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
		   const u8 *eth_dst, bool ttl_check)
{
	/* With a MAC: refresh the entry's dst MAC/ttl_check.
	 * Without: just take another reference (nexthop reuse).
	 */
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else {
		entry->ref_count++;
	}
}

/* Add/update/remove the driver state for a resolved IPv4 neighbor:
 * maintains the neigh table entry, the L3 unicast group for it, and a
 * /32 unicast route pointing at that group.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* refine the request against current table state */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		/* snapshot before the ref drop may free @found */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group. The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* when adding, @entry was inserted in the table; otherwise it was
	 * only a scratch copy
	 */
	if (!adding)
		kfree(entry);

	return err;
}

/* Kick off (or complete) ARP resolution for @ip_addr on this port's
 * netdev; installs the neighbor immediately if already NUD_VALID.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}

/* Reference (or release) the nexthop neighbor entry for @ip_addr,
 * returning its L3 unicast group index through @index and starting ARP
 * resolution when the MAC is not yet known.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		/* MAC unknown yet; trigger resolution below */
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* NULL MAC: just take another reference */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}

/* Map a rocker port index to its ofdpa port private data (NULL if the
 * port slot is empty).
 */
static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
					 int port_index)
{
	struct rocker_port *rocker_port;

	rocker_port = ofdpa->rocker->ports[port_index];
	return rocker_port ?
rocker_port->wpriv : NULL;
}

/* Rebuild the L2 flood group for @vlan_id so it references an L2
 * interface group for every bridged port that is a member of the VLAN.
 */
static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
				       int flags, __be16 vlan_id)
{
	struct ofdpa_port *p;
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN. The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (!p)
			continue;
		if (!ofdpa_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
				   group_count, group_ids, group_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	kfree(group_ids);
	return err;
}

/* Maintain the per-port and the shared to-CPU L2 interface groups for
 * @vlan_id.  The CPU-port group is created when the first port joins the
 * VLAN and destroyed when the last port leaves.
 */
static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
				     __be16 vlan_id, bool pop_vlan)
{
	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	unsigned int port_count = ofdpa->rocker->port_count;
	struct ofdpa_port *p;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = ofdpa_port->pport;
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < port_count; i++) {
		p = ofdpa_port_get(ofdpa, i);
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* act only on first-join (adding, ref == 1) or last-leave
	 * (removing, ref == 0)
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = ofdpa_group_l2_interface(ofdpa_port, flags,
				       vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}

/* Table of control-plane traffic classes and how each is trapped:
 * via an ACL entry, a bridging (flood) entry, or a term-MAC entry.
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};

/* Install/remove the ACL entry trapping @ctrl traffic on @vlan_id to the
 * CPU (out_pport 0) via an L2 interface group.
 */
static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = ofdpa_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
				 in_pport, in_pport_mask,
				 eth_src, eth_src_mask,
				 ctrl->eth_dst, ctrl->eth_dst_mask,
				 ctrl->eth_type,
				 vlan_id, vlan_id_mask,
				 ip_proto, ip_proto_mask,
				 ip_tos, ip_tos_mask,
				 group_id);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

/* Install/remove the bridging (flood) entry for @ctrl traffic on a
 * bridged port; no-op when the port is not bridged.
 */
static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
				       int flags, const struct ofdpa_ctrl *ctrl,
				       __be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
				    ctrl->eth_dst, ctrl->eth_dst_mask,
				    vlan_id, tunnel_id,
				    goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}
/* Install/remove the termination-MAC entry for @ctrl traffic; VID 0 maps
 * to the port's internal VLAN.
 */
static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
				      ctrl->eth_type, ctrl->eth_dst,
				      ctrl->eth_dst_mask, vlan_id,
				      vlan_id_mask, ctrl->copy_to_cpu,
				      flags);

	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}

/* Dispatch a ctrl-traffic op to the table type the ctrl entry selects. */
static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
						ctrl, vlan_id);
	if (ctrl->bridge)
		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
						   ctrl, vlan_id);

	if (ctrl->term)
		return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
						 ctrl, vlan_id);

	return -EOPNOTSUPP;
}

/* Apply every ctrl class currently enabled on the port to @vlan_id;
 * stops at the first failure.
 */
static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
				    __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (ofdpa_port->ctrls[i]) {
			err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
						   &ofdpa_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

/* Apply one ctrl class to every VLAN the port is a member of; stops at
 * the first failure.
 */
static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
			   const struct ofdpa_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
					   ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}

/* Add or remove (per @flags) port membership in @vid: toggles the port's
 * vlan bitmap and programs ctrl entries, L2 groups, the flood group and
 * the VLAN table entry.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	/* a VLAN table programming failure is logged but deliberately not
	 * propagated; the bitmap change above stands
	 */
	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* undo the membership bit flipped above */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}

/* Install/remove the ingress-port table entry steering all physical
 * ports to the VLAN table.
 */
static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames. Matches pkts from any local physical
	 * ports. Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
				     in_pport, in_pport_mask,
				     goto_tbl);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);

	return err;
}

/* Deferred-work payload for notifying the bridge of a learned/forgotten
 * FDB entry.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};

/* Workqueue handler: forward the FDB add/del to the bridge via switchdev
 * notifiers (under RTNL), then free the work item.
 */
static void ofdpa_port_fdb_learn_work(struct work_struct *work)
{
	const struct ofdpa_fdb_learn_work *lw =
		container_of(work, struct ofdpa_fdb_learn_work, work);
	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
					 lw->ofdpa_port->dev, &info.info);
	rtnl_unlock();

	kfree(work);
}

/* Program the bridging flow entry for (@addr, @vlan_id) (skipped on a
 * pure refresh) and, for bridged ports, schedule the switchdev
 * notification from process context.  May be called in atomic context.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	schedule_work(&lw->work);
	return 0;
}

/* Look up an FDB entry by full key compare within the crc32 hash bucket.
 * Caller must hold ofdpa->fdb_tbl_lock.
 */
static struct ofdpa_fdb_tbl_entry *
ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
		   const struct ofdpa_fdb_tbl_entry *match)
{
	struct ofdpa_fdb_tbl_entry *found;

	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}

/* Add, refresh or remove (per @flags) the FDB entry for (@addr,
 * @vlan_id) on this port, then propagate via ofdpa_port_fdb_learn().
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			kfree(fdb);
			hash_del(&found->entry);
		}
	} else if (!removing) {
		/* new entry: the scratch @fdb becomes the table entry */
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		kfree(fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}

/* Remove all learned FDB entries for a port that is no longer in a
 * forwarding/learning STP state.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		/* NOTE(review): entry is unlinked but not kfree()d here —
		 * confirm it is reclaimed elsewhere
		 */
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}

/* Timer callback (legacy unsigned-long timer API): age out learned FDB
 * entries whose touch time has expired and re-arm for the next earliest
 * expiry.
 */
static void ofdpa_fdb_cleanup(unsigned long data)
{
	struct ofdpa *ofdpa = (struct ofdpa *)data;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		/* age out per-port; remember the soonest future expiry */
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}

/* Install/remove termination-MAC entries so IPv4 and IPv6 frames
 * addressed to the port's own MAC are routed.  VID 0 maps to the port's
 * internal VLAN.
 */
static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
				 int flags, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = ofdpa_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
				      in_pport_mask, eth_type,
				      ofdpa_port->dev->dev_addr,
				      dst_mac_mask, vlan_id, vlan_id_mask,
				      copy_to_cpu, flags);

	return err;
}

/* Toggle egress forwarding for all of the port's VLANs by creating or
 * removing the per-VLAN L2 interface groups based on STP state.
 */
static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING. Traffic from CPU can still egress, regardless of
	 * port STP state. Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		flags |= OFDPA_OP_FLAG_REMOVE;

	out_pport = ofdpa_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		/* internal VLANs are stripped before egress */
		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
		err = ofdpa_group_l2_interface(ofdpa_port, flags,
					       vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}

/* Move the port to STP @state: select which ctrl-traffic classes are
 * wanted in that state, reprogram the delta, flush the FDB when leaving
 * forwarding, and update egress forwarding.  On error, previous ctrls
 * and STP state are restored.
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	/* snapshot for rollback on failure */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* apply only the ctrl classes whose wanted state changed */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ?
					  0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	/* roll back to the pre-transition ctrls and STP state */
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}

/* Enable forwarding on a non-bridged port by faking an STP transition to
 * FORWARDING; bridged ports are driven by real bridge STP instead.
 */
static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
{
	if (ofdpa_port_is_bridged(ofdpa_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return ofdpa_port_stp_update(ofdpa_port, flags,
				     BR_STATE_FORWARDING);
}

/* Disable forwarding on a non-bridged port by faking an STP transition
 * to DISABLED; bridged ports are driven by real bridge STP instead.
 */
static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
{
	if (ofdpa_port_is_bridged(ofdpa_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return ofdpa_port_stp_update(ofdpa_port, flags,
				     BR_STATE_DISABLED);
}

/* switchdev VLAN add: join @vid, then add the router term-MAC entries;
 * rolls back the VLAN join if the latter fails.
 */
static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
			       u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
	if (err)
		return err;

	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
	if (err)
		ofdpa_port_vlan(ofdpa_port,
				OFDPA_OP_FLAG_REMOVE, vid);

	return err;
}

/* switchdev VLAN del: remove router term-MAC entries, then leave @vid. */
static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
			       u16 vid, u16 flags)
{
	int err;

	err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
				    htons(vid));
	if (err)
		return err;

	return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
			       vid);
}

/* Look up the internal-VLAN mapping for @ifindex.  Caller must hold
 * ofdpa->internal_vlan_tbl_lock.
 */
static struct ofdpa_internal_vlan_tbl_entry *
ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
{
	struct ofdpa_internal_vlan_tbl_entry *found;

	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

/* Get (allocating on first use) the refcounted internal VLAN ID mapped
 * to @ifindex.  Returns 0 on allocation failure; if the internal-VLAN
 * pool is exhausted, the entry's vlan_id is left at 0 and that is what
 * callers receive.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	/* speculatively allocate outside the lock */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* claim the first free internal VLAN from the bitmap */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}

/* Program (or remove, per @flags) an IPv4 route: on-port gateway routes
 * go via an L3 unicast nexthop group, everything else is trapped to the
 * CPU via the internal-VLAN L2 interface group.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

/* Drop a reference on @ifindex's internal VLAN; on the last put, return
 * the VLAN to the bitmap and free the mapping.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}

/**********************************
 * Rocker world ops implementation
 **********************************/

static int ofdpa_init(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
ofdpa->rocker = rocker; 2355 2356 hash_init(ofdpa->flow_tbl); 2357 spin_lock_init(&ofdpa->flow_tbl_lock); 2358 2359 hash_init(ofdpa->group_tbl); 2360 spin_lock_init(&ofdpa->group_tbl_lock); 2361 2362 hash_init(ofdpa->fdb_tbl); 2363 spin_lock_init(&ofdpa->fdb_tbl_lock); 2364 2365 hash_init(ofdpa->internal_vlan_tbl); 2366 spin_lock_init(&ofdpa->internal_vlan_tbl_lock); 2367 2368 hash_init(ofdpa->neigh_tbl); 2369 spin_lock_init(&ofdpa->neigh_tbl_lock); 2370 2371 setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 2372 (unsigned long) ofdpa); 2373 mod_timer(&ofdpa->fdb_cleanup_timer, jiffies); 2374 2375 ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME; 2376 2377 return 0; 2378 } 2379 2380 static void ofdpa_fini(struct rocker *rocker) 2381 { 2382 struct ofdpa *ofdpa = rocker->wpriv; 2383 2384 unsigned long flags; 2385 struct ofdpa_flow_tbl_entry *flow_entry; 2386 struct ofdpa_group_tbl_entry *group_entry; 2387 struct ofdpa_fdb_tbl_entry *fdb_entry; 2388 struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry; 2389 struct ofdpa_neigh_tbl_entry *neigh_entry; 2390 struct hlist_node *tmp; 2391 int bkt; 2392 2393 del_timer_sync(&ofdpa->fdb_cleanup_timer); 2394 flush_workqueue(rocker->rocker_owq); 2395 2396 spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags); 2397 hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) 2398 hash_del(&flow_entry->entry); 2399 spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); 2400 2401 spin_lock_irqsave(&ofdpa->group_tbl_lock, flags); 2402 hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry) 2403 hash_del(&group_entry->entry); 2404 spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags); 2405 2406 spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags); 2407 hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry) 2408 hash_del(&fdb_entry->entry); 2409 spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags); 2410 2411 spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags); 2412 hash_for_each_safe(ofdpa->internal_vlan_tbl, 
bkt, 2413 tmp, internal_vlan_entry, entry) 2414 hash_del(&internal_vlan_entry->entry); 2415 spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags); 2416 2417 spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags); 2418 hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry) 2419 hash_del(&neigh_entry->entry); 2420 spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags); 2421 } 2422 2423 static int ofdpa_port_pre_init(struct rocker_port *rocker_port) 2424 { 2425 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2426 2427 ofdpa_port->ofdpa = rocker_port->rocker->wpriv; 2428 ofdpa_port->rocker_port = rocker_port; 2429 ofdpa_port->dev = rocker_port->dev; 2430 ofdpa_port->pport = rocker_port->pport; 2431 ofdpa_port->brport_flags = BR_LEARNING; 2432 ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME; 2433 return 0; 2434 } 2435 2436 static int ofdpa_port_init(struct rocker_port *rocker_port) 2437 { 2438 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2439 int err; 2440 2441 rocker_port_set_learning(rocker_port, 2442 !!(ofdpa_port->brport_flags & BR_LEARNING)); 2443 2444 err = ofdpa_port_ig_tbl(ofdpa_port, 0); 2445 if (err) { 2446 netdev_err(ofdpa_port->dev, "install ig port table failed\n"); 2447 return err; 2448 } 2449 2450 ofdpa_port->internal_vlan_id = 2451 ofdpa_port_internal_vlan_id_get(ofdpa_port, 2452 ofdpa_port->dev->ifindex); 2453 2454 err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0); 2455 if (err) { 2456 netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n"); 2457 goto err_untagged_vlan; 2458 } 2459 return 0; 2460 2461 err_untagged_vlan: 2462 ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE); 2463 return err; 2464 } 2465 2466 static void ofdpa_port_fini(struct rocker_port *rocker_port) 2467 { 2468 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2469 2470 ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE); 2471 } 2472 2473 static int ofdpa_port_open(struct rocker_port *rocker_port) 2474 { 2475 struct ofdpa_port *ofdpa_port 
= rocker_port->wpriv; 2476 2477 return ofdpa_port_fwd_enable(ofdpa_port, 0); 2478 } 2479 2480 static void ofdpa_port_stop(struct rocker_port *rocker_port) 2481 { 2482 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2483 2484 ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT); 2485 } 2486 2487 static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port, 2488 u8 state) 2489 { 2490 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2491 2492 return ofdpa_port_stp_update(ofdpa_port, 0, state); 2493 } 2494 2495 static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port, 2496 unsigned long brport_flags, 2497 struct switchdev_trans *trans) 2498 { 2499 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2500 unsigned long orig_flags; 2501 int err = 0; 2502 2503 orig_flags = ofdpa_port->brport_flags; 2504 ofdpa_port->brport_flags = brport_flags; 2505 if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING && 2506 !switchdev_trans_ph_prepare(trans)) 2507 err = rocker_port_set_learning(ofdpa_port->rocker_port, 2508 !!(ofdpa_port->brport_flags & BR_LEARNING)); 2509 2510 if (switchdev_trans_ph_prepare(trans)) 2511 ofdpa_port->brport_flags = orig_flags; 2512 2513 return err; 2514 } 2515 2516 static int 2517 ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, 2518 unsigned long *p_brport_flags) 2519 { 2520 const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2521 2522 *p_brport_flags = ofdpa_port->brport_flags; 2523 return 0; 2524 } 2525 2526 static int 2527 ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port * 2528 rocker_port, 2529 unsigned long * 2530 p_brport_flags_support) 2531 { 2532 *p_brport_flags_support = BR_LEARNING; 2533 return 0; 2534 } 2535 2536 static int 2537 ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, 2538 u32 ageing_time, 2539 struct switchdev_trans *trans) 2540 { 2541 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2542 struct ofdpa *ofdpa = 
ofdpa_port->ofdpa; 2543 2544 if (!switchdev_trans_ph_prepare(trans)) { 2545 ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time); 2546 if (ofdpa_port->ageing_time < ofdpa->ageing_time) 2547 ofdpa->ageing_time = ofdpa_port->ageing_time; 2548 mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies); 2549 } 2550 2551 return 0; 2552 } 2553 2554 static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port, 2555 const struct switchdev_obj_port_vlan *vlan) 2556 { 2557 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2558 u16 vid; 2559 int err; 2560 2561 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 2562 err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags); 2563 if (err) 2564 return err; 2565 } 2566 2567 return 0; 2568 } 2569 2570 static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port, 2571 const struct switchdev_obj_port_vlan *vlan) 2572 { 2573 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2574 u16 vid; 2575 int err; 2576 2577 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 2578 err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags); 2579 if (err) 2580 return err; 2581 } 2582 2583 return 0; 2584 } 2585 2586 static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port, 2587 u16 vid, const unsigned char *addr) 2588 { 2589 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2590 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL); 2591 2592 if (!ofdpa_port_is_bridged(ofdpa_port)) 2593 return -EINVAL; 2594 2595 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0); 2596 } 2597 2598 static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port, 2599 u16 vid, const unsigned char *addr) 2600 { 2601 struct ofdpa_port *ofdpa_port = rocker_port->wpriv; 2602 __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL); 2603 int flags = OFDPA_OP_FLAG_REMOVE; 2604 2605 if (!ofdpa_port_is_bridged(ofdpa_port)) 2606 return -EINVAL; 2607 2608 return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags); 2609 } 

/* Port is joining a bridge: swap the port's internal VLAN from its own
 * ifindex to the bridge's ifindex.  The untagged VLAN (vid 0) must be
 * removed first and re-added afterwards because its programming depends
 * on the internal VLAN ID.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}

/* Port is leaving its bridge: reverse of ofdpa_port_bridge_join() —
 * swap the internal VLAN back to the port's own ifindex, then re-enable
 * forwarding if the netdev is administratively up.
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}

/* OVS mastership changed (master may be NULL on unlink): record the new
 * master and bounce forwarding so the ctrl/forwarding state is rebuilt
 * for the new topology.
 */
static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
				  struct net_device *master)
{
	int err;

	ofdpa_port->bridge_dev = master;

	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
	if (err)
		return err;
	err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}

/* Netdev event: port was linked under a master device.  Only bridge and
 * OVS masters are handled; anything else is a silent no-op.
 */
static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
				    struct net_device *master)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err = 0;

	if (netif_is_bridge_master(master))
		err = ofdpa_port_bridge_join(ofdpa_port, master);
	else if (netif_is_ovs_master(master))
		err = ofdpa_port_ovs_changed(ofdpa_port, master);
	return err;
}

/* Netdev event: port was unlinked from its master device. */
static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
				      struct net_device *master)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err = 0;

	if (ofdpa_port_is_bridged(ofdpa_port))
		err = ofdpa_port_bridge_leave(ofdpa_port);
	else if (ofdpa_port_is_ovsed(ofdpa_port))
		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
	return err;
}

/* Neighbour update: program (valid neigh) or remove (invalid neigh) the
 * IPv4 neighbour entry, without waiting for hardware completion.
 * NOTE(review): primary_key is read as a __be32, i.e. IPv4-only —
 * presumably IPv6 neighbours never reach this path; verify at the caller.
 */
static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
				   struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
						    OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}

/* Neighbour destroyed: unconditionally remove its IPv4 entry. */
static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
				    struct neighbour *n)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *) n->primary_key;

	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
}

/* Hardware learned a MAC/VLAN on this port: record it in the FDB, but
 * only while the port is in a learning or forwarding STP state.
 */
static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
				       const unsigned char *addr,
				       __be16 vlan_id)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;

	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}

/* Map a netdev (possibly an upper device) to the ofdpa port beneath it,
 * or NULL if the device is not backed by one of this rocker's ports.
 */
static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
						    struct rocker *rocker)
{
	struct rocker_port *rocker_port;

	rocker_port = rocker_port_dev_lower_find(dev, rocker);
	return rocker_port ? rocker_port->wpriv : NULL;
}

/* FIB notifier: offload an IPv4 route and mark its nexthop as
 * offloaded.  No-op once FIB offloading has been aborted, or when the
 * route's device is not one of ours.
 */
static int ofdpa_fib4_add(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	int err;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				  fen_info->dst_len, fen_info->fi,
				  fen_info->tb_id, 0);
	if (err)
		return err;
	fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD;
	return 0;
}

/* FIB notifier: un-offload an IPv4 route (clear the offload flag first,
 * then remove the flow entry).
 */
static int ofdpa_fib4_del(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				   fen_info->dst_len, fen_info->fi,
				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}

/* FIB offload abort: walk the flow table, un-offload every unicast
 * routing entry, and latch fib_aborted so later add/del become no-ops.
 * NOTE(review): ofdpa_flow_tbl_del() is called while flow_tbl_lock is
 * held — confirm it does not itself acquire flow_tbl_lock (recursive
 * spinlock acquisition would deadlock).
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
						       rocker);
		if (!ofdpa_port)
			continue;
		flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}

/* OF-DPA "world" ops table registered with the rocker core; this is the
 * driver's only non-static symbol in this file.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};