/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <net/psample.h>
#include <net/pkt_cls.h>

#include "port.h"
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 1024	/* Bridged VLAN interfaces */

#define MLXSW_SP_RFID_BASE 15360

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128

struct mlxsw_sp_port;
struct mlxsw_sp_rif;

struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

struct mlxsw_sp_fid {
	void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
	struct list_head list;
	unsigned int ref_count;
	struct net_device *dev;
	struct mlxsw_sp_rif *rif;
	u16 fid;
};

struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 fid;
	u16 mid;
	unsigned int ref_count;
};

static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
	return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
	return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}
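
/*
 * Illustrative sketch (not part of the driver): the FID space as implied by
 * the constants and helpers above. FIDs below MLXSW_SP_VFID_BASE (VLAN_N_VID,
 * i.e. 4096) are used for VLANs of the VLAN-aware bridge, FIDs in
 * [MLXSW_SP_VFID_BASE, MLXSW_SP_RFID_BASE) back VLAN-unaware bridges (vFIDs),
 * and FIDs from MLXSW_SP_RFID_BASE up are router FIDs. The vFID <-> FID
 * mapping is a plain offset, so the conversions round-trip:
 *
 *	u16 fid = mlxsw_sp_vfid_to_fid(0);		// 4096
 *	bool is_vfid = mlxsw_sp_fid_is_vfid(fid);	// true
 *	u16 vfid = mlxsw_sp_fid_to_vfid(fid);		// 0
 */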

struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

#define MLXSW_SP_SB_POOL_COUNT 4
#define MLXSW_SP_SB_TC_COUNT 8

struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
};

struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
};

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib;

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
};

enum mlxsw_sp_span_type {
	MLXSW_SP_SPAN_EGRESS,
	MLXSW_SP_SPAN_INGRESS
};

struct mlxsw_sp_span_inspected_port {
	struct list_head list;
	enum mlxsw_sp_span_type type;
	u8 local_port;
};

struct mlxsw_sp_span_entry {
	u8 local_port;
	bool used;
	struct list_head bound_ports_list;
	int ref_count;
	int id;
};

enum mlxsw_sp_port_mall_action_type {
	MLXSW_SP_PORT_MALL_MIRROR,
	MLXSW_SP_PORT_MALL_SAMPLE,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
	u8 to_local_port;
	bool ingress;
};

struct mlxsw_sp_port_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;
	enum mlxsw_sp_port_mall_action_type type;
	union {
		struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
	};
};

struct mlxsw_sp_router {
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
};

struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;

struct mlxsw_sp {
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
	} vfids;
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
	} br_mids;
	struct list_head fids;	/* VLAN-aware bridge FIDs */
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	struct mlxsw_sp_upper master_bridge;
	struct mlxsw_sp_upper *lags;
	u8 *port_to_module;
	struct mlxsw_sp_sb sb;
	struct mlxsw_sp_router router;
	struct mlxsw_sp_acl *acl;
	struct {
		DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
	} kvdl;

	struct mlxsw_sp_counter_pool *counter_pool;
	struct {
		struct mlxsw_sp_span_entry *entries;
		int entries_count;
	} span;
	struct notifier_block fib_nb;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
				       u32 cells)
{
	return mlxsw_sp->sb.cell_size * cells;
}

static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
				       u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
}
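
/*
 * Illustrative sketch (not part of the driver): shared buffer sizes are
 * programmed to the device in cells; the cell size is filled in at init time
 * into mlxsw_sp->sb.cell_size. Byte values coming from devlink are rounded up
 * to whole cells, so converting back may yield slightly more bytes than were
 * requested. For example, with a hypothetical 96-byte cell:
 *
 *	u32 cells = mlxsw_sp_bytes_cells(mlxsw_sp, 100);	// 2 cells
 *	u32 bytes = mlxsw_sp_cells_bytes(mlxsw_sp, cells);	// 192 bytes
 */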

struct mlxsw_sp_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

struct mlxsw_sp_port_sample {
	struct psample_group __rcu *psample_group;
	u32 trunc_size;
	u32 rate;
	bool truncate;
};

struct mlxsw_sp_port {
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;
	u16 learning:1,
	    learning_sync:1,
	    uc_flood:1,
	    mc_flood:1,
	    mc_router:1,
	    mc_disabled:1,
	    bridged:1,
	    lagged:1,
	    split:1;
	u16 pvid;
	u16 lag_id;
	struct {
		struct list_head list;
		struct mlxsw_sp_fid *f;
		u16 vid;
	} vport;
	struct {
		u8 tx_pause:1,
		   rx_pause:1,
		   autoneg:1;
	} link;
	struct {
		struct ieee_ets *ets;
		struct ieee_maxrate *maxrate;
		struct ieee_pfc *pfc;
	} dcb;
	struct {
		u8 module;
		u8 width;
		u8 lane;
	} mapping;
	/* 802.1Q bridge VLANs */
	unsigned long *active_vlans;
	unsigned long *untagged_vlans;
	/* VLAN interfaces */
	struct list_head vports_list;
	/* TC handles */
	struct list_head mall_tc_list;
	struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
		struct rtnl_link_stats64 *cache;
		struct delayed_work update_dw;
	} hw_stats;
	struct mlxsw_sp_port_sample *sample;
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}

static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_fid *f)
{
	mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	return f ? f->dev : NULL;
}
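
/*
 * Illustrative sketch (not part of the driver): a "vPort" is a {port, VID}
 * pair represented by a child struct mlxsw_sp_port hanging off the physical
 * port's vports_list. It is distinguished from the physical port by a
 * non-zero vport.vid, and only gains a FID (and thus a device via f->dev)
 * once it is bound to a VLAN-unaware bridge or a router interface:
 *
 *	if (mlxsw_sp_port_is_vport(mlxsw_sp_vport)) {
 *		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
 *		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
 *		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
 *
 *		// f and dev may be NULL if the vPort has no FID yet.
 *	}
 */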

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				u16 fid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

		if (f && f->fid == fid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
						     u16 fid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->fids, list)
		if (f->fid == fid)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		   const struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
		if (f->dev == br_dev)
			return f;

	return NULL;
}
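
/*
 * Illustrative sketch (not part of the driver): the lookups above are plain
 * linear list walks. mlxsw_sp_fid_find() searches the VLAN-aware bridge FIDs
 * by number, while mlxsw_sp_vfid_find() resolves a VLAN-unaware bridge
 * netdevice to its vFID, which can then be used to locate the vPort of a
 * given physical port that is a member of that bridge:
 *
 *	struct mlxsw_sp_fid *f = mlxsw_sp_vfid_find(mlxsw_sp, br_dev);
 *	struct mlxsw_sp_port *mlxsw_sp_vport;
 *
 *	if (f)
 *		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
 *								 f->fid);
 */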

enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BC,
	MLXSW_SP_FLOOD_TABLE_MC,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable);

#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif);
int mlxsw_sp_vport_vrf_join(struct mlxsw_sp_port *mlxsw_sp_vport);
void mlxsw_sp_vport_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
int mlxsw_sp_port_vrf_join(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_bridge_vrf_join(struct mlxsw_sp *mlxsw_sp,
			     struct net_device *l3_dev);
void mlxsw_sp_bridge_vrf_leave(struct mlxsw_sp *mlxsw_sp,
			       struct net_device *l3_dev);

int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
			u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
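
/*
 * Illustrative sketch (not part of the driver): the KVD linear area is a pool
 * of MLXSW_SP_KVD_LINEAR_SIZE entries tracked by the kvdl.usage bitmap in
 * struct mlxsw_sp. A caller allocates a contiguous range and later frees it
 * by its base index (entry_count below is a placeholder value):
 *
 *	u32 kvdl_index;
 *	int err;
 *
 *	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, entry_count, &kvdl_index);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
 */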

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);

struct mlxsw_sp_acl_rule_info {
	unsigned int priority;
	struct mlxsw_afk_element_values values;
	struct mlxsw_afa_block *act_block;
	unsigned int counter_index;
	bool counter_valid;
};

enum mlxsw_sp_acl_profile {
	MLXSW_SP_ACL_PROFILE_FLOWER,
};

struct mlxsw_sp_acl_profile_ops {
	size_t ruleset_priv_size;
	int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
			   void *priv, void *ruleset_priv);
	void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
	int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			    struct net_device *dev, bool ingress);
	void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
	size_t rule_priv_size;
	int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
			void *ruleset_priv, void *rule_priv,
			struct mlxsw_sp_acl_rule_info *rulei);
	void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
	int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
				 bool *activity);
};

struct mlxsw_sp_acl_ops {
	size_t priv_size;
	int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
	void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
	const struct mlxsw_sp_acl_profile_ops *
			(*profile_ops)(struct mlxsw_sp *mlxsw_sp,
				       enum mlxsw_sp_acl_profile profile);
};

struct mlxsw_sp_acl_ruleset;

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset);

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority);
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value);
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len);
void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev);
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei);

struct mlxsw_sp_acl_rule;

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule);
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule);
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use);
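
/*
 * Illustrative sketch (not part of the driver): a rough outline of how a
 * classifier offload such as flower is expected to use the ACL API above;
 * the authoritative flow lives in spectrum_flower.c, and error handling and
 * the actual key/action parameters (prio, element, value, mask below are
 * placeholders) are omitted here.
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
 *					   MLXSW_SP_ACL_PROFILE_FLOWER);
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie);
 *	rulei = mlxsw_sp_acl_rule_rulei(rule);
 *
 *	mlxsw_sp_acl_rulei_priority(rulei, prio);
 *	mlxsw_sp_acl_rulei_keymask_u32(rulei, element, value, mask);
 *	mlxsw_sp_acl_rulei_act_drop(rulei);
 *	mlxsw_sp_acl_rulei_commit(rulei);
 *
 *	mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 */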

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);

extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;

int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			    __be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			     struct tc_cls_flower_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			  struct tc_cls_flower_offload *f);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index);

#endif