/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <net/psample.h>
#include <net/pkt_cls.h>

#include "port.h"
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 1024	/* Bridged VLAN interfaces */

#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_INVALID_RIF 0xffff

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)

#define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */

#define MLXSW_SP_BYTES_PER_CELL 96

#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_CELLS_TO_BYTES(c) ((c) * MLXSW_SP_BYTES_PER_CELL)

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128

/* Maximum delay buffer needed in case of PAUSE frames, in cells.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 612

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
{
        delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
        return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
}
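
/* For example, mlxsw_sp_pfc_delay_get(1500, 0xffff) treats the delay as
 * 0xffff bit times: DIV_ROUND_UP(0xffff, 8) = 8192 bytes = 86 cells, so it
 * returns 2 * 86 + MLXSW_SP_BYTES_TO_CELLS(1500) = 172 + 16 = 188 cells of
 * delay headroom.
 */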

struct mlxsw_sp_port;

struct mlxsw_sp_upper {
        struct net_device *dev;
        unsigned int ref_count;
};

struct mlxsw_sp_fid {
        void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
        struct list_head list;
        unsigned int ref_count;
        struct net_device *dev;
        struct mlxsw_sp_rif *r;
        u16 fid;
};

struct mlxsw_sp_rif {
        struct list_head nexthop_list;
        struct list_head neigh_list;
        struct net_device *dev;
        unsigned int ref_count;
        struct mlxsw_sp_fid *f;
        unsigned char addr[ETH_ALEN];
        int mtu;
        u16 rif;
};

struct mlxsw_sp_mid {
        struct list_head list;
        unsigned char addr[ETH_ALEN];
        u16 fid;
        u16 mid;
        unsigned int ref_count;
};

static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
        return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
        return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
        return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}

static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
{
        return fid >= MLXSW_SP_RFID_BASE;
}

static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
        return MLXSW_SP_RFID_BASE + rif;
}
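
/* The FID number space, as implied by the constants and helpers above:
 * FIDs below MLXSW_SP_VFID_BASE (VLAN_N_VID, i.e. 4096) belong to the
 * VLAN-aware bridge and map 1:1 to VLAN IDs; the next MLXSW_SP_VFID_MAX
 * FIDs are vFIDs used for bridged VLAN interfaces (e.g. vFID 0 is FID 4096);
 * FIDs from MLXSW_SP_RFID_BASE up are rFIDs, one per router interface
 * (e.g. RIF 5 is FID 15365).
 */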

struct mlxsw_sp_sb_pr {
        enum mlxsw_reg_sbpr_mode mode;
        u32 size;
};

struct mlxsw_cp_sb_occ {
        u32 cur;
        u32 max;
};

struct mlxsw_sp_sb_cm {
        u32 min_buff;
        u32 max_buff;
        u8 pool;
        struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
        u32 min_buff;
        u32 max_buff;
        struct mlxsw_cp_sb_occ occ;
};

#define MLXSW_SP_SB_POOL_COUNT 4
#define MLXSW_SP_SB_TC_COUNT 8

struct mlxsw_sp_sb {
        struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
        struct {
                struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
                struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
        } ports[MLXSW_PORT_MAX_PORTS];
};

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
        DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

enum mlxsw_sp_l3proto {
        MLXSW_SP_L3_PROTO_IPV4,
        MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
        u8 id; /* tree ID */
        unsigned int ref_count;
        enum mlxsw_sp_l3proto proto;
        struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib;

struct mlxsw_sp_vr {
        u16 id; /* virtual router ID */
        bool used;
        enum mlxsw_sp_l3proto proto;
        u32 tb_id; /* kernel fib table id */
        struct mlxsw_sp_lpm_tree *lpm_tree;
        struct mlxsw_sp_fib *fib;
};

enum mlxsw_sp_span_type {
        MLXSW_SP_SPAN_EGRESS,
        MLXSW_SP_SPAN_INGRESS
};

struct mlxsw_sp_span_inspected_port {
        struct list_head list;
        enum mlxsw_sp_span_type type;
        u8 local_port;
};

struct mlxsw_sp_span_entry {
        u8 local_port;
        bool used;
        struct list_head bound_ports_list;
        int ref_count;
        int id;
};

enum mlxsw_sp_port_mall_action_type {
        MLXSW_SP_PORT_MALL_MIRROR,
        MLXSW_SP_PORT_MALL_SAMPLE,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
        u8 to_local_port;
        bool ingress;
};

struct mlxsw_sp_port_mall_tc_entry {
        struct list_head list;
        unsigned long cookie;
        enum mlxsw_sp_port_mall_action_type type;
        union {
                struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
        };
};

struct mlxsw_sp_router {
        struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
        struct mlxsw_sp_vr *vrs;
        struct rhashtable neigh_ht;
        struct rhashtable nexthop_group_ht;
        struct rhashtable nexthop_ht;
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
        } neighs_update;
        struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
        struct list_head nexthop_neighs_list;
        bool aborted;
};

struct mlxsw_sp_acl;

struct mlxsw_sp {
        struct {
                struct list_head list;
                DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
        } vfids;
        struct {
                struct list_head list;
                DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
        } br_mids;
        struct list_head fids;	/* VLAN-aware bridge FIDs */
        struct mlxsw_sp_rif **rifs;
        struct mlxsw_sp_port **ports;
        struct mlxsw_core *core;
        const struct mlxsw_bus_info *bus_info;
        unsigned char base_mac[ETH_ALEN];
        struct {
                struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
                unsigned int interval; /* ms */
        } fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
        u32 ageing_time;
        struct mlxsw_sp_upper master_bridge;
        struct mlxsw_sp_upper *lags;
        u8 port_to_module[MLXSW_PORT_MAX_PORTS];
        struct mlxsw_sp_sb sb;
        struct mlxsw_sp_router router;
        struct mlxsw_sp_acl *acl;
        struct {
                DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
        } kvdl;

        struct {
                struct mlxsw_sp_span_entry *entries;
                int entries_count;
        } span;
        struct notifier_block fib_nb;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        return &mlxsw_sp->lags[lag_id];
}

struct mlxsw_sp_port_pcpu_stats {
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
        struct u64_stats_sync syncp;
        u32 tx_dropped;
};

struct mlxsw_sp_port_sample {
        struct psample_group __rcu *psample_group;
        u32 trunc_size;
        u32 rate;
        bool truncate;
};

struct mlxsw_sp_port {
        struct net_device *dev;
        struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
        struct mlxsw_sp *mlxsw_sp;
        u8 local_port;
        u8 stp_state;
        u16 learning:1,
            learning_sync:1,
            uc_flood:1,
            mc_flood:1,
            mc_router:1,
            mc_disabled:1,
            bridged:1,
            lagged:1,
            split:1;
        u16 pvid;
        u16 lag_id;
        struct {
                struct list_head list;
                struct mlxsw_sp_fid *f;
                u16 vid;
        } vport;
        struct {
                u8 tx_pause:1,
                   rx_pause:1,
                   autoneg:1;
        } link;
        struct {
                struct ieee_ets *ets;
                struct ieee_maxrate *maxrate;
                struct ieee_pfc *pfc;
        } dcb;
        struct {
                u8 module;
                u8 width;
                u8 lane;
        } mapping;
        /* 802.1Q bridge VLANs */
        unsigned long *active_vlans;
        unsigned long *untagged_vlans;
        /* VLAN interfaces */
        struct list_head vports_list;
        /* TC handles */
        struct list_head mall_tc_list;
        struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
                struct rtnl_link_stats64 *cache;
                struct delayed_work update_dw;
        } hw_stats;
        struct mlxsw_sp_port_sample *sample;
};
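
/* A vPort (VLAN interface on top of a port) is represented by a child
 * struct mlxsw_sp_port that shares the parent's netdev and local port, is
 * linked on the parent's vports_list and is distinguished by a non-zero
 * vport.vid; see mlxsw_sp_port_is_vport() below.
 */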

bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
        return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 local_port;

        local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
                                                lag_id, port_index);
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}

static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
        return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

        return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
                                          struct mlxsw_sp_fid *f)
{
        mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
        return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
        struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

        return f ? f->dev : NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;

        list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
                            vport.list) {
                if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
                        return mlxsw_sp_vport;
        }

        return NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
                                u16 fid)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;

        list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
                            vport.list) {
                struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

                if (f && f->fid == fid)
                        return mlxsw_sp_vport;
        }

        return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
                                                     u16 fid)
{
        struct mlxsw_sp_fid *f;

        list_for_each_entry(f, &mlxsw_sp->fids, list)
                if (f->fid == fid)
                        return f;

        return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
                   const struct net_device *br_dev)
{
        struct mlxsw_sp_fid *f;

        list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
                if (f->dev == br_dev)
                        return f;

        return NULL;
}

static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
                         const struct net_device *dev)
{
        int i;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
                if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
                        return mlxsw_sp->rifs[i];

        return NULL;
}

enum mlxsw_sp_flood_table {
        MLXSW_SP_FLOOD_TABLE_UC,
        MLXSW_SP_FLOOD_TABLE_BC,
        MLXSW_SP_FLOOD_TABLE_MC,
};
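
/* The flood table indices above select the per-FID flood table used for
 * unknown unicast (UC), broadcast (BC) and unregistered multicast (MC)
 * traffic, respectively.
 */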

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index,
                         struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index, u32 size,
                         enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
                             unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
                              unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                                  unsigned int sb_index, u16 pool_index,
                                  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                     unsigned int sb_index, u16 tc_index,
                                     enum devlink_sb_pool_type pool_type,
                                     u32 *p_cur, u32 *p_max);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
                                 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
                             bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
                        bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
                          bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
                              u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
                                 u8 *prio_tc, bool pause_en,
                                 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                  enum mlxsw_reg_qeec_hr hr, u8 index,
                                  u8 next_index, u32 maxrate);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end,
                                     bool learn_enable);
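
/* When CONFIG_MLXSW_SPECTRUM_DCB is enabled, the dcbnl code below drives
 * the ETS/maxrate/headroom helpers above; in particular,
 * __mlxsw_sp_port_headroom_set() is expected to size the per-priority
 * ingress buffers from the MTU, the PAUSE configuration and the PFC delay
 * computed by mlxsw_sp_pfc_delay_get().
 */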

#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
                                   unsigned long event, void *ptr);
void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_rif *r);

int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);

struct mlxsw_sp_acl_rule_info {
        unsigned int priority;
        struct mlxsw_afk_element_values values;
        struct mlxsw_afa_block *act_block;
};

enum mlxsw_sp_acl_profile {
        MLXSW_SP_ACL_PROFILE_FLOWER,
};

struct mlxsw_sp_acl_profile_ops {
        size_t ruleset_priv_size;
        int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
                           void *priv, void *ruleset_priv);
        void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
        int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
                            struct net_device *dev, bool ingress);
        void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
        size_t rule_priv_size;
        int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
                        void *ruleset_priv, void *rule_priv,
                        struct mlxsw_sp_acl_rule_info *rulei);
        void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
};

struct mlxsw_sp_acl_ops {
        size_t priv_size;
        int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
        void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
        const struct mlxsw_sp_acl_profile_ops *
                        (*profile_ops)(struct mlxsw_sp *mlxsw_sp,
                                       enum mlxsw_sp_acl_profile profile);
};

struct mlxsw_sp_acl_ruleset;

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
                         struct net_device *dev, bool ingress,
                         enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_ruleset *ruleset);

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
                                 unsigned int priority);
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
                                    enum mlxsw_afk_element element,
                                    u32 key_value, u32 mask_value);
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
                                    enum mlxsw_afk_element element,
                                    const char *key_value,
                                    const char *mask_value, unsigned int len);
void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
                                 u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule_info *rulei,
                               struct net_device *out_dev);

struct mlxsw_sp_acl_rule;

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
                         unsigned long cookie);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_acl_rule *rule);
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
                           struct mlxsw_sp_acl_rule *rule);
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset,
                         unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
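
/* A rough sketch of how the flower offload is expected to drive the ACL API
 * above (error handling omitted, element/key/mask values illustrative):
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
 *					   MLXSW_SP_ACL_PROFILE_FLOWER);
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie);
 *	rulei = mlxsw_sp_acl_rule_rulei(rule);
 *	mlxsw_sp_acl_rulei_priority(rulei, prio);
 *	mlxsw_sp_acl_rulei_keymask_u32(rulei, element, key, mask);
 *	mlxsw_sp_acl_rulei_act_drop(rulei);
 *	mlxsw_sp_acl_rulei_commit(rulei);
 *	mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 *
 * Forwarding rules would use mlxsw_sp_acl_rulei_act_fwd() instead of the
 * drop action. Removal looks the rule up by cookie with
 * mlxsw_sp_acl_rule_lookup(), then calls mlxsw_sp_acl_rule_del(),
 * mlxsw_sp_acl_rule_destroy() and finally mlxsw_sp_acl_ruleset_put().
 */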

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);

extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;

int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
                            __be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
                             struct tc_cls_flower_offload *f);

#endif