// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

/* tc matchall classifier offload for Spectrum switches.
 *
 * A matchall rule carries a single action — either mirror (offloaded via a
 * SPAN agent) or sample. Rules live on a flow block and are replayed onto
 * every port the block is bound to; per-ASIC differences in sampling are
 * abstracted behind mlxsw_sp_mall_ops (Spectrum-1 uses the MPSC register,
 * Spectrum-2+ mirrors to the CPU through a SPAN agent).
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

/* Look up a matchall entry on the block by its TC cookie; NULL if absent. */
static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall.list, list)
		if (mall_entry->cookie == cookie)
			return mall_entry;

	return NULL;
}

/* Offload a mirror entry on one port: acquire a SPAN agent towards the
 * destination netdev, mark the port as analyzed, then bind the agent to the
 * ingress or egress trigger. Resources are released in reverse order on
 * failure (goto unwind) and in mlxsw_sp_mall_port_mirror_del().
 */
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_agent_parms agent_parms = {};
	struct mlxsw_sp_span_trigger_parms parms;
	enum mlxsw_sp_span_trigger trigger;
	int err;

	if (!mall_entry->mirror.to_dev) {
		NL_SET_ERR_MSG(extack, "Could not find requested device");
		return -EINVAL;
	}

	agent_parms.to_dev = mall_entry->mirror.to_dev;
	err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
				      &agent_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
		return err;
	}

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
					      mall_entry->ingress);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
		goto err_analyzed_port_get;
	}

	trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = mall_entry->mirror.span_id;
	/* Rate of 1: mirroring is not probabilistic, every packet triggers. */
	parms.probability_rate = 1;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
				       &parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
		goto err_agent_bind;
	}

	return 0;

err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
	return err;
}

/* Tear down a port's mirror offload; exact reverse order of
 * mlxsw_sp_mall_port_mirror_add().
 */
static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_parms parms;
	enum mlxsw_sp_span_trigger trigger;

	trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = mall_entry->mirror.span_id;
	mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}

/* Program per-port packet sampling via the MPSC register (Spectrum-1 path). */
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

/* Offload a sample entry on one port: register the psample trigger
 * parameters for this (port, direction) first, then let the per-ASIC op
 * program the hardware. Unwound in reverse on failure.
 */
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_sample_trigger trigger;
	int err;

	if (mall_entry->ingress)
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
	else
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
	trigger.local_port = mlxsw_sp_port->local_port;
	err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger,
						 &mall_entry->sample.params,
						 extack);
	if (err)
		return err;

	err = mlxsw_sp->mall_ops->sample_add(mlxsw_sp, mlxsw_sp_port,
					     mall_entry, extack);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
	return err;
}

/* Tear down a port's sample offload; reverse order of
 * mlxsw_sp_mall_port_sample_add().
 */
static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_sample_trigger trigger;

	if (mall_entry->ingress)
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
	else
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
	trigger.local_port = mlxsw_sp_port->local_port;

	mlxsw_sp->mall_ops->sample_del(mlxsw_sp, mlxsw_sp_port, mall_entry);
	mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
}

/* Dispatch per-port offload of one entry by its action type. */
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry,
			    struct netlink_ext_ack *extack)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry,
						     extack);
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry,
						     extack);
	default:
		/* Entry types are assigned only in mlxsw_sp_mall_replace(),
		 * so any other value is a driver bug.
		 */
		WARN_ON(1);
		return -EINVAL;
	}
}

/* Dispatch per-port removal of one entry by its action type. */
static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
		break;
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		mlxsw_sp_mall_port_sample_del(mlxsw_sp_port, mall_entry);
		break;
	default:
		WARN_ON(1);
	}
}

/* Recompute the block's cached min/max matchall priority after the rule
 * list changed. If the list is empty the cached values are left stale;
 * mlxsw_sp_mall_prio_get() returns -ENOENT in that case, so they are
 * never consumed.
 */
static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	if (list_empty(&block->mall.list))
		return;
	block->mall.min_prio = UINT_MAX;
	block->mall.max_prio = 0;
	list_for_each_entry(mall_entry, &block->mall.list, list) {
		if (mall_entry->priority < block->mall.min_prio)
			block->mall.min_prio = mall_entry->priority;
		if (mall_entry->priority > block->mall.max_prio)
			block->mall.max_prio = mall_entry->priority;
	}
}

/* Offload a new matchall rule onto the block.
 *
 * Validates the request (single action, chain 0, block not bound to both
 * directions), checks priority ordering against any flower rules sharing
 * the chain, then replays the rule onto every port the block is bound to,
 * rolling back already-programmed ports on failure.
 *
 * Returns 0 on success or a negative errno (extack set on failure).
 */
int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	unsigned int flower_min_prio;
	unsigned int flower_max_prio;
	bool flower_prio_valid;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}

	if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
		NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
				       &flower_min_prio, &flower_max_prio);
	if (err) {
		if (err != -ENOENT) {
			NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
			return err;
		}
		flower_prio_valid = false;
		/* No flower filters are installed in specified chain. */
	} else {
		flower_prio_valid = true;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;
	mall_entry->priority = f->common.prio;
	mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		/* Mirroring must not be reordered relative to existing
		 * flower rules: on ingress it has to run before them
		 * (lower priority), on egress after them (higher priority).
		 */
		if (flower_prio_valid && mall_entry->ingress &&
		    mall_entry->priority >= flower_min_prio) {
			NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
			err = -EOPNOTSUPP;
			goto errout;
		}
		if (flower_prio_valid && !mall_entry->ingress &&
		    mall_entry->priority <= flower_max_prio) {
			NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
			err = -EOPNOTSUPP;
			goto errout;
		}
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		/* Sampling must precede any flower rule in the chain. */
		if (flower_prio_valid &&
		    mall_entry->priority >= flower_min_prio) {
			NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
			err = -EOPNOTSUPP;
			goto errout;
		}
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
		mall_entry->sample.params.psample_group = act->sample.psample_group;
		mall_entry->sample.params.truncate = act->sample.truncate;
		mall_entry->sample.params.trunc_size = act->sample.trunc_size;
		mall_entry->sample.params.rate = act->sample.rate;
	} else {
		err = -EOPNOTSUPP;
		goto errout;
	}

	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
						  mall_entry, f->common.extack);
		if (err)
			goto rollback;
	}

	block->rule_count++;
	/* Note the deliberate cross: an ingress rule counts as an *egress*
	 * blocker (and vice versa) — it prevents the block from additionally
	 * being bound in the opposite direction. Symmetric with the
	 * decrements in mlxsw_sp_mall_destroy().
	 */
	if (mall_entry->ingress)
		block->egress_blocker_rule_count++;
	else
		block->ingress_blocker_rule_count++;
	list_add_tail(&mall_entry->list, &block->mall.list);
	mlxsw_sp_mall_prio_update(block);
	return 0;

rollback:
	/* Unwind only the bindings programmed before the failure. */
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
	kfree(mall_entry);
	return err;
}

/* Remove a matchall rule (looked up by TC cookie) from the block and from
 * every bound port. The entry is freed via RCU because sampled RX packets
 * may still be referencing it.
 */
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
			   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
	if (!mall_entry) {
		NL_SET_ERR_MSG(f->common.extack, "Entry not found");
		return;
	}

	list_del(&mall_entry->list);
	if (mall_entry->ingress)
		block->egress_blocker_rule_count--;
	else
		block->ingress_blocker_rule_count--;
	block->rule_count--;
	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
	kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
	mlxsw_sp_mall_prio_update(block);
}

/* Replay all of the block's matchall rules onto a newly bound port,
 * rolling back the already-replayed ones on failure.
 */
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &block->mall.list, list) {
		err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry,
						  extack);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
					     list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
	return err;
}

/* Remove all of the block's matchall rules from a port being unbound. */
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall.list, list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}

/* Report the cached matchall priority range for chain 0 of the block. */
int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
			   unsigned int *p_min_prio, unsigned int *p_max_prio)
{
	if (chain_index || list_empty(&block->mall.list))
		/* In case there are no matchall rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return -ENOENT;
	*p_min_prio = block->mall.min_prio;
	*p_max_prio = block->mall.max_prio;
	return 0;
}

/* Spectrum-1 sampling: ingress only, programmed through the MPSC register
 * with a hardware-bounded rate.
 */
static int mlxsw_sp1_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_mall_entry *mall_entry,
				     struct netlink_ext_ack *extack)
{
	u32 rate = mall_entry->sample.params.rate;

	if (!mall_entry->ingress) {
		NL_SET_ERR_MSG(extack, "Sampling is not supported on egress");
		return -EOPNOTSUPP;
	}

	if (rate > MLXSW_REG_MPSC_RATE_MAX) {
		NL_SET_ERR_MSG(extack, "Unsupported sampling rate");
		return -EOPNOTSUPP;
	}

	return mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, rate);
}

static void mlxsw_sp1_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_mall_entry *mall_entry)
{
	/* Rate is don't-care when disabling; 1 keeps the register field
	 * valid. NOTE(review): presumed — confirm against MPSC register
	 * documentation.
	 */
	mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
}

const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops = {
	.sample_add = mlxsw_sp1_mall_sample_add,
	.sample_del = mlxsw_sp1_mall_sample_del,
};

/* Spectrum-2+ sampling: implemented as probabilistic mirroring to the CPU
 * via a SPAN agent with a dedicated sampling session. Same acquire order
 * and goto unwind as the mirror path above.
 */
static int mlxsw_sp2_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_mall_entry *mall_entry,
				     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = NULL,	/* Mirror to CPU. */
		.session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
	};
	u32 rate = mall_entry->sample.params.rate;
	enum mlxsw_sp_span_trigger span_trigger;
	int err;

	err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->sample.span_id,
				      &agent_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
		return err;
	}

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
					      mall_entry->ingress);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
		goto err_analyzed_port_get;
	}

	span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					     MLXSW_SP_SPAN_TRIGGER_EGRESS;
	trigger_parms.span_id = mall_entry->sample.span_id;
	/* Unlike plain mirroring, the sample rate is programmed as the
	 * trigger's probability rate.
	 */
	trigger_parms.probability_rate = rate;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
		goto err_agent_bind;
	}

	return 0;

err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
	return err;
}

static void mlxsw_sp2_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	enum mlxsw_sp_span_trigger span_trigger;

	span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					     MLXSW_SP_SPAN_TRIGGER_EGRESS;
	trigger_parms.span_id = mall_entry->sample.span_id;
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
}

const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops = {
	.sample_add = mlxsw_sp2_mall_sample_add,
	.sample_del = mlxsw_sp2_mall_sample_del,
};