/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);
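
/*
 * Example: a hypothetical driver's flower rule parser would typically gate
 * each helper above on flow_rule_match_key() (from <net/flow_offload.h>)
 * before reading key and mask; the foo_* names are illustrative only:
 *
 *	static int foo_parse_rule(const struct flow_rule *rule,
 *				  struct foo_filter *filter)
 *	{
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *			struct flow_match_basic match;
 *
 *			flow_rule_match_basic(rule, &match);
 *			filter->ip_proto = match.key->ip_proto;
 *			filter->ip_proto_mask = match.mask->ip_proto;
 *		}
 *
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
 *			struct flow_match_ipv4_addrs match;
 *
 *			flow_rule_match_ipv4_addrs(rule, &match);
 *			filter->dst_ip = match.key->dst;
 *			filter->dst_ip_mask = match.mask->dst;
 *		}
 *
 *		return 0;
 *	}
 */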

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);
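
/*
 * Example: a hypothetical driver that keeps per-binding state can hand
 * flow_block_cb_alloc() a release callback so that state is freed together
 * with the flow_block_cb by flow_block_cb_free().  flow_block_cb_add() and
 * flow_block_cb_remove() are the list helpers from <net/flow_offload.h>,
 * and the foo_* names are illustrative only; flow_block_cb_setup_simple()
 * below wraps the common case that needs no release callback.
 *
 *	case FLOW_BLOCK_BIND:
 *		block_cb = flow_block_cb_alloc(foo_setup_cb, dev, foo_priv,
 *					       foo_release);
 *		if (IS_ERR(block_cb))
 *			return PTR_ERR(block_cb);
 *		flow_block_cb_add(block_cb, f);
 *		return 0;
 *	case FLOW_BLOCK_UNBIND:
 *		block_cb = flow_block_cb_lookup(f->block, foo_setup_cb, dev);
 *		if (!block_cb)
 *			return -ENOENT;
 *		flow_block_cb_remove(block_cb, f);
 *		flow_block_cb_free(block_cb);
 *		return 0;
 */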

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

static LIST_HEAD(block_cb_list);

static struct rhashtable indr_setup_block_ht;

struct flow_indr_block_cb {
	struct list_head list;
	void *cb_priv;
	flow_indr_block_bind_cb_t *cb;
	void *cb_ident;
};

struct flow_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params flow_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct flow_indr_block_dev, dev),
	.head_offset	= offsetof(struct flow_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};

static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      flow_indr_setup_block_ht_params);
}

static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   flow_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       flow_indr_setup_block_ht_params);
	kfree(indr_dev);
}
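
/*
 * Each flow_indr_block_dev above is a refcounted, per-netdevice anchor in
 * indr_setup_block_ht, keyed by the netdev pointer; the flow_indr_block_cb
 * entries managed below hang off its cb_list, one per registered driver
 * callback.
 */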

static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

static DEFINE_MUTEX(flow_indr_block_cb_lock);

static void flow_block_cmd(struct net_device *dev,
			   flow_indr_block_bind_cb_t *cb, void *cb_priv,
			   enum flow_block_command command)
{
	struct flow_indr_block_entry *entry;

	mutex_lock(&flow_indr_block_cb_lock);
	list_for_each_entry(entry, &block_cb_list, list) {
		entry->cb(dev, cb, cb_priv, command);
	}
	mutex_unlock(&flow_indr_block_cb_lock);
}

int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;
	int err;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_BIND);

	return 0;

err_dev_put:
	flow_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);

void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(indr_block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);
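
/*
 * Example: a hypothetical tunnel offload driver registering for indirect
 * block binds on a netdevice it does not own (the foo_* callback and priv
 * names are illustrative only); registration replays FLOW_BLOCK_BIND for
 * any blocks already attached to that device:
 *
 *	err = flow_indr_block_cb_register(tunnel_dev, foo_priv,
 *					  foo_indr_setup_block_cb, foo_priv);
 *	if (err)
 *		return err;
 *
 * and on teardown:
 *
 *	flow_indr_block_cb_unregister(tunnel_dev, foo_indr_setup_block_cb,
 *				      foo_priv);
 */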

void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);

void flow_indr_block_call(struct net_device *dev,
			  struct flow_block_offload *bo,
			  enum flow_block_command command,
			  enum tc_setup_type type)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, type, bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);

void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_add_tail(&entry->list, &block_cb_list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);

void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_del(&entry->list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);

static int __init init_flow_indr_rhashtable(void)
{
	return rhashtable_init(&indr_setup_block_ht,
			       &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);
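
/*
 * Example: a subsystem that owns flow blocks can hook into the dispatch
 * list used by flow_block_cmd() with a flow_indr_block_entry whose ->cb
 * replays existing bindings when a driver (un)registers.  The foo_* names
 * are illustrative only; the callback signature follows the entry->cb()
 * invocation in flow_block_cmd() above:
 *
 *	static void foo_indr_block_get_and_cmd(struct net_device *dev,
 *					       flow_indr_block_bind_cb_t *cb,
 *					       void *cb_priv,
 *					       enum flow_block_command command)
 *	{
 *		// walk the subsystem's blocks on @dev and call @cb for each
 *	}
 *
 *	static struct flow_indr_block_entry foo_indr_block_entry = {
 *		.cb = foo_indr_block_get_and_cmd,
 *	};
 *
 *	flow_indr_add_block_cb(&foo_indr_block_entry);
 */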