/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);
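/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * normally guards each flow_rule_match_*() extractor with
 * flow_rule_match_key() so it only dereferences keys the classifier
 * actually dissected. "my_hw_filter" and "my_parse_basic" below are
 * hypothetical names.
 *
 *	static int my_parse_basic(const struct flow_rule *rule,
 *				  struct my_hw_filter *filt)
 *	{
 *		struct flow_match_basic match;
 *
 *		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
 *			return 0;
 *
 *		flow_rule_match_basic(rule, &match);
 *		// Only bits set in the mask are meaningful to match on.
 *		filt->ip_proto = match.key->ip_proto & match.mask->ip_proto;
 *		return 0;
 *	}
 */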
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);
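/*
 * Usage sketch (illustrative only): flow_action_cookie_create() copies
 * "len" bytes out of "data", so the caller keeps ownership of the source
 * buffer and must pair the allocation with flow_action_cookie_destroy().
 * "src" and "src_len" are hypothetical names:
 *
 *	struct flow_action_cookie *cookie;
 *
 *	cookie = flow_action_cookie_create(src, src_len, GFP_KERNEL);
 *	if (!cookie)
 *		return -ENOMEM;
 *	...
 *	flow_action_cookie_destroy(cookie);
 */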
void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
	struct rcu_head			rcu;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
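/*
 * Usage sketch (illustrative only): a driver typically calls
 * flow_indr_dev_register() once at init and drops its reference with
 * flow_indr_dev_unregister() (defined below) on exit; registering the
 * same (cb, cb_priv) pair again only bumps the refcount. "my_indr_cb",
 * "my_setup_cb" and "priv" are hypothetical names:
 *
 *	err = flow_indr_dev_register(my_indr_cb, priv);
 *	if (err)
 *		goto err_out;
 *	...
 *	flow_indr_dev_unregister(my_indr_cb, priv, my_setup_cb);
 */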
static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->cb == setup_cb &&
		    this->cb_priv == cb_priv) {
			list_move(&this->indr.list, cleanup_list);
			return;
		}
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      flow_setup_cb_t *setup_cb)
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, void *data,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.dev = dev;
	flow_block->indr.cleanup = cleanup;
}

static void __flow_block_indr_binding(struct flow_block_offload *bo,
				      struct net_device *dev, void *data,
				      void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		switch (bo->command) {
		case FLOW_BLOCK_BIND:
			flow_block_indr_init(block_cb, bo, dev, data, cleanup);
			list_add(&block_cb->indr.list, &flow_block_indr_list);
			break;
		case FLOW_BLOCK_UNBIND:
			list_del(&block_cb->indr.list);
			break;
		}
	}
}

int flow_indr_dev_setup_offload(struct net_device *dev,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, this->cb_priv, type, bo);

	__flow_block_indr_binding(bo, dev, data, cleanup);
	mutex_unlock(&flow_indr_block_lock);

	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
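/*
 * Usage sketch (illustrative only): a subsystem offers a block to every
 * registered driver and can fall back to software when nobody binds;
 * -EOPNOTSUPP here means bo->cb_list stayed empty after walking all the
 * registered callbacks. "my_cleanup" and "my_sw_fallback" are
 * hypothetical names:
 *
 *	err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, data, &bo,
 *					  my_cleanup);
 *	if (err == -EOPNOTSUPP)
 *		return my_sw_fallback(dev, data);
 */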