// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>
#include <net/tc_wrapper.h>

#define ACT_BPF_NAME_LEN	256

struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static struct tc_action_ops act_bpf_ops;

TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
				  const struct tc_action *act,
				  struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_pointers(skb);
		filter_res = bpf_prog_run(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_pointers(skb);
		filter_res = bpf_prog_run(filter, skb);
	}
	if (unlikely(!skb->tstamp && skb->mono_delivery_time))
		skb->mono_delivery_time = 0;
	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
		skb_orphan(skb);

	/* A BPF program may overwrite the default action opcode.
	 * Similarly as in cls_bpf, if filter_res == -1 we use the
	 * default action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}

static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}

static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index = prog->tcf_index,
		.refcnt = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}

static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}

static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *filter = cfg->filter;

	if (filter) {
		if (cfg->is_ebpf)
			bpf_prog_put(filter);
		else
			bpf_prog_destroy(filter);
	}

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* updates to prog->filter are prevented, since it's called either
	 * with tcf lock or during final cleanup in rcu callback
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}

static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			struct tcf_proto *tp, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;
	u32 index;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
					  act_bpf_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
	index = parm->index;
	ret = tcf_idr_check_alloc(tn, &index, act, bind);
	if (!ret) {
		ret = tcf_idr_create(tn, index, est, act,
				     &act_bpf_ops, bind, true, flags);
		if (ret < 0) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		res = ACT_P_CREATED;
	} else if (ret > 0) {
		/* Don't override defaults. */
		if (bind)
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*act, bind);
			return -EEXIST;
		}
	} else {
		return ret;
	}

	ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (ret < 0)
		goto release_idr;

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if (is_bpf == is_ebpf) {
		ret = -EINVAL;
		goto put_chain;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto put_chain;

	prog = to_bpf(*act);

	spin_lock_bh(&prog->tcf_lock);
	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;

	goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
	rcu_assign_pointer(prog->filter, cfg.filter);
	spin_unlock_bh(&prog->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (res != ACT_P_CREATED) {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_idr:
	tcf_idr_release(*act, bind);
	return ret;
}

static void tcf_bpf_cleanup(struct tc_action *act)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind = "bpf",
	.id = TCA_ID_BPF,
	.owner = THIS_MODULE,
	.act = tcf_bpf_act,
	.dump = tcf_bpf_dump,
	.cleanup = tcf_bpf_cleanup,
	.init = tcf_bpf_init,
	.size = sizeof(struct tcf_bpf),
};

static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);

	return tc_action_net_init(net, tn, &act_bpf_ops);
}

static void __net_exit bpf_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_bpf_ops.net_id);
}

static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit_batch = bpf_exit_net,
	.id = &act_bpf_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");
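
/*
 * Illustrative sketch only, kept inside a comment so it is not part of the
 * module: a minimal eBPF program of type BPF_PROG_TYPE_SCHED_ACT that could
 * be attached through this action. Its return value follows the contract
 * handled in tcf_bpf_act() above: TC_ACT_SHOT drops the packet and bumps the
 * qdisc drop counter, TC_ACT_UNSPEC falls back to the default action
 * configured from tc, and any unknown value is mapped to TC_ACT_UNSPEC.
 * The program name, ELF section name and the attach command are assumptions
 * about a typical clang/iproute2 workflow, not requirements of this file.
 *
 *	// act_example.c, built with: clang -O2 -target bpf -c act_example.c -o act_example.o
 *	#include <linux/bpf.h>
 *	#include <linux/pkt_cls.h>
 *
 *	__attribute__((section("action"), used))
 *	int act_example(struct __sk_buff *skb)
 *	{
 *		if (skb->len > 1500)
 *			return TC_ACT_SHOT;	// drop oversized packets
 *		return TC_ACT_UNSPEC;		// otherwise use the tc-configured default
 *	}
 *
 * Hypothetical attachment via iproute2 (object file and section names are
 * examples):
 *
 *	tc actions add action bpf object-file act_example.o section action
 */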