/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
};

/* Every packet matches unless the filter is hardware-only (skip_sw);
 * on a match, bump the per-CPU hit counter and run the attached actions.
 */
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
	tcf_block_offload_dec(block, &head->flags);
}

static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.exts = &head->exts;
	cls_mall.cookie = cookie;

	err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
	if (err < 0) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	} else if (err > 0) {
		head->in_hw_count = err;
		tcf_block_offload_inc(block, &head->flags);
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]	= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]	= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

/* Only a single matchall filter can exist per tcf_proto; if a head is
 * already installed, the change request is rejected with -EEXIST.
 */
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX, tca[TCA_OPTIONS],
			       mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.exts = &head->exts;
	cls_mall.cookie = (unsigned long)head;

	err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
	if (err) {
		if (add && tc_skip_sw(head->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);

	return 0;
}

static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid)
		head->res.class = cl;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");