// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_fw.c	Classifier mapping ipchains' fwmark to traffic class.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

#define HTSIZE 256

struct fw_head {
	u32			mask;
	struct fw_filter __rcu	*ht[HTSIZE];
	struct rcu_head		rcu;
};

struct fw_filter {
	struct fw_filter __rcu	*next;
	u32			id;
	struct tcf_result	res;
	int			ifindex;
	struct tcf_exts		exts;
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
};

static u32 fw_hash(u32 handle)
{
	handle ^= (handle >> 16);
	handle ^= (handle >> 8);
	return handle % HTSIZE;
}

static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct fw_head *head = rcu_dereference_bh(tp->root);
	struct fw_filter *f;
	int r;
	u32 id = skb->mark;

	if (head != NULL) {
		id &= head->mask;

		for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f;
		     f = rcu_dereference_bh(f->next)) {
			if (f->id == id) {
				*res = f->res;
				if (!tcf_match_indev(skb, f->ifindex))
					continue;
				r = tcf_exts_exec(skb, &f->exts, res);
				if (r < 0)
					continue;

				return r;
			}
		}
	} else {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		/* Old method: classify the packet using its skb mark. */
		if (id && (TC_H_MAJ(id) == 0 ||
			   !(TC_H_MAJ(id ^ q->handle)))) {
			res->classid = id;
			res->class = 0;
			return 0;
		}
	}

	return -1;
}

static void *fw_get(struct tcf_proto *tp, u32 handle)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;

	if (head == NULL)
		return NULL;

	f = rtnl_dereference(head->ht[fw_hash(handle)]);
	for (; f; f = rtnl_dereference(f->next)) {
		if (f->id == handle)
			return f;
	}
	return NULL;
}

static int fw_init(struct tcf_proto *tp)
{
	/* We don't allocate fw_head here, because in the old method
	 * we don't need it at all.
	 */
	return 0;
}

static void __fw_delete_filter(struct fw_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fw_delete_filter_work(struct work_struct *work)
{
	struct fw_filter *f = container_of(to_rcu_work(work),
					   struct fw_filter,
					   rwork);
	rtnl_lock();
	__fw_delete_filter(f);
	rtnl_unlock();
}

static void fw_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;
	int h;

	if (head == NULL)
		return;

	for (h = 0; h < HTSIZE; h++) {
		while ((f = rtnl_dereference(head->ht[h])) != NULL) {
			RCU_INIT_POINTER(head->ht[h],
					 rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			if (tcf_exts_get_net(&f->exts))
				tcf_queue_work(&f->rwork, fw_delete_filter_work);
			else
				__fw_delete_filter(f);
		}
	}
	kfree_rcu(head, rcu);
}

static int fw_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = arg;
	struct fw_filter __rcu **fp;
	struct fw_filter *pfp;
	int ret = -EINVAL;
	int h;

	if (head == NULL || f == NULL)
		goto out;

	fp = &head->ht[fw_hash(f->id)];

	for (pfp = rtnl_dereference(*fp); pfp;
	     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
		if (pfp == f) {
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, fw_delete_filter_work);
			ret = 0;
			break;
		}
	}

	*last = true;
	for (h = 0; h < HTSIZE; h++) {
		if (rcu_access_pointer(head->ht[h])) {
			*last = false;
			break;
		}
	}

out:
	return ret;
}

static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
	[TCA_FW_CLASSID]	= { .type = NLA_U32 },
	[TCA_FW_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_FW_MASK]		= { .type = NLA_U32 },
};

static int fw_set_parms(struct net *net, struct tcf_proto *tp,
			struct fw_filter *f, struct nlattr **tb,
			struct nlattr **tca, unsigned long base, bool ovr,
			struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	u32 mask;
	int err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr,
				true, extack);
	if (err < 0)
		return err;

	if (tb[TCA_FW_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	if (tb[TCA_FW_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
		if (ret < 0)
			return ret;
		f->ifindex = ret;
	}

	err = -EINVAL;
	if (tb[TCA_FW_MASK]) {
		mask = nla_get_u32(tb[TCA_FW_MASK]);
		if (mask != head->mask)
			return err;
	} else if (head->mask != 0xFFFFFFFF)
		return err;

	return 0;
}

static int fw_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca, void **arg,
		     bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = *arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FW_MAX + 1];
	int err;

	if (!opt)
		return handle ? -EINVAL : 0; /* Succeed if it is old method. */

	err = nla_parse_nested_deprecated(tb, TCA_FW_MAX, opt, fw_policy,
					  NULL);
	if (err < 0)
		return err;

	if (f) {
		struct fw_filter *pfp, *fnew;
		struct fw_filter __rcu **fp;

		if (f->id != handle && handle)
			return -EINVAL;

		fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
		if (!fnew)
			return -ENOBUFS;

		fnew->id = f->id;
		fnew->res = f->res;
		fnew->ifindex = f->ifindex;
		fnew->tp = f->tp;

		err = tcf_exts_init(&fnew->exts, net, TCA_FW_ACT,
				    TCA_FW_POLICE);
		if (err < 0) {
			kfree(fnew);
			return err;
		}

		err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr, extack);
		if (err < 0) {
			tcf_exts_destroy(&fnew->exts);
			kfree(fnew);
			return err;
		}

		fp = &head->ht[fw_hash(fnew->id)];
		for (pfp = rtnl_dereference(*fp); pfp;
		     fp = &pfp->next, pfp = rtnl_dereference(*fp))
			if (pfp == f)
				break;

		RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
		rcu_assign_pointer(*fp, fnew);
		tcf_unbind_filter(tp, &f->res);
		tcf_exts_get_net(&f->exts);
		tcf_queue_work(&f->rwork, fw_delete_filter_work);

		*arg = fnew;
		return err;
	}

	if (!handle)
		return -EINVAL;

	if (!head) {
		u32 mask = 0xFFFFFFFF;
		if (tb[TCA_FW_MASK])
			mask = nla_get_u32(tb[TCA_FW_MASK]);

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (!head)
			return -ENOBUFS;
		head->mask = mask;

		rcu_assign_pointer(tp->root, head);
	}

	f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
	if (f == NULL)
		return -ENOBUFS;

	err = tcf_exts_init(&f->exts, net, TCA_FW_ACT, TCA_FW_POLICE);
	if (err < 0)
		goto errout;
	f->id = handle;
	f->tp = tp;

	err = fw_set_parms(net, tp, f, tb, tca, base, ovr, extack);
	if (err < 0)
		goto errout;

	RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]);
	rcu_assign_pointer(head->ht[fw_hash(handle)], f);

	*arg = f;
	return 0;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	int h;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h < HTSIZE; h++) {
		struct fw_filter *f;

		for (f = rtnl_dereference(head->ht[h]); f;
		     f = rtnl_dereference(f->next)) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(tp, f, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static int fw_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->id;

	if (!f->res.classid && !tcf_exts_has_actions(&f->exts))
		return skb->len;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (f->ifindex) {
		struct net_device *dev;
		dev = __dev_get_by_index(net, f->ifindex);
		if (dev && nla_put_string(skb, TCA_FW_INDEV, dev->name))
			goto nla_put_failure;
	}
	if (head->mask != 0xFFFFFFFF &&
	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct fw_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops cls_fw_ops __read_mostly = {
	.kind		=	"fw",
	.classify	=	fw_classify,
	.init		=	fw_init,
	.destroy	=	fw_destroy,
	.get		=	fw_get,
	.change		=	fw_change,
	.delete		=	fw_delete,
	.walk		=	fw_walk,
	.dump		=	fw_dump,
	.bind_class	=	fw_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_fw(void)
{
	return register_tcf_proto_ops(&cls_fw_ops);
}

static void __exit exit_fw(void)
{
	unregister_tcf_proto_ops(&cls_fw_ops);
}

module_init(init_fw)
module_exit(exit_fw)
MODULE_LICENSE("GPL");