/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/netlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#if 0 /* control */
#define DPRINTK(format,args...)	printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

/* The list of all installed classifier types */

static struct tcf_proto_ops *tcf_proto_base;

/* Protects the list of registered TC modules.  It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by its string name */

static struct tcf_proto_ops *tcf_proto_lookup_ops(struct rtattr *kind)
{
	struct tcf_proto_ops *t = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		for (t = tcf_proto_base; t; t = t->next) {
			if (rtattr_strcmp(kind, t->kind) == 0) {
				if (!try_module_get(t->owner))
					t = NULL;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return t;
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t, **tp;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	ops->next = NULL;
	*tp = ops;
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t, **tp;
	int rc = -ENOENT;

	write_lock(&cls_mod_lock);
	for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
		if (t == ops)
			break;

	if (!t)
		goto out;
	*tp = t->next;
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
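
/*
 * Usage sketch (illustration only, kept out of the build): a classifier
 * module registers its struct tcf_proto_ops from its init hook and
 * unregisters it again on exit.  The "example" kind and the stub callbacks
 * below are hypothetical; a real classifier must also provide get/put,
 * change, delete, walk and dump operations.
 */
#if 0
static int example_classify(struct sk_buff *skb, struct tcf_proto *tp,
			    struct tcf_result *res)
{
	return -1;	/* hypothetical stub: never matches */
}

static int example_init(struct tcf_proto *tp)
{
	return 0;	/* hypothetical stub: nothing to set up */
}

static void example_destroy(struct tcf_proto *tp)
{
}

static struct tcf_proto_ops cls_example_ops = {
	.kind		= "example",	/* hypothetical kind string */
	.classify	= example_classify,
	.init		= example_init,
	.destroy	= example_destroy,
	.owner		= THIS_MODULE,
};

static int __init cls_example_module_init(void)
{
	return register_tcf_proto_ops(&cls_example_ops);
}

static void __exit cls_example_module_exit(void)
{
	unregister_tcf_proto_ops(&cls_example_ops);
}

module_init(cls_example_module_init);
module_exit(cls_example_module_exit);
#endif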

static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			  struct tcf_proto *tp, unsigned long fh, int event);


/* Select a new prio value from the range managed by the kernel. */

static __inline__ u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return first;
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct rtattr **tca;
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 nprio;
	u32 parent;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto **back, **chain;
	struct tcf_proto *tp;
	struct tcf_proto_ops *tp_ops;
	struct Qdisc_class_ops *cops;
	unsigned long cl;
	unsigned long fh;
	int err;

replay:
	tca = arg;
	t = NLMSG_DATA(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	nprio = prio;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		/* If no priority is given, the user wants us to allocate it. */
		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE))
			return -ENOENT;
		prio = TC_H_MAKE(0x80000000U, 0U);
	}

	/* Find head of filter chain. */

	/* Find link */
	if ((dev = __dev_get_by_index(&init_net, t->tcm_ifindex)) == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc_sleeping;
		parent = q->handle;
	} else if ((q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent))) == NULL)
		return -EINVAL;

	/* Is it classful? */
	if ((cops = q->ops->cl_ops) == NULL)
		return -EINVAL;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		cl = cops->get(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	chain = cops->tcf_chain(q, cl);
	err = -EINVAL;
	if (chain == NULL)
		goto errout;

	/* Check the chain for existence of a proto-tcf with this priority */
	for (back = chain; (tp = *back) != NULL; back = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (!nprio || (tp->protocol != protocol && protocol))
					goto errout;
			} else
				tp = NULL;
			break;
		}
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create a new one */

		if (tca[TCA_KIND-1] == NULL || !protocol)
			goto errout;

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;


		/* Create new proto tcf */

		err = -ENOBUFS;
		if ((tp = kzalloc(sizeof(*tp), GFP_KERNEL)) == NULL)
			goto errout;
		err = -EINVAL;
		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);
		if (tp_ops == NULL) {
#ifdef CONFIG_KMOD
			struct rtattr *kind = tca[TCA_KIND-1];
			char name[IFNAMSIZ];

			if (kind != NULL &&
			    rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
				rtnl_unlock();
				request_module("cls_%s", name);
				rtnl_lock();
				tp_ops = tcf_proto_lookup_ops(kind);
				/* We dropped the RTNL semaphore in order to
				 * perform the module load.  So, even if we
				 * succeeded in loading the module we have to
				 * replay the request.  We indicate this using
				 * -EAGAIN.
				 */
				if (tp_ops != NULL) {
					module_put(tp_ops->owner);
					err = -EAGAIN;
				}
			}
#endif
			kfree(tp);
			goto errout;
		}
		tp->ops = tp_ops;
		tp->protocol = protocol;
		tp->prio = nprio ? : tcf_auto_prio(*back);
		tp->q = q;
		tp->classify = tp_ops->classify;
		tp->classid = parent;
		if ((err = tp_ops->init(tp)) != 0) {
			module_put(tp_ops->owner);
			kfree(tp);
			goto errout;
		}

		qdisc_lock_tree(dev);
		tp->next = *back;
		*back = tp;
		qdisc_unlock_tree(dev);

	} else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
		goto errout;

	fh = tp->ops->get(tp, t->tcm_handle);

	if (fh == 0) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			qdisc_lock_tree(dev);
			*back = tp->next;
			qdisc_unlock_tree(dev);

			tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
			tcf_destroy(tp);
			err = 0;
			goto errout;
		}

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto errout;
			break;
		case RTM_DELTFILTER:
			err = tp->ops->delete(tp, fh);
			if (err == 0)
				tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
	if (err == 0)
		tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);

errout:
	if (cl)
		cops->put(q, cl);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
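
/*
 * Illustration only (not built): how a request's tcm_info field packs the
 * filter priority into the major bits and the link-layer protocol into the
 * minor bits, matching the TC_H_MAJ()/TC_H_MIN() decoding at the top of
 * tc_ctl_tfilter() above.  The ifindex and priority values are made up.
 */
#if 0
static void example_fill_tcmsg(struct tcmsg *t)
{
	t->tcm_family  = AF_UNSPEC;
	t->tcm_ifindex = 2;			/* hypothetical device index */
	t->tcm_parent  = TC_H_ROOT;
	/* priority 10 in the upper 16 bits, IPv4 protocol in the lower 16 */
	t->tcm_info    = TC_H_MAKE(10 << 16, htons(ETH_P_IP));
	t->tcm_handle  = 0;			/* let the classifier pick one */
}
#endif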

static int
tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
	      u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = tp->q->dev->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, tp->ops->kind);
	tcm->tcm_handle = fh;
	if (RTM_DELTFILTER != event) {
		tcm->tcm_handle = 0;
		if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0)
			goto rtattr_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
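
/*
 * Illustration only (not built): the shape of a classifier ->dump()
 * callback that tcf_fill_node() invokes above.  It nests the classifier's
 * own attributes inside TCA_OPTIONS; the TCA_EXAMPLE_CLASSID attribute and
 * the example_filter structure are hypothetical.
 */
#if 0
static int example_dump(struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct example_filter *f = (struct example_filter *)fh;	/* hypothetical */
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	/* open a nested TCA_OPTIONS attribute, fill it, then fix its length */
	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_EXAMPLE_CLASSID, sizeof(u32), &f->classid);
	rta->rta_len = skb_tail_pointer(skb) - b;
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
#endif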

static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			  struct tcf_proto *tp, unsigned long fh, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(skb, tp, fh, pid, n->nlmsg_seq, 0, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
			 struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;

	return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).pid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
}

static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	int t;
	int s_t;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto *tp, **chain;
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	unsigned long cl = 0;
	struct Qdisc_class_ops *cops;
	struct tcf_dump_args arg;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return skb->len;
	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc_sleeping;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	if ((cops = q->ops->cl_ops) == NULL)
		goto errout;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->get(q, tcm->tcm_parent);
		if (cl == 0)
			goto errout;
	}
	chain = cops->tcf_chain(q, cl);
	if (chain == NULL)
		goto errout;

	s_t = cb->args[0];

	for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
		if (t < s_t)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				break;
			cb->args[1] = 1;
		}
		if (tp->ops->walk == NULL)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			break;
	}

	cb->args[0] = t;

errout:
	if (cl)
		cops->put(q, cl);
out:
	dev_put(dev);
	return skb->len;
}
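
/*
 * Illustration only (not built): the shape of a classifier ->walk()
 * callback that tc_dump_tfilter() drives above.  It must honour the
 * walker's skip/count bookkeeping and set stop when fn() reports failure.
 * The example_head and example_filter structures are hypothetical.
 */
#if 0
static void example_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct example_head *head = (struct example_head *)tp->root;	/* hypothetical */
	struct example_filter *f;

	list_for_each_entry(f, &head->flist, link) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
#endif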

void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->action) {
		tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
		exts->action = NULL;
	}
#endif
}


int tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
		      struct rtattr *rate_tlv, struct tcf_exts *exts,
		      struct tcf_ext_map *map)
{
	memset(exts, 0, sizeof(*exts));

#ifdef CONFIG_NET_CLS_ACT
	{
		int err;
		struct tc_action *act;

		if (map->police && tb[map->police-1]) {
			act = tcf_action_init_1(tb[map->police-1], rate_tlv,
						"police", TCA_ACT_NOREPLACE,
						TCA_ACT_BIND, &err);
			if (act == NULL)
				return err;

			act->type = TCA_OLD_COMPAT;
			exts->action = act;
		} else if (map->action && tb[map->action-1]) {
			act = tcf_action_init(tb[map->action-1], rate_tlv, NULL,
					      TCA_ACT_NOREPLACE,
					      TCA_ACT_BIND, &err);
			if (act == NULL)
				return err;

			exts->action = act;
		}
	}
#else
	if ((map->action && tb[map->action-1]) ||
	    (map->police && tb[map->police-1]))
		return -EOPNOTSUPP;
#endif

	return 0;
}

void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	if (src->action) {
		struct tc_action *act;
		tcf_tree_lock(tp);
		act = xchg(&dst->action, src->action);
		tcf_tree_unlock(tp);
		if (act)
			tcf_action_destroy(act, TCA_ACT_UNBIND);
	}
#endif
}

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
		  struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
	if (map->action && exts->action) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);

		if (exts->action->type != TCA_OLD_COMPAT) {
			RTA_PUT(skb, map->action, 0, NULL);
			if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
				goto rtattr_failure;
			p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
		} else if (map->police) {
			RTA_PUT(skb, map->police, 0, NULL);
			if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
				goto rtattr_failure;
			p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
		}
	}
#endif
	return 0;
rtattr_failure: __attribute__ ((unused))
	return -1;
}

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
			struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->action)
		if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
			goto rtattr_failure;
#endif
	return 0;
rtattr_failure: __attribute__ ((unused))
	return -1;
}

static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter);

	return 0;
}

subsys_initcall(tc_filter_init);

EXPORT_SYMBOL(register_tcf_proto_ops);
EXPORT_SYMBOL(unregister_tcf_proto_ops);
EXPORT_SYMBOL(tcf_exts_validate);
EXPORT_SYMBOL(tcf_exts_destroy);
EXPORT_SYMBOL(tcf_exts_change);
EXPORT_SYMBOL(tcf_exts_dump);
EXPORT_SYMBOL(tcf_exts_dump_stats);
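
/*
 * Usage sketch (illustration only, not built): the typical pattern by which
 * a classifier's ->change() path wires the tcf_exts helpers above together,
 * modeled loosely on the in-tree classifiers.  The example_ext_map, the
 * TCA_EXAMPLE_ACT/TCA_EXAMPLE_POLICE attribute numbers and the
 * example_filter structure are hypothetical.  On error paths after a
 * successful validate, tcf_exts_destroy() releases the parsed actions.
 */
#if 0
static struct tcf_ext_map example_ext_map = {
	.action	= TCA_EXAMPLE_ACT,	/* hypothetical attribute numbers */
	.police	= TCA_EXAMPLE_POLICE,
};

static int example_set_parms(struct tcf_proto *tp, struct example_filter *f,
			     struct rtattr **tb, struct rtattr *est)
{
	struct tcf_exts e;
	int err;

	/* Parse and bind any actions/policers carried in the request ... */
	err = tcf_exts_validate(tp, tb, est, &e, &example_ext_map);
	if (err < 0)
		return err;

	/* ... then swap them into the filter under the tree lock; the old
	 * actions, if any, are released by tcf_exts_change(). */
	tcf_exts_change(tp, &f->exts, &e);
	return 0;
}
#endif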