/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	union {
		/* Reused for deferred destruction once the filter is unlinked. */
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

/* Classify by the classid configured on the sending task's net_cls cgroup. */
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
	u32 classid = task_get_classid(skb);

	if (!classid)
		return -1;
	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;

	return tcf_exts_exec(skb, &head->exts, res);
}

/* There is only one filter instance per tp, so there is nothing to look up. */
static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return NULL;
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

static void __cls_cgroup_destroy(struct cls_cgroup_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_em_tree_destroy(&head->ematches);
	tcf_exts_put_net(&head->exts);
	kfree(head);
}

/* Final teardown runs in process context because it needs the RTNL lock. */
static void cls_cgroup_destroy_work(struct work_struct *work)
{
	struct cls_cgroup_head *head = container_of(work,
						    struct cls_cgroup_head,
						    work);
	rtnl_lock();
	__cls_cgroup_destroy(head);
	rtnl_unlock();
}

static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
	struct cls_cgroup_head *head = container_of(root,
						    struct cls_cgroup_head,
						    rcu);

	INIT_WORK(&head->work, cls_cgroup_destroy_work);
	tcf_queue_work(&head->work);
}

/* Create or replace the single filter; any old head is freed only after an
 * RCU grace period.
 */
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     void **arg, bool ovr)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct cls_cgroup_head *new;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (!head && !handle)
		return -EINVAL;

	if (head && handle != head->handle)
		return -ENOENT;

	new = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	if (err < 0)
		goto errout;
	new->handle = handle;
	new->tp = tp;
	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy, NULL);
	if (err < 0)
		goto errout;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr);
	if (err < 0)
		goto errout;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches);
	if (err < 0)
		goto errout;

	rcu_assign_pointer(tp->root, new);
	if (head) {
		tcf_exts_get_net(&head->exts);
		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
	}
	return 0;
errout:
	tcf_exts_destroy(&new->exts);
	kfree(new);
	return err;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	/* Head can still be NULL due to cls_cgroup_init(). */
	if (head) {
		if (tcf_exts_get_net(&head->exts))
			call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
		else
			__cls_cgroup_destroy(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	return -EOPNOTSUPP;
}

/* Walk the single filter instance, honouring the walker's skip/count cursor. */
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, void *fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
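
/*
 * Usage sketch (assumes the cgroup v1 net_cls controller and iproute2's tc;
 * device, cgroup name, and rate below are illustrative). The classid written
 * to net_cls.classid is what task_get_classid() returns in
 * cls_cgroup_classify(), so traffic from tasks in that cgroup is steered to
 * class 10:1:
 *
 *	# mkdir /sys/fs/cgroup/net_cls/foo
 *	# echo 0x00100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid
 *	# tc qdisc add dev eth0 root handle 10: htb
 *	# tc class add dev eth0 parent 10: classid 10:1 htb rate 40mbit
 *	# tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 */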