/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/fdtable.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return css_cls_state(task_css(p, net_cls_subsys_id));
}

static struct cgroup_subsys_state *
cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);
	return &cs->css;
}

static int cgrp_css_online(struct cgroup_subsys_state *css)
{
	struct cgroup_cls_state *cs = css_cls_state(css);
	struct cgroup_cls_state *parent = css_cls_state(css_parent(css));

	if (parent)
		cs->classid = parent->classid;
	return 0;
}

static void cgrp_css_free(struct cgroup_subsys_state *css)
{
	kfree(css_cls_state(css));
}

static int update_classid(const void *v, struct file *file, unsigned n)
{
	int err;
	struct socket *sock = sock_from_file(file, &err);
	if (sock)
		sock->sk->sk_classid = (u32)(unsigned long)v;
	return 0;
}

static void cgrp_attach(struct cgroup_subsys_state *css,
			struct cgroup_taskset *tset)
{
	struct task_struct *p;
	void *v;

	cgroup_taskset_for_each(p, css, tset) {
		task_lock(p);
		v = (void *)(unsigned long)task_cls_classid(p);
		iterate_fd(p->files, 0, update_classid, v);
		task_unlock(p);
	}
}

static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return css_cls_state(css)->classid;
}

static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 value)
{
	css_cls_state(css)->classid = (u32) value;
	return 0;
}

static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
	{ }	/* terminate */
};

struct cgroup_subsys net_cls_subsys = {
	.name = "net_cls",
	.css_alloc = cgrp_css_alloc,
	.css_online = cgrp_css_online,
	.css_free = cgrp_css_free,
	.attach = cgrp_attach,
	.subsys_id = net_cls_subsys_id,
	.base_cftypes = ss_files,
	.module = THIS_MODULE,
};

struct cls_cgroup_head {
	u32 handle;
	struct tcf_exts exts;
	struct tcf_ematch_tree ematches;
};

static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};

static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e,
				&cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0)
		return err;

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind = "cgroup",
	.init = cls_cgroup_init,
	.change = cls_cgroup_change,
	.classify = cls_cgroup_classify,
	.destroy = cls_cgroup_destroy,
	.get = cls_cgroup_get,
	.put = cls_cgroup_put,
	.delete = cls_cgroup_delete,
	.walk = cls_cgroup_walk,
	.dump = cls_cgroup_dump,
	.owner = THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);

	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
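
/*
 * Example usage from userspace (an illustrative sketch, not part of the
 * classifier itself; the device name, cgroup name and rate are placeholders,
 * and the net_cls hierarchy is assumed to be mounted at
 * /sys/fs/cgroup/net_cls):
 *
 *	mkdir /sys/fs/cgroup/net_cls/bulk
 *	echo 0x100001 > /sys/fs/cgroup/net_cls/bulk/net_cls.classid
 *	echo $PID > /sys/fs/cgroup/net_cls/bulk/tasks
 *
 *	tc qdisc add dev eth0 root handle 10: htb
 *	tc class add dev eth0 parent 10: classid 10:1 htb rate 10mbit
 *	tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 *
 * The 32-bit classid 0x100001 is read as major:minor 10:1, so
 * cls_cgroup_classify() steers traffic from tasks in the "bulk" cgroup
 * into HTB class 10:1.
 */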