/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

struct cgroup_cls_state {
	struct cgroup_subsys_state css;
	u32 classid;
};

static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return container_of(task_subsys_state(p, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
					       struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	/* A new cgroup inherits the classid of its parent. */
	if (cgrp->parent)
		cs->classid = cgrp_cls_state(cgrp->parent)->classid;

	return &cs->css;
}

static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}

static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

	cgrp_cls_state(cgrp)->classid = (u32) value;

	cgroup_unlock();

	return 0;
}

static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
};

static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

struct cgroup_subsys net_cls_subsys = {
	.name = "net_cls",
	.create = cgrp_create,
	.destroy = cgrp_destroy,
	.populate = cgrp_populate,
	.subsys_id = net_cls_subsys_id,
};
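
/*
 * Illustrative usage from userspace (a sketch only; mount point, cgroup
 * name and device are assumptions, not part of this file):
 *
 *	# mount -t cgroup -o net_cls net_cls /sys/fs/cgroup/net_cls
 *	# mkdir /sys/fs/cgroup/net_cls/foo
 *	# echo 0x00100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid
 *	# tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 *
 * The classid value is interpreted as 0xAAAABBBB, i.e. major:minor of
 * the target class, so 0x00100001 selects class 10:1.
 */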

struct cls_cgroup_head {
	u32 handle;
	struct tcf_exts exts;
	struct tcf_ematch_tree ematches;
};

static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (softirq_count() != SOFTIRQ_OFFSET)
		return -1;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}
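
/*
 * This classifier keeps a single filter instance per tcf_proto rather
 * than a table of elements, so there are no per-element handles to look
 * up or reference-count: get() always returns 0, put() is a no-op, and
 * delete() is unsupported. The instance only goes away when the whole
 * tcf_proto is destroyed.
 */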

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	/* There is at most one instance; create it on the first change. */
	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0) {
		/* Don't leak the actions validated above. */
		tcf_exts_destroy(tp, &e);
		return err;
	}

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind = "cgroup",
	.init = cls_cgroup_init,
	.change = cls_cgroup_change,
	.classify = cls_cgroup_classify,
	.destroy = cls_cgroup_destroy,
	.get = cls_cgroup_get,
	.put = cls_cgroup_put,
	.delete = cls_cgroup_delete,
	.walk = cls_cgroup_walk,
	.dump = cls_cgroup_dump,
	.owner = THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
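/* Optional module metadata, derived from the header comment above. */
MODULE_AUTHOR("Thomas Graf <tgraf@suug.ch>");
MODULE_DESCRIPTION("Control Group Classifier");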