/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

struct cgroup_cls_state
{
	struct cgroup_subsys_state css;
	u32 classid;
};

static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return container_of(task_subsys_state(p, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
					       struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	/* New cgroups inherit the classid of their parent. */
	if (cgrp->parent)
		cs->classid = cgrp_cls_state(cgrp->parent)->classid;

	return &cs->css;
}

static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}

static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	cgrp_cls_state(cgrp)->classid = (u32) value;
	return 0;
}

/* Exposed to userspace as "net_cls.classid" in each cgroup directory. */
static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
};

static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.create		= cgrp_create,
	.destroy	= cgrp_destroy,
	.populate	= cgrp_populate,
	.subsys_id	= net_cls_subsys_id,
};

struct cls_cgroup_head
{
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (softirq_count() != SOFTIRQ_OFFSET)
		return -1;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0) {
		/* Release the already-validated extensions so they don't leak. */
		tcf_exts_destroy(tp, &e);
		return err;
	}

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.put		= cls_cgroup_put,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
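
/*
 * Illustrative userspace usage. This is a sketch only: the mount point,
 * device name and handles below are arbitrary examples, and the tc syntax
 * assumes an iproute2 build with support for the cgroup filter.
 *
 *   # mount the net_cls hierarchy and tag a cgroup with classid 10:1
 *   # (net_cls.classid encodes the tc handle as 0xAAAABBBB, i.e. AAAA:BBBB)
 *   mount -t cgroup -o net_cls net_cls /mnt/net_cls
 *   mkdir /mnt/net_cls/foo
 *   echo 0x100001 > /mnt/net_cls/foo/net_cls.classid
 *   echo $$ > /mnt/net_cls/foo/tasks
 *
 *   # steer traffic from tasks in that cgroup into HTB class 10:1
 *   tc qdisc add dev eth0 root handle 10: htb
 *   tc class add dev eth0 parent 10: classid 10:1 htb rate 40mbit
 *   tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 */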