/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
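
/*
 * Per-CPU bookkeeping of userspace listeners.  A listener is identified by
 * the netlink pid it registered from; it is added to the listener_array of
 * every CPU named in its REGISTER_CPUMASK attribute and receives the exit
 * records of tasks that die on those CPUs (see taskstats_exit() and
 * send_cpu_listeners()).
 */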
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

static int fill_pid(pid_t pid, struct task_struct *tsk,
		struct taskstats *stats)
{
	int rc = 0;

	if (!tsk) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return -ESRCH;
	} else
		get_task_struct(tsk);

	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);

	/* Define err: label here if needed */
	put_task_struct(tsk);
	return rc;
}
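
/*
 * Fill @stats for the whole thread group of @tgid (or of @first if the
 * caller already holds a task).  Stats accumulated by threads that have
 * already exited are taken from first->signal->stats; the per-task stats
 * of the remaining live threads are then added on top.
 */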
static int fill_tgid(pid_t tgid, struct task_struct *first,
		struct taskstats *stats)
{
	struct task_struct *tsk;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	if (!first)
		first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
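
/*
 * Accumulate an exiting task's per-task stats into its thread group's
 * tsk->signal->stats so they remain available after the task is gone.
 * The structure is allocated lazily by taskstats_tgid_alloc(), called
 * from taskstats_exit().
 */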
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}
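
/*
 * Parse the cpumask attribute supplied by userspace.  The payload is an
 * ascii cpu list in the format accepted by cpulist_parse(), e.g. "0-3,7",
 * with its length bounded by TASKSTATS_CPUMASK_MAXLEN.
 */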
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}
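
/*
 * Reserve space for a taskstats reply and return a pointer to the stats
 * payload so the caller can fill it in place.  The resulting attribute
 * layout is:
 *
 *	TASKSTATS_TYPE_AGGR_PID (or _AGGR_TGID)
 *		TASKSTATS_TYPE_PID (or _TGID), padded to sizeof(long)
 *		TASKSTATS_TYPE_STATS (struct taskstats)
 */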
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	/* If we don't pad, we end up with alignment on a 4 byte boundary.
	 * This causes lots of runtime warnings on systems requiring 8 byte
	 * alignment */
	u32 pids[2] = { pid, 0 };
	int pid_size = ALIGN(sizeof(pid), sizeof(long));

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;
	if (nla_put(skb, type, pid_size, pids) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct file *file;
	int fput_needed;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	file = fget_light(fd, &fput_needed);
	if (!file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fput_light(file, fput_needed);
	return rc;
}

static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_pid(pid, NULL, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_tgid(tgid, NULL, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = &__raw_get_cpu_var(listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
	if (!stats)
		goto err;

	rc = fill_pid(-1, tsk, stats);
	if (rc < 0)
		goto err;

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	printk("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);
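
/*
 * Userspace usage sketch (illustrative only; the complete, maintained
 * example lives in Documentation/accounting/getdelays.c).  The names
 * nl_fd and family_id below are assumptions of this sketch: nl_fd is an
 * open NETLINK_GENERIC socket and family_id has been resolved with a
 * CTRL_CMD_GETFAMILY request carrying CTRL_ATTR_FAMILY_NAME =
 * TASKSTATS_GENL_NAME.  A one-shot query for a single pid then looks
 * roughly like this:
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct genlmsghdr g;
 *		char buf[64];
 *	} req;
 *	struct nlattr *na;
 *	__u32 qpid = getpid();
 *
 *	memset(&req, 0, sizeof(req));
 *	req.n.nlmsg_len   = NLMSG_LENGTH(GENL_HDRLEN);
 *	req.n.nlmsg_type  = family_id;
 *	req.n.nlmsg_flags = NLM_F_REQUEST;
 *	req.g.cmd         = TASKSTATS_CMD_GET;
 *	req.g.version     = TASKSTATS_GENL_VERSION;
 *
 *	na = (struct nlattr *)((char *)&req + req.n.nlmsg_len);
 *	na->nla_type = TASKSTATS_CMD_ATTR_PID;
 *	na->nla_len  = NLA_HDRLEN + sizeof(qpid);
 *	memcpy((char *)na + NLA_HDRLEN, &qpid, sizeof(qpid));
 *	req.n.nlmsg_len += NLA_ALIGN(na->nla_len);
 *
 *	send(nl_fd, &req, req.n.nlmsg_len, 0);
 *
 * The reply is a TASKSTATS_TYPE_AGGR_PID nest holding TASKSTATS_TYPE_PID
 * and TASKSTATS_TYPE_STATS, as built by mk_reply() above.  Registering a
 * cpumask listener instead sends TASKSTATS_CMD_ATTR_REGISTER_CPUMASK with
 * an ascii cpu list such as "0-3".
 */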