/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

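/*
 * Allocate a reply skb and write its generic netlink header. When
 * @info is NULL the message is being built for the exit-notification
 * path to registered listeners, so a per-cpu sequence number is used.
 */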
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb back to the requester described by @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to the exit-data listeners in @listeners
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	genlmsg_end(skb, reply);

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

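/*
 * Fill @stats with the accounting data of a single task @tsk,
 * reporting ids relative to @user_ns and @pid_ns.
 */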
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

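/*
 * Look up the task with (virtual) pid @pid and fill @stats for it.
 * Returns -ESRCH if no such task exists.
 */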
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

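/*
 * Fill @stats with the aggregate accounting data for thread group
 * @tgid: start from the stats already accumulated for dead threads in
 * signal->stats, then add in each live thread under siglock.
 */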
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

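/*
 * Accumulate the per-task stats of exiting task @tsk into the
 * per-tgid structure hanging off tsk->signal, if one was allocated.
 */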
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

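/*
 * Register or deregister @pid as an exit-data listener on every CPU in
 * @mask. Only tasks in the initial pid and user namespaces may listen.
 * On allocation failure during REGISTER, fall through to the cleanup
 * path and remove any listeners already added for @pid.
 */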
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}

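/*
 * Parse the cpulist string carried in attribute @na into @mask.
 * Returns 1 if the attribute is absent, so callers can distinguish
 * "not supplied" from a parse error.
 */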
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

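/*
 * Reserve the nested PID/TGID aggregate and the taskstats payload in
 * @skb and return a pointer to the payload for the caller to fill in,
 * or NULL if the skb has no room.
 */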
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

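/*
 * Reply to a CGROUPSTATS_CMD_GET request: resolve the cgroup directory
 * file descriptor in CGROUPSTATS_CMD_ATTR_FD and send back a
 * cgroupstats structure built from that cgroup.
 */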
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
			 sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_path.dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

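/*
 * Handle the REGISTER/DEREGISTER_CPUMASK attributes: parse the cpumask
 * string and add or remove the sender's netlink port as a listener.
 */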
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

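/*
 * Size of a PID/TGID reply: the u32 id, the taskstats payload, the
 * enclosing aggregate attribute, and optional alignment padding.
 */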
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}

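/*
 * Handle TASKSTATS_CMD_ATTR_PID/TGID queries: build a reply skb, fill
 * in the requested statistics and send it back to the requester.
 */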
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

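/*
 * Dispatch a TASKSTATS_CMD_GET request to the handler matching the
 * attribute that was supplied.
 */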
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

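/*
 * Lazily allocate the per-thread-group taskstats structure on first
 * use; racing allocators are resolved under siglock and the loser's
 * allocation is freed.
 */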
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = raw_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
	},
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family_with_ops(&family, taskstats_ops);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);