1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DAMON Debugfs Interface
4 *
5 * Author: SeongJae Park <sjpark@amazon.de>
6 */
7
8#define pr_fmt(fmt) "damon-dbgfs: " fmt
9
10#include <linux/damon.h>
11#include <linux/debugfs.h>
12#include <linux/file.h>
13#include <linux/mm.h>
14#include <linux/module.h>
15#include <linux/page_idle.h>
16#include <linux/slab.h>
17
18static struct damon_ctx **dbgfs_ctxs;
19static int dbgfs_nr_ctxs;
20static struct dentry **dbgfs_dirs;
21static DEFINE_MUTEX(damon_dbgfs_lock);
22
23/*
24 * Returns non-empty string on success, negative error code otherwise.
25 */
26static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
27{
28 char *kbuf;
29 ssize_t ret;
30
31 /* We do not accept continuous write */
32 if (*ppos)
33 return ERR_PTR(-EINVAL);
34
35 kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
36 if (!kbuf)
37 return ERR_PTR(-ENOMEM);
38
39 ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
40 if (ret != count) {
41 kfree(kbuf);
42 return ERR_PTR(-EIO);
43 }
44 kbuf[ret] = '\0';
45
46 return kbuf;
47}
48
49static ssize_t dbgfs_attrs_read(struct file *file,
50 char __user *buf, size_t count, loff_t *ppos)
51{
52 struct damon_ctx *ctx = file->private_data;
53 char kbuf[128];
54 int ret;
55
56 mutex_lock(&ctx->kdamond_lock);
57 ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
58 ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
59 ctx->attrs.ops_update_interval,
60 ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
61 mutex_unlock(&ctx->kdamond_lock);
62
63 return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
64}
65
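/*
 * Example 'attrs' input (illustrative values), matching the sscanf() in
 * dbgfs_attrs_write() below: sampling, aggregation, and ops update intervals
 * in microseconds, then the min/max number of monitoring regions, e.g.
 * "5000 100000 1000000 10 1000".
 */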
66static ssize_t dbgfs_attrs_write(struct file *file,
67 const char __user *buf, size_t count, loff_t *ppos)
68{
69 struct damon_ctx *ctx = file->private_data;
70 struct damon_attrs attrs;
71 char *kbuf;
72 ssize_t ret;
73
74 kbuf = user_input_str(buf, count, ppos);
75 if (IS_ERR(kbuf))
76 return PTR_ERR(kbuf);
77
78 if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
79 &attrs.sample_interval, &attrs.aggr_interval,
80 &attrs.ops_update_interval,
81 &attrs.min_nr_regions,
82 &attrs.max_nr_regions) != 5) {
83 ret = -EINVAL;
84 goto out;
85 }
86
87 mutex_lock(&ctx->kdamond_lock);
88 if (ctx->kdamond) {
89 ret = -EBUSY;
90 goto unlock_out;
91 }
92
93 ret = damon_set_attrs(ctx, &attrs);
94 if (!ret)
95 ret = count;
96unlock_out:
97 mutex_unlock(&ctx->kdamond_lock);
98out:
99 kfree(kbuf);
100 return ret;
101}
102
103/*
104 * Return corresponding dbgfs' scheme action value (int) for the given
105 * damos_action if the given damos_action value is valid and supported by
106 * dbgfs, negative error code otherwise.
107 */
108static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
109{
110 switch (action) {
111 case DAMOS_WILLNEED:
112 return 0;
113 case DAMOS_COLD:
114 return 1;
115 case DAMOS_PAGEOUT:
116 return 2;
117 case DAMOS_HUGEPAGE:
118 return 3;
119 case DAMOS_NOHUGEPAGE:
120 return 4;
121 case DAMOS_STAT:
122 return 5;
123 default:
124 return -EINVAL;
125 }
126}
127
128static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
129{
130 struct damos *s;
131 int written = 0;
132 int rc;
133
134 damon_for_each_scheme(s, c) {
135 rc = scnprintf(&buf[written], len - written,
136 "%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
137 s->pattern.min_sz_region,
138 s->pattern.max_sz_region,
139 s->pattern.min_nr_accesses,
140 s->pattern.max_nr_accesses,
141 s->pattern.min_age_region,
142 s->pattern.max_age_region,
143 damos_action_to_dbgfs_scheme_action(s->action),
144 s->quota.ms, s->quota.sz,
145 s->quota.reset_interval,
146 s->quota.weight_sz,
147 s->quota.weight_nr_accesses,
148 s->quota.weight_age,
149 s->wmarks.metric, s->wmarks.interval,
150 s->wmarks.high, s->wmarks.mid, s->wmarks.low,
151 s->stat.nr_tried, s->stat.sz_tried,
152 s->stat.nr_applied, s->stat.sz_applied,
153 s->stat.qt_exceeds);
154 if (!rc)
155 return -ENOMEM;
156
157 written += rc;
158 }
159 return written;
160}
161
162static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
163 size_t count, loff_t *ppos)
164{
165 struct damon_ctx *ctx = file->private_data;
166 char *kbuf;
167 ssize_t len;
168
169 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
170 if (!kbuf)
171 return -ENOMEM;
172
173 mutex_lock(&ctx->kdamond_lock);
174 len = sprint_schemes(ctx, kbuf, count);
175 mutex_unlock(&ctx->kdamond_lock);
176 if (len < 0)
177 goto out;
178 len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
179
180out:
181 kfree(kbuf);
182 return len;
183}
184
185static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
186{
187 ssize_t i;
188
189 for (i = 0; i < nr_schemes; i++)
190 kfree(schemes[i]);
191 kfree(schemes);
192}
193
194/*
195 * Return corresponding damos_action for the given dbgfs input for a scheme
196 * action if the input is valid, negative error code otherwise.
197 */
198static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
199{
200 switch (dbgfs_action) {
201 case 0:
202 return DAMOS_WILLNEED;
203 case 1:
204 return DAMOS_COLD;
205 case 2:
206 return DAMOS_PAGEOUT;
207 case 3:
208 return DAMOS_HUGEPAGE;
209 case 4:
210 return DAMOS_NOHUGEPAGE;
211 case 5:
212 return DAMOS_STAT;
213 default:
214 return -EINVAL;
215 }
216}
217
218/*
219 * Converts a string into an array of struct damos pointers
220 *
221 * Returns an array of pointers to the converted struct damos objects if the
222 * conversion succeeds, or NULL otherwise.
223 */
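/*
 * Each scheme is given as one line of 18 space-separated fields matching the
 * sscanf() format used below: min/max region size, min/max nr_accesses,
 * min/max age, action, quota ms/sz/reset_interval, quota weights for
 * size/nr_accesses/age, then watermarks metric/interval/high/mid/low.
 * An illustrative (made up) example: "4096 8192 0 5 10 20 2 10 1048576 1000
 * 0 0 1 0 5000000 500 100 50".
 */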
224static struct damos **str_to_schemes(const char *str, ssize_t len,
225 ssize_t *nr_schemes)
226{
227 struct damos *scheme, **schemes;
228 const int max_nr_schemes = 256;
229 int pos = 0, parsed, ret;
230 unsigned int action_input;
231 enum damos_action action;
232
233 schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
234 GFP_KERNEL);
235 if (!schemes)
236 return NULL;
237
238 *nr_schemes = 0;
239 while (pos < len && *nr_schemes < max_nr_schemes) {
240 struct damos_access_pattern pattern = {};
241 struct damos_quota quota = {};
242 struct damos_watermarks wmarks;
243
244 ret = sscanf(&str[pos],
245 "%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
246 &pattern.min_sz_region, &pattern.max_sz_region,
247 &pattern.min_nr_accesses,
248 &pattern.max_nr_accesses,
249 &pattern.min_age_region,
250 &pattern.max_age_region,
251 &action_input, &quota.ms,
252 &quota.sz, &quota.reset_interval,
253 &quota.weight_sz, &quota.weight_nr_accesses,
254 &quota.weight_age, &wmarks.metric,
255 &wmarks.interval, &wmarks.high, &wmarks.mid,
256 &wmarks.low, &parsed);
257 if (ret != 18)
258 break;
259 action = dbgfs_scheme_action_to_damos_action(action_input);
260 if ((int)action < 0)
261 goto fail;
262
263 if (pattern.min_sz_region > pattern.max_sz_region ||
264 pattern.min_nr_accesses > pattern.max_nr_accesses ||
265 pattern.min_age_region > pattern.max_age_region)
266 goto fail;
267
268 if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
269 wmarks.mid < wmarks.low)
270 goto fail;
271
272 pos += parsed;
273 scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
274 if (!scheme)
275 goto fail;
276
277 schemes[*nr_schemes] = scheme;
278 *nr_schemes += 1;
279 }
280 return schemes;
281fail:
282 free_schemes_arr(schemes, *nr_schemes);
283 return NULL;
284}
285
286static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
287 size_t count, loff_t *ppos)
288{
289 struct damon_ctx *ctx = file->private_data;
290 char *kbuf;
291 struct damos **schemes;
292 ssize_t nr_schemes = 0, ret;
293
294 kbuf = user_input_str(buf, count, ppos);
295 if (IS_ERR(kbuf))
296 return PTR_ERR(kbuf);
297
298 schemes = str_to_schemes(kbuf, count, &nr_schemes);
299 if (!schemes) {
300 ret = -EINVAL;
301 goto out;
302 }
303
304 mutex_lock(&ctx->kdamond_lock);
305 if (ctx->kdamond) {
306 ret = -EBUSY;
307 goto unlock_out;
308 }
309
310 damon_set_schemes(ctx, schemes, nr_schemes);
311 ret = count;
312 nr_schemes = 0;
313
314unlock_out:
315 mutex_unlock(&ctx->kdamond_lock);
316 free_schemes_arr(schemes, nr_schemes);
317out:
318 kfree(kbuf);
319 return ret;
320}
321
322static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
323{
324 struct damon_target *t;
325 int id;
326 int written = 0;
327 int rc;
328
329 damon_for_each_target(t, ctx) {
330 if (damon_target_has_pid(ctx))
331 /* Show pid numbers to debugfs users */
332 id = pid_vnr(t->pid);
333 else
334 /* Show 42 for physical address space, just for fun */
335 id = 42;
336
337 rc = scnprintf(&buf[written], len - written, "%d ", id);
338 if (!rc)
339 return -ENOMEM;
340 written += rc;
341 }
342 if (written)
343 written -= 1;
344 written += scnprintf(&buf[written], len - written, "\n");
345 return written;
346}
347
348static ssize_t dbgfs_target_ids_read(struct file *file,
349 char __user *buf, size_t count, loff_t *ppos)
350{
351 struct damon_ctx *ctx = file->private_data;
352 ssize_t len;
353 char ids_buf[320];
354
355 mutex_lock(&ctx->kdamond_lock);
356 len = sprint_target_ids(ctx, ids_buf, 320);
357 mutex_unlock(&ctx->kdamond_lock);
358 if (len < 0)
359 return len;
360
361 return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
362}
363
364/*
365 * Converts a string into an array of integers
366 *
367 * Returns the array of integers if the conversion succeeds, or NULL
368 * otherwise.
369 */
370static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
371{
372 int *array;
373 const int max_nr_ints = 32;
374 int nr;
375 int pos = 0, parsed, ret;
376
377 *nr_ints = 0;
378 array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
379 if (!array)
380 return NULL;
381 while (*nr_ints < max_nr_ints && pos < len) {
382 ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
383 pos += parsed;
384 if (ret != 1)
385 break;
386 array[*nr_ints] = nr;
387 *nr_ints += 1;
388 }
389
390 return array;
391}
392
393static void dbgfs_put_pids(struct pid **pids, int nr_pids)
394{
395 int i;
396
397 for (i = 0; i < nr_pids; i++)
398 put_pid(pids[i]);
399}
400
401/*
402 * Converts a string into an array of struct pid pointers
403 *
404 * Returns an array of struct pid pointers if the conversion succeeds, or NULL
405 * otherwise.
406 */
407static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
408{
409 int *ints;
410 ssize_t nr_ints;
411 struct pid **pids;
412
413 *nr_pids = 0;
414
415 ints = str_to_ints(str, len, &nr_ints);
416 if (!ints)
417 return NULL;
418
419 pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
420 if (!pids)
421 goto out;
422
423 for (; *nr_pids < nr_ints; (*nr_pids)++) {
424 pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
425 if (!pids[*nr_pids]) {
426 dbgfs_put_pids(pids, *nr_pids);
427 kfree(ints);
428 kfree(pids);
429 return NULL;
430 }
431 }
432
433out:
434 kfree(ints);
435 return pids;
436}
437
438/*
439 * dbgfs_set_targets() - Set monitoring targets.
440 * @ctx: monitoring context
441 * @nr_targets: number of targets
442 * @pids: array of target pids (size is the same as @nr_targets)
443 *
444 * This function should not be called while the kdamond is running. @pids is
445 * ignored if the context is not configured to have pid in each target. On
446 * failure, reference counts of all pids in @pids are decremented.
447 *
448 * Return: 0 on success, negative error code otherwise.
449 */
450static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
451 struct pid **pids)
452{
453 ssize_t i;
454 struct damon_target *t, *next;
455
456 damon_for_each_target_safe(t, next, ctx) {
457 if (damon_target_has_pid(ctx))
458 put_pid(t->pid);
459 damon_destroy_target(t);
460 }
461
462 for (i = 0; i < nr_targets; i++) {
463 t = damon_new_target();
464 if (!t) {
465 damon_for_each_target_safe(t, next, ctx)
466 damon_destroy_target(t);
467 if (damon_target_has_pid(ctx))
468 dbgfs_put_pids(pids, nr_targets);
469 return -ENOMEM;
470 }
471 if (damon_target_has_pid(ctx))
472 t->pid = pids[i];
473 damon_add_target(ctx, t);
474 }
475
476 return 0;
477}
478
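/*
 * The 'target_ids' file accepts either the single keyword "paddr" for
 * physical address space monitoring, or space-separated decimal pids of the
 * processes to monitor, e.g. "1234 5678" (illustrative pids).
 */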
479static ssize_t dbgfs_target_ids_write(struct file *file,
480 const char __user *buf, size_t count, loff_t *ppos)
481{
482 struct damon_ctx *ctx = file->private_data;
483 bool id_is_pid = true;
484 char *kbuf;
485 struct pid **target_pids = NULL;
486 ssize_t nr_targets;
487 ssize_t ret;
488
489 kbuf = user_input_str(buf, count, ppos);
490 if (IS_ERR(kbuf))
491 return PTR_ERR(kbuf);
492
493 if (!strncmp(kbuf, "paddr\n", count)) {
494 id_is_pid = false;
495 nr_targets = 1;
496 }
497
498 if (id_is_pid) {
499 target_pids = str_to_pids(kbuf, count, &nr_targets);
500 if (!target_pids) {
501 ret = -ENOMEM;
502 goto out;
503 }
504 }
505
506 mutex_lock(&ctx->kdamond_lock);
507 if (ctx->kdamond) {
508 if (id_is_pid)
509 dbgfs_put_pids(target_pids, nr_targets);
510 ret = -EBUSY;
511 goto unlock_out;
512 }
513
514 /* remove previously set targets */
515 dbgfs_set_targets(ctx, 0, NULL);
516 if (!nr_targets) {
517 ret = count;
518 goto unlock_out;
519 }
520
521 /* Configure the context for the address space type */
522 if (id_is_pid)
523 ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
524 else
525 ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
526 if (ret)
527 goto unlock_out;
528
529 ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
530 if (!ret)
531 ret = count;
532
533unlock_out:
534 mutex_unlock(&ctx->kdamond_lock);
535 kfree(target_pids);
536out:
537 kfree(kbuf);
538 return ret;
539}
540
541static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
542{
543 struct damon_target *t;
544 struct damon_region *r;
545 int target_idx = 0;
546 int written = 0;
547 int rc;
548
549 damon_for_each_target(t, c) {
550 damon_for_each_region(r, t) {
551 rc = scnprintf(&buf[written], len - written,
552 "%d %lu %lu\n",
553 target_idx, r->ar.start, r->ar.end);
554 if (!rc)
555 return -ENOMEM;
556 written += rc;
557 }
558 target_idx++;
559 }
560 return written;
561}
562
563static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
564 size_t count, loff_t *ppos)
565{
566 struct damon_ctx *ctx = file->private_data;
567 char *kbuf;
568 ssize_t len;
569
570 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
571 if (!kbuf)
572 return -ENOMEM;
573
574 mutex_lock(&ctx->kdamond_lock);
575 if (ctx->kdamond) {
576 mutex_unlock(&ctx->kdamond_lock);
577 len = -EBUSY;
578 goto out;
579 }
580
581 len = sprint_init_regions(ctx, kbuf, count);
582 mutex_unlock(&ctx->kdamond_lock);
583 if (len < 0)
584 goto out;
585 len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
586
587out:
588 kfree(kbuf);
589 return len;
590}
591
592static int add_init_region(struct damon_ctx *c, int target_idx,
593 struct damon_addr_range *ar)
594{
595 struct damon_target *t;
596 struct damon_region *r, *prev;
597 unsigned long idx = 0;
598 int rc = -EINVAL;
599
600 if (ar->start >= ar->end)
601 return -EINVAL;
602
603 damon_for_each_target(t, c) {
604 if (idx++ == target_idx) {
605 r = damon_new_region(ar->start, ar->end);
606 if (!r)
607 return -ENOMEM;
608 damon_add_region(r, t);
609 if (damon_nr_regions(t) > 1) {
610 prev = damon_prev_region(r);
611 if (prev->ar.end > r->ar.start) {
612 damon_destroy_region(r, t);
613 return -EINVAL;
614 }
615 }
616 rc = 0;
617 }
618 }
619 return rc;
620}
621
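/*
 * Each line of the 'init_regions' input is "<target idx> <start addr>
 * <end addr>", e.g. "0 4096 8192" (illustrative values). Regions of a target
 * must be given in ascending, non-overlapping address order, as checked by
 * add_init_region() above.
 */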
622static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
623{
624 struct damon_target *t;
625 struct damon_region *r, *next;
626 int pos = 0, parsed, ret;
627 int target_idx;
628 struct damon_addr_range ar;
629 int err;
630
631 damon_for_each_target(t, c) {
632 damon_for_each_region_safe(r, next, t)
633 damon_destroy_region(r, t);
634 }
635
636 while (pos < len) {
637 ret = sscanf(&str[pos], "%d %lu %lu%n",
638 &target_idx, &ar.start, &ar.end, &parsed);
639 if (ret != 3)
640 break;
641 err = add_init_region(c, target_idx, &ar);
642 if (err)
643 goto fail;
644 pos += parsed;
645 }
646
647 return 0;
648
649fail:
650 damon_for_each_target(t, c) {
651 damon_for_each_region_safe(r, next, t)
652 damon_destroy_region(r, t);
653 }
654 return err;
655}
656
657static ssize_t dbgfs_init_regions_write(struct file *file,
658 const char __user *buf, size_t count,
659 loff_t *ppos)
660{
661 struct damon_ctx *ctx = file->private_data;
662 char *kbuf;
663 ssize_t ret = count;
664 int err;
665
666 kbuf = user_input_str(buf, count, ppos);
667 if (IS_ERR(kbuf))
668 return PTR_ERR(kbuf);
669
670 mutex_lock(&ctx->kdamond_lock);
671 if (ctx->kdamond) {
672 ret = -EBUSY;
673 goto unlock_out;
674 }
675
676 err = set_init_regions(ctx, kbuf, ret);
677 if (err)
678 ret = err;
679
680unlock_out:
681 mutex_unlock(&ctx->kdamond_lock);
682 kfree(kbuf);
683 return ret;
684}
685
686static ssize_t dbgfs_kdamond_pid_read(struct file *file,
687 char __user *buf, size_t count, loff_t *ppos)
688{
689 struct damon_ctx *ctx = file->private_data;
690 char *kbuf;
691 ssize_t len;
692
693 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
694 if (!kbuf)
695 return -ENOMEM;
696
697 mutex_lock(&ctx->kdamond_lock);
698 if (ctx->kdamond)
699 len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
700 else
701 len = scnprintf(kbuf, count, "none\n");
702 mutex_unlock(&ctx->kdamond_lock);
703 if (!len)
704 goto out;
705 len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
706
707out:
708 kfree(kbuf);
709 return len;
710}
711
712static int damon_dbgfs_open(struct inode *inode, struct file *file)
713{
714 file->private_data = inode->i_private;
715
716 return nonseekable_open(inode, file);
717}
718
719static const struct file_operations attrs_fops = {
720 .open = damon_dbgfs_open,
721 .read = dbgfs_attrs_read,
722 .write = dbgfs_attrs_write,
723};
724
725static const struct file_operations schemes_fops = {
726 .open = damon_dbgfs_open,
727 .read = dbgfs_schemes_read,
728 .write = dbgfs_schemes_write,
729};
730
731static const struct file_operations target_ids_fops = {
732 .open = damon_dbgfs_open,
733 .read = dbgfs_target_ids_read,
734 .write = dbgfs_target_ids_write,
735};
736
737static const struct file_operations init_regions_fops = {
738 .open = damon_dbgfs_open,
739 .read = dbgfs_init_regions_read,
740 .write = dbgfs_init_regions_write,
741};
742
743static const struct file_operations kdamond_pid_fops = {
744 .open = damon_dbgfs_open,
745 .read = dbgfs_kdamond_pid_read,
746};
747
748static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
749{
750 const char * const file_names[] = {"attrs", "schemes", "target_ids",
751 "init_regions", "kdamond_pid"};
752 const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
753 &target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
754 int i;
755
756 for (i = 0; i < ARRAY_SIZE(file_names); i++)
757 debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
758}
759
760static void dbgfs_before_terminate(struct damon_ctx *ctx)
761{
762 struct damon_target *t, *next;
763
764 if (!damon_target_has_pid(ctx))
765 return;
766
767 mutex_lock(&ctx->kdamond_lock);
768 damon_for_each_target_safe(t, next, ctx) {
769 put_pid(t->pid);
770 damon_destroy_target(t);
771 }
772 mutex_unlock(&ctx->kdamond_lock);
773}
774
775static struct damon_ctx *dbgfs_new_ctx(void)
776{
777 struct damon_ctx *ctx;
778
779 ctx = damon_new_ctx();
780 if (!ctx)
781 return NULL;
782
783 if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
784 damon_select_ops(ctx, DAMON_OPS_PADDR)) {
785 damon_destroy_ctx(ctx);
786 return NULL;
787 }
788 ctx->callback.before_terminate = dbgfs_before_terminate;
789 return ctx;
790}
791
792static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
793{
794 damon_destroy_ctx(ctx);
795}
796
797/*
798 * Make a context of @name and create a debugfs directory for it.
799 *
800 * This function should be called while holding damon_dbgfs_lock.
801 *
802 * Returns 0 on success, negative error code otherwise.
803 */
804static int dbgfs_mk_context(char *name)
805{
806 struct dentry *root, **new_dirs, *new_dir;
807 struct damon_ctx **new_ctxs, *new_ctx;
808
809 if (damon_nr_running_ctxs())
810 return -EBUSY;
811
812 new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
813 (dbgfs_nr_ctxs + 1), GFP_KERNEL);
814 if (!new_ctxs)
815 return -ENOMEM;
816 dbgfs_ctxs = new_ctxs;
817
818 new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
819 (dbgfs_nr_ctxs + 1), GFP_KERNEL);
820 if (!new_dirs)
821 return -ENOMEM;
822 dbgfs_dirs = new_dirs;
823
824 root = dbgfs_dirs[0];
825 if (!root)
826 return -ENOENT;
827
828 new_dir = debugfs_create_dir(name, root);
829 /* Below check is required for a potential duplicated name case */
830 if (IS_ERR(new_dir))
831 return PTR_ERR(new_dir);
832 dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;
833
834 new_ctx = dbgfs_new_ctx();
835 if (!new_ctx) {
836 debugfs_remove(new_dir);
837 dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
838 return -ENOMEM;
839 }
840
841 dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
842 dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
843 dbgfs_ctxs[dbgfs_nr_ctxs]);
844 dbgfs_nr_ctxs++;
845
846 return 0;
847}
848
849static ssize_t dbgfs_mk_context_write(struct file *file,
850 const char __user *buf, size_t count, loff_t *ppos)
851{
852 char *kbuf;
853 char *ctx_name;
854 ssize_t ret;
855
856 kbuf = user_input_str(buf, count, ppos);
857 if (IS_ERR(kbuf))
858 return PTR_ERR(kbuf);
859 ctx_name = kmalloc(count + 1, GFP_KERNEL);
860 if (!ctx_name) {
861 kfree(kbuf);
862 return -ENOMEM;
863 }
864
865 /* Trim white space */
866 if (sscanf(kbuf, "%s", ctx_name) != 1) {
867 ret = -EINVAL;
868 goto out;
869 }
870
871 mutex_lock(&damon_dbgfs_lock);
872 ret = dbgfs_mk_context(ctx_name);
873 if (!ret)
874 ret = count;
875 mutex_unlock(&damon_dbgfs_lock);
876
877out:
878 kfree(kbuf);
879 kfree(ctx_name);
880 return ret;
881}
882
883/*
884 * Remove a context of @name and its debugfs directory.
885 *
886 * This function should be called while holding damon_dbgfs_lock.
887 *
888 * Return 0 on success, negative error code otherwise.
889 */
890static int dbgfs_rm_context(char *name)
891{
892 struct dentry *root, *dir, **new_dirs;
893 struct inode *inode;
894 struct damon_ctx **new_ctxs;
895 int i, j;
896 int ret = 0;
897
898 if (damon_nr_running_ctxs())
899 return -EBUSY;
900
901 root = dbgfs_dirs[0];
902 if (!root)
903 return -ENOENT;
904
905 dir = debugfs_lookup(name, root);
906 if (!dir)
907 return -ENOENT;
908
909 inode = d_inode(dir);
910 if (!S_ISDIR(inode->i_mode)) {
911 ret = -EINVAL;
912 goto out_dput;
913 }
914
915 new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
916 GFP_KERNEL);
917 if (!new_dirs) {
918 ret = -ENOMEM;
919 goto out_dput;
920 }
921
922 new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
923 GFP_KERNEL);
924 if (!new_ctxs) {
925 ret = -ENOMEM;
926 goto out_new_dirs;
927 }
928
929 for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
930 if (dbgfs_dirs[i] == dir) {
931 debugfs_remove(dbgfs_dirs[i]);
932 dbgfs_destroy_ctx(dbgfs_ctxs[i]);
933 continue;
934 }
935 new_dirs[j] = dbgfs_dirs[i];
936 new_ctxs[j++] = dbgfs_ctxs[i];
937 }
938
939 kfree(dbgfs_dirs);
940 kfree(dbgfs_ctxs);
941
942 dbgfs_dirs = new_dirs;
943 dbgfs_ctxs = new_ctxs;
944 dbgfs_nr_ctxs--;
945
946 goto out_dput;
947
948out_new_dirs:
949 kfree(new_dirs);
950out_dput:
951 dput(dir);
952 return ret;
953}
954
955static ssize_t dbgfs_rm_context_write(struct file *file,
956 const char __user *buf, size_t count, loff_t *ppos)
957{
958 char *kbuf;
959 ssize_t ret;
960 char *ctx_name;
961
962 kbuf = user_input_str(buf, count, ppos);
963 if (IS_ERR(kbuf))
964 return PTR_ERR(kbuf);
965 ctx_name = kmalloc(count + 1, GFP_KERNEL);
966 if (!ctx_name) {
967 kfree(kbuf);
968 return -ENOMEM;
969 }
970
971 /* Trim white space */
972 if (sscanf(kbuf, "%s", ctx_name) != 1) {
973 ret = -EINVAL;
974 goto out;
975 }
976
977 mutex_lock(&damon_dbgfs_lock);
978 ret = dbgfs_rm_context(ctx_name);
979 if (!ret)
980 ret = count;
981 mutex_unlock(&damon_dbgfs_lock);
982
983out:
984 kfree(kbuf);
985 kfree(ctx_name);
986 return ret;
987}
988
989static ssize_t dbgfs_monitor_on_read(struct file *file,
990 char __user *buf, size_t count, loff_t *ppos)
991{
992 char monitor_on_buf[5];
993 bool monitor_on = damon_nr_running_ctxs() != 0;
994 int len;
995
996 len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");
997
998 return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
999}
1000
1001static ssize_t dbgfs_monitor_on_write(struct file *file,
1002 const char __user *buf, size_t count, loff_t *ppos)
1003{
1004 ssize_t ret;
1005 char *kbuf;
1006
1007 kbuf = user_input_str(buf, count, ppos);
1008 if (IS_ERR(kbuf))
1009 return PTR_ERR(kbuf);
1010
1011 /* Remove white space */
1012 if (sscanf(kbuf, "%s", kbuf) != 1) {
1013 kfree(kbuf);
1014 return -EINVAL;
1015 }
1016
1017 mutex_lock(&damon_dbgfs_lock);
1018 if (!strncmp(kbuf, "on", count)) {
1019 int i;
1020
1021 for (i = 0; i < dbgfs_nr_ctxs; i++) {
1022 if (damon_targets_empty(dbgfs_ctxs[i])) {
1023 kfree(kbuf);
1024 mutex_unlock(&damon_dbgfs_lock);
1025 return -EINVAL;
1026 }
1027 }
1028 ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
1029 } else if (!strncmp(kbuf, "off", count)) {
1030 ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
1031 } else {
1032 ret = -EINVAL;
1033 }
1034 mutex_unlock(&damon_dbgfs_lock);
1035
1036 if (!ret)
1037 ret = count;
1038 kfree(kbuf);
1039 return ret;
1040}
1041
1042static const struct file_operations mk_contexts_fops = {
1043 .write = dbgfs_mk_context_write,
1044};
1045
1046static const struct file_operations rm_contexts_fops = {
1047 .write = dbgfs_rm_context_write,
1048};
1049
1050static const struct file_operations monitor_on_fops = {
1051 .read = dbgfs_monitor_on_read,
1052 .write = dbgfs_monitor_on_write,
1053};
1054
1055static int __init __damon_dbgfs_init(void)
1056{
1057 struct dentry *dbgfs_root;
1058 const char * const file_names[] = {"mk_contexts", "rm_contexts",
1059 "monitor_on"};
1060 const struct file_operations *fops[] = {&mk_contexts_fops,
1061 &rm_contexts_fops, &monitor_on_fops};
1062 int i;
1063
1064 dbgfs_root = debugfs_create_dir("damon", NULL);
1065
1066 for (i = 0; i < ARRAY_SIZE(file_names); i++)
1067 debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
1068 fops[i]);
1069 dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);
1070
1071 dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
1072 if (!dbgfs_dirs) {
1073 debugfs_remove(dbgfs_root);
1074 return -ENOMEM;
1075 }
1076 dbgfs_dirs[0] = dbgfs_root;
1077
1078 return 0;
1079}
1080
1081/*
1082 * Functions for the initialization
1083 */
1084
1085static int __init damon_dbgfs_init(void)
1086{
1087 int rc = -ENOMEM;
1088
1089 mutex_lock(&damon_dbgfs_lock);
1090 dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
1091 if (!dbgfs_ctxs)
1092 goto out;
1093 dbgfs_ctxs[0] = dbgfs_new_ctx();
1094 if (!dbgfs_ctxs[0]) {
1095 kfree(dbgfs_ctxs);
1096 goto out;
1097 }
1098 dbgfs_nr_ctxs = 1;
1099
1100 rc = __damon_dbgfs_init();
1101 if (rc) {
1102 kfree(dbgfs_ctxs[0]);
1103 kfree(dbgfs_ctxs);
1104 pr_err("%s: dbgfs init failed\n", __func__);
1105 }
1106
1107out:
1108 mutex_unlock(&damon_dbgfs_lock);
1109 return rc;
1110}
1111
1112module_init(damon_dbgfs_init);
1113
1114#include "dbgfs-test.h"
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DAMON Debugfs Interface
4 *
5 * Author: SeongJae Park <sj@kernel.org>
6 */
7
8#define pr_fmt(fmt) "damon-dbgfs: " fmt
9
10#include <linux/damon.h>
11#include <linux/debugfs.h>
12#include <linux/file.h>
13#include <linux/mm.h>
14#include <linux/module.h>
15#include <linux/page_idle.h>
16#include <linux/slab.h>
17
18static struct damon_ctx **dbgfs_ctxs;
19static int dbgfs_nr_ctxs;
20static struct dentry **dbgfs_dirs;
21static DEFINE_MUTEX(damon_dbgfs_lock);
22
23static void damon_dbgfs_warn_deprecation(void)
24{
25 pr_warn_once("DAMON debugfs interface is deprecated, "
26 "so users should move to DAMON_SYSFS. If you cannot, "
27 "please report your usecase to damon@lists.linux.dev and "
28 "linux-mm@kvack.org.\n");
29}
30
31/*
32 * Returns non-empty string on success, negative error code otherwise.
33 */
34static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
35{
36 char *kbuf;
37 ssize_t ret;
38
39 /* We do not accept continuous write */
40 if (*ppos)
41 return ERR_PTR(-EINVAL);
42
43 kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
44 if (!kbuf)
45 return ERR_PTR(-ENOMEM);
46
47 ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
48 if (ret != count) {
49 kfree(kbuf);
50 return ERR_PTR(-EIO);
51 }
52 kbuf[ret] = '\0';
53
54 return kbuf;
55}
56
57static ssize_t dbgfs_attrs_read(struct file *file,
58 char __user *buf, size_t count, loff_t *ppos)
59{
60 struct damon_ctx *ctx = file->private_data;
61 char kbuf[128];
62 int ret;
63
64 mutex_lock(&ctx->kdamond_lock);
65 ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
66 ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
67 ctx->attrs.ops_update_interval,
68 ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
69 mutex_unlock(&ctx->kdamond_lock);
70
71 return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
72}
73
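/*
 * Example 'attrs' input (illustrative values), matching the sscanf() in
 * dbgfs_attrs_write() below: sampling, aggregation, and ops update intervals
 * in microseconds, then the min/max number of monitoring regions, e.g.
 * "5000 100000 1000000 10 1000".
 */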
74static ssize_t dbgfs_attrs_write(struct file *file,
75 const char __user *buf, size_t count, loff_t *ppos)
76{
77 struct damon_ctx *ctx = file->private_data;
78 struct damon_attrs attrs;
79 char *kbuf;
80 ssize_t ret;
81
82 kbuf = user_input_str(buf, count, ppos);
83 if (IS_ERR(kbuf))
84 return PTR_ERR(kbuf);
85
86 if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
87 &attrs.sample_interval, &attrs.aggr_interval,
88 &attrs.ops_update_interval,
89 &attrs.min_nr_regions,
90 &attrs.max_nr_regions) != 5) {
91 ret = -EINVAL;
92 goto out;
93 }
94
95 mutex_lock(&ctx->kdamond_lock);
96 if (ctx->kdamond) {
97 ret = -EBUSY;
98 goto unlock_out;
99 }
100
101 ret = damon_set_attrs(ctx, &attrs);
102 if (!ret)
103 ret = count;
104unlock_out:
105 mutex_unlock(&ctx->kdamond_lock);
106out:
107 kfree(kbuf);
108 return ret;
109}
110
111/*
112 * Return corresponding dbgfs' scheme action value (int) for the given
113 * damos_action if the given damos_action value is valid and supported by
114 * dbgfs, negative error code otherwise.
115 */
116static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
117{
118 switch (action) {
119 case DAMOS_WILLNEED:
120 return 0;
121 case DAMOS_COLD:
122 return 1;
123 case DAMOS_PAGEOUT:
124 return 2;
125 case DAMOS_HUGEPAGE:
126 return 3;
127 case DAMOS_NOHUGEPAGE:
128 return 4;
129 case DAMOS_STAT:
130 return 5;
131 default:
132 return -EINVAL;
133 }
134}
135
136static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
137{
138 struct damos *s;
139 int written = 0;
140 int rc;
141
142 damon_for_each_scheme(s, c) {
143 rc = scnprintf(&buf[written], len - written,
144 "%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
145 s->pattern.min_sz_region,
146 s->pattern.max_sz_region,
147 s->pattern.min_nr_accesses,
148 s->pattern.max_nr_accesses,
149 s->pattern.min_age_region,
150 s->pattern.max_age_region,
151 damos_action_to_dbgfs_scheme_action(s->action),
152 s->quota.ms, s->quota.sz,
153 s->quota.reset_interval,
154 s->quota.weight_sz,
155 s->quota.weight_nr_accesses,
156 s->quota.weight_age,
157 s->wmarks.metric, s->wmarks.interval,
158 s->wmarks.high, s->wmarks.mid, s->wmarks.low,
159 s->stat.nr_tried, s->stat.sz_tried,
160 s->stat.nr_applied, s->stat.sz_applied,
161 s->stat.qt_exceeds);
162 if (!rc)
163 return -ENOMEM;
164
165 written += rc;
166 }
167 return written;
168}
169
170static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
171 size_t count, loff_t *ppos)
172{
173 struct damon_ctx *ctx = file->private_data;
174 char *kbuf;
175 ssize_t len;
176
177 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
178 if (!kbuf)
179 return -ENOMEM;
180
181 mutex_lock(&ctx->kdamond_lock);
182 len = sprint_schemes(ctx, kbuf, count);
183 mutex_unlock(&ctx->kdamond_lock);
184 if (len < 0)
185 goto out;
186 len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
187
188out:
189 kfree(kbuf);
190 return len;
191}
192
193static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
194{
195 ssize_t i;
196
197 for (i = 0; i < nr_schemes; i++)
198 kfree(schemes[i]);
199 kfree(schemes);
200}
201
202/*
203 * Return corresponding damos_action for the given dbgfs input for a scheme
204 * action if the input is valid, negative error code otherwise.
205 */
206static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
207{
208 switch (dbgfs_action) {
209 case 0:
210 return DAMOS_WILLNEED;
211 case 1:
212 return DAMOS_COLD;
213 case 2:
214 return DAMOS_PAGEOUT;
215 case 3:
216 return DAMOS_HUGEPAGE;
217 case 4:
218 return DAMOS_NOHUGEPAGE;
219 case 5:
220 return DAMOS_STAT;
221 default:
222 return -EINVAL;
223 }
224}
225
226/*
227 * Converts a string into an array of struct damos pointers
228 *
229 * Returns an array of pointers to the converted struct damos objects if the
230 * conversion succeeds, or NULL otherwise.
231 */
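/*
 * Each scheme is given as one line of 18 space-separated fields matching the
 * sscanf() format used below: min/max region size, min/max nr_accesses,
 * min/max age, action, quota ms/sz/reset_interval, quota weights for
 * size/nr_accesses/age, then watermarks metric/interval/high/mid/low.
 * An illustrative (made up) example: "4096 8192 0 5 10 20 2 10 1048576 1000
 * 0 0 1 0 5000000 500 100 50".
 */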
232static struct damos **str_to_schemes(const char *str, ssize_t len,
233 ssize_t *nr_schemes)
234{
235 struct damos *scheme, **schemes;
236 const int max_nr_schemes = 256;
237 int pos = 0, parsed, ret;
238 unsigned int action_input;
239 enum damos_action action;
240
241 schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
242 GFP_KERNEL);
243 if (!schemes)
244 return NULL;
245
246 *nr_schemes = 0;
247 while (pos < len && *nr_schemes < max_nr_schemes) {
248 struct damos_access_pattern pattern = {};
249 struct damos_quota quota = {};
250 struct damos_watermarks wmarks;
251
252 ret = sscanf(&str[pos],
253 "%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
254 &pattern.min_sz_region, &pattern.max_sz_region,
255 &pattern.min_nr_accesses,
256 &pattern.max_nr_accesses,
257 &pattern.min_age_region,
258 &pattern.max_age_region,
259 &action_input, &quota.ms,
260 &quota.sz, &quota.reset_interval,
261 &quota.weight_sz, &quota.weight_nr_accesses,
262 &quota.weight_age, &wmarks.metric,
263 &wmarks.interval, &wmarks.high, &wmarks.mid,
264 &wmarks.low, &parsed);
265 if (ret != 18)
266 break;
267 action = dbgfs_scheme_action_to_damos_action(action_input);
268 if ((int)action < 0)
269 goto fail;
270
271 if (pattern.min_sz_region > pattern.max_sz_region ||
272 pattern.min_nr_accesses > pattern.max_nr_accesses ||
273 pattern.min_age_region > pattern.max_age_region)
274 goto fail;
275
276 if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
277 wmarks.mid < wmarks.low)
278 goto fail;
279
280 pos += parsed;
281 scheme = damon_new_scheme(&pattern, action, 0, &quota,
282 &wmarks);
283 if (!scheme)
284 goto fail;
285
286 schemes[*nr_schemes] = scheme;
287 *nr_schemes += 1;
288 }
289 return schemes;
290fail:
291 free_schemes_arr(schemes, *nr_schemes);
292 return NULL;
293}
294
295static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
296 size_t count, loff_t *ppos)
297{
298 struct damon_ctx *ctx = file->private_data;
299 char *kbuf;
300 struct damos **schemes;
301 ssize_t nr_schemes = 0, ret;
302
303 kbuf = user_input_str(buf, count, ppos);
304 if (IS_ERR(kbuf))
305 return PTR_ERR(kbuf);
306
307 schemes = str_to_schemes(kbuf, count, &nr_schemes);
308 if (!schemes) {
309 ret = -EINVAL;
310 goto out;
311 }
312
313 mutex_lock(&ctx->kdamond_lock);
314 if (ctx->kdamond) {
315 ret = -EBUSY;
316 goto unlock_out;
317 }
318
319 damon_set_schemes(ctx, schemes, nr_schemes);
320 ret = count;
321 nr_schemes = 0;
322
323unlock_out:
324 mutex_unlock(&ctx->kdamond_lock);
325 free_schemes_arr(schemes, nr_schemes);
326out:
327 kfree(kbuf);
328 return ret;
329}
330
331static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
332{
333 struct damon_target *t;
334 int id;
335 int written = 0;
336 int rc;
337
338 damon_for_each_target(t, ctx) {
339 if (damon_target_has_pid(ctx))
340 /* Show pid numbers to debugfs users */
341 id = pid_vnr(t->pid);
342 else
343 /* Show 42 for physical address space, just for fun */
344 id = 42;
345
346 rc = scnprintf(&buf[written], len - written, "%d ", id);
347 if (!rc)
348 return -ENOMEM;
349 written += rc;
350 }
351 if (written)
352 written -= 1;
353 written += scnprintf(&buf[written], len - written, "\n");
354 return written;
355}
356
357static ssize_t dbgfs_target_ids_read(struct file *file,
358 char __user *buf, size_t count, loff_t *ppos)
359{
360 struct damon_ctx *ctx = file->private_data;
361 ssize_t len;
362 char ids_buf[320];
363
364 mutex_lock(&ctx->kdamond_lock);
365 len = sprint_target_ids(ctx, ids_buf, 320);
366 mutex_unlock(&ctx->kdamond_lock);
367 if (len < 0)
368 return len;
369
370 return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
371}
372
373/*
374 * Converts a string into an array of integers
375 *
376 * Returns the array of integers if the conversion succeeds, or NULL
377 * otherwise.
378 */
379static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
380{
381 int *array;
382 const int max_nr_ints = 32;
383 int nr;
384 int pos = 0, parsed, ret;
385
386 *nr_ints = 0;
387 array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
388 if (!array)
389 return NULL;
390 while (*nr_ints < max_nr_ints && pos < len) {
391 ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
392 pos += parsed;
393 if (ret != 1)
394 break;
395 array[*nr_ints] = nr;
396 *nr_ints += 1;
397 }
398
399 return array;
400}
401
402static void dbgfs_put_pids(struct pid **pids, int nr_pids)
403{
404 int i;
405
406 for (i = 0; i < nr_pids; i++)
407 put_pid(pids[i]);
408}
409
410/*
411 * Converts a string into an array of struct pid pointers
412 *
413 * Returns an array of struct pid pointers if the conversion succeeds, or NULL
414 * otherwise.
415 */
416static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
417{
418 int *ints;
419 ssize_t nr_ints;
420 struct pid **pids;
421
422 *nr_pids = 0;
423
424 ints = str_to_ints(str, len, &nr_ints);
425 if (!ints)
426 return NULL;
427
428 pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
429 if (!pids)
430 goto out;
431
432 for (; *nr_pids < nr_ints; (*nr_pids)++) {
433 pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
434 if (!pids[*nr_pids]) {
435 dbgfs_put_pids(pids, *nr_pids);
436 kfree(ints);
437 kfree(pids);
438 return NULL;
439 }
440 }
441
442out:
443 kfree(ints);
444 return pids;
445}
446
447/*
448 * dbgfs_set_targets() - Set monitoring targets.
449 * @ctx: monitoring context
450 * @nr_targets: number of targets
451 * @pids: array of target pids (size is the same as @nr_targets)
452 *
453 * This function should not be called while the kdamond is running. @pids is
454 * ignored if the context is not configured to have pid in each target. On
455 * failure, reference counts of all pids in @pids are decremented.
456 *
457 * Return: 0 on success, negative error code otherwise.
458 */
459static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
460 struct pid **pids)
461{
462 ssize_t i;
463 struct damon_target *t, *next;
464
465 damon_for_each_target_safe(t, next, ctx) {
466 if (damon_target_has_pid(ctx))
467 put_pid(t->pid);
468 damon_destroy_target(t);
469 }
470
471 for (i = 0; i < nr_targets; i++) {
472 t = damon_new_target();
473 if (!t) {
474 damon_for_each_target_safe(t, next, ctx)
475 damon_destroy_target(t);
476 if (damon_target_has_pid(ctx))
477 dbgfs_put_pids(pids, nr_targets);
478 return -ENOMEM;
479 }
480 if (damon_target_has_pid(ctx))
481 t->pid = pids[i];
482 damon_add_target(ctx, t);
483 }
484
485 return 0;
486}
487
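/*
 * The 'target_ids' file accepts either the single keyword "paddr" for
 * physical address space monitoring, or space-separated decimal pids of the
 * processes to monitor, e.g. "1234 5678" (illustrative pids).
 */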
488static ssize_t dbgfs_target_ids_write(struct file *file,
489 const char __user *buf, size_t count, loff_t *ppos)
490{
491 struct damon_ctx *ctx = file->private_data;
492 bool id_is_pid = true;
493 char *kbuf;
494 struct pid **target_pids = NULL;
495 ssize_t nr_targets;
496 ssize_t ret;
497
498 kbuf = user_input_str(buf, count, ppos);
499 if (IS_ERR(kbuf))
500 return PTR_ERR(kbuf);
501
502 if (!strncmp(kbuf, "paddr\n", count)) {
503 id_is_pid = false;
504 nr_targets = 1;
505 }
506
507 if (id_is_pid) {
508 target_pids = str_to_pids(kbuf, count, &nr_targets);
509 if (!target_pids) {
510 ret = -ENOMEM;
511 goto out;
512 }
513 }
514
515 mutex_lock(&ctx->kdamond_lock);
516 if (ctx->kdamond) {
517 if (id_is_pid)
518 dbgfs_put_pids(target_pids, nr_targets);
519 ret = -EBUSY;
520 goto unlock_out;
521 }
522
523 /* remove previously set targets */
524 dbgfs_set_targets(ctx, 0, NULL);
525 if (!nr_targets) {
526 ret = count;
527 goto unlock_out;
528 }
529
530 /* Configure the context for the address space type */
531 if (id_is_pid)
532 ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
533 else
534 ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
535 if (ret)
536 goto unlock_out;
537
538 ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
539 if (!ret)
540 ret = count;
541
542unlock_out:
543 mutex_unlock(&ctx->kdamond_lock);
544 kfree(target_pids);
545out:
546 kfree(kbuf);
547 return ret;
548}
549
550static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
551{
552 struct damon_target *t;
553 struct damon_region *r;
554 int target_idx = 0;
555 int written = 0;
556 int rc;
557
558 damon_for_each_target(t, c) {
559 damon_for_each_region(r, t) {
560 rc = scnprintf(&buf[written], len - written,
561 "%d %lu %lu\n",
562 target_idx, r->ar.start, r->ar.end);
563 if (!rc)
564 return -ENOMEM;
565 written += rc;
566 }
567 target_idx++;
568 }
569 return written;
570}
571
572static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
573 size_t count, loff_t *ppos)
574{
575 struct damon_ctx *ctx = file->private_data;
576 char *kbuf;
577 ssize_t len;
578
579 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
580 if (!kbuf)
581 return -ENOMEM;
582
583 mutex_lock(&ctx->kdamond_lock);
584 if (ctx->kdamond) {
585 mutex_unlock(&ctx->kdamond_lock);
586 len = -EBUSY;
587 goto out;
588 }
589
590 len = sprint_init_regions(ctx, kbuf, count);
591 mutex_unlock(&ctx->kdamond_lock);
592 if (len < 0)
593 goto out;
594 len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
595
596out:
597 kfree(kbuf);
598 return len;
599}
600
601static int add_init_region(struct damon_ctx *c, int target_idx,
602 struct damon_addr_range *ar)
603{
604 struct damon_target *t;
605 struct damon_region *r, *prev;
606 unsigned long idx = 0;
607 int rc = -EINVAL;
608
609 if (ar->start >= ar->end)
610 return -EINVAL;
611
612 damon_for_each_target(t, c) {
613 if (idx++ == target_idx) {
614 r = damon_new_region(ar->start, ar->end);
615 if (!r)
616 return -ENOMEM;
617 damon_add_region(r, t);
618 if (damon_nr_regions(t) > 1) {
619 prev = damon_prev_region(r);
620 if (prev->ar.end > r->ar.start) {
621 damon_destroy_region(r, t);
622 return -EINVAL;
623 }
624 }
625 rc = 0;
626 }
627 }
628 return rc;
629}
630
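/*
 * Each line of the 'init_regions' input is "<target idx> <start addr>
 * <end addr>", e.g. "0 4096 8192" (illustrative values). Regions of a target
 * must be given in ascending, non-overlapping address order, as checked by
 * add_init_region() above.
 */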
631static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
632{
633 struct damon_target *t;
634 struct damon_region *r, *next;
635 int pos = 0, parsed, ret;
636 int target_idx;
637 struct damon_addr_range ar;
638 int err;
639
640 damon_for_each_target(t, c) {
641 damon_for_each_region_safe(r, next, t)
642 damon_destroy_region(r, t);
643 }
644
645 while (pos < len) {
646 ret = sscanf(&str[pos], "%d %lu %lu%n",
647 &target_idx, &ar.start, &ar.end, &parsed);
648 if (ret != 3)
649 break;
650 err = add_init_region(c, target_idx, &ar);
651 if (err)
652 goto fail;
653 pos += parsed;
654 }
655
656 return 0;
657
658fail:
659 damon_for_each_target(t, c) {
660 damon_for_each_region_safe(r, next, t)
661 damon_destroy_region(r, t);
662 }
663 return err;
664}
665
666static ssize_t dbgfs_init_regions_write(struct file *file,
667 const char __user *buf, size_t count,
668 loff_t *ppos)
669{
670 struct damon_ctx *ctx = file->private_data;
671 char *kbuf;
672 ssize_t ret = count;
673 int err;
674
675 kbuf = user_input_str(buf, count, ppos);
676 if (IS_ERR(kbuf))
677 return PTR_ERR(kbuf);
678
679 mutex_lock(&ctx->kdamond_lock);
680 if (ctx->kdamond) {
681 ret = -EBUSY;
682 goto unlock_out;
683 }
684
685 err = set_init_regions(ctx, kbuf, ret);
686 if (err)
687 ret = err;
688
689unlock_out:
690 mutex_unlock(&ctx->kdamond_lock);
691 kfree(kbuf);
692 return ret;
693}
694
695static ssize_t dbgfs_kdamond_pid_read(struct file *file,
696 char __user *buf, size_t count, loff_t *ppos)
697{
698 struct damon_ctx *ctx = file->private_data;
699 char *kbuf;
700 ssize_t len;
701
702 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
703 if (!kbuf)
704 return -ENOMEM;
705
706 mutex_lock(&ctx->kdamond_lock);
707 if (ctx->kdamond)
708 len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
709 else
710 len = scnprintf(kbuf, count, "none\n");
711 mutex_unlock(&ctx->kdamond_lock);
712 if (!len)
713 goto out;
714 len = simple_read_from_buffer(buf, count, ppos, kbuf, len);
715
716out:
717 kfree(kbuf);
718 return len;
719}
720
721static int damon_dbgfs_open(struct inode *inode, struct file *file)
722{
723 damon_dbgfs_warn_deprecation();
724
725 file->private_data = inode->i_private;
726
727 return nonseekable_open(inode, file);
728}
729
730static const struct file_operations attrs_fops = {
731 .open = damon_dbgfs_open,
732 .read = dbgfs_attrs_read,
733 .write = dbgfs_attrs_write,
734};
735
736static const struct file_operations schemes_fops = {
737 .open = damon_dbgfs_open,
738 .read = dbgfs_schemes_read,
739 .write = dbgfs_schemes_write,
740};
741
742static const struct file_operations target_ids_fops = {
743 .open = damon_dbgfs_open,
744 .read = dbgfs_target_ids_read,
745 .write = dbgfs_target_ids_write,
746};
747
748static const struct file_operations init_regions_fops = {
749 .open = damon_dbgfs_open,
750 .read = dbgfs_init_regions_read,
751 .write = dbgfs_init_regions_write,
752};
753
754static const struct file_operations kdamond_pid_fops = {
755 .open = damon_dbgfs_open,
756 .read = dbgfs_kdamond_pid_read,
757};
758
759static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
760{
761 const char * const file_names[] = {"attrs", "schemes", "target_ids",
762 "init_regions", "kdamond_pid"};
763 const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
764 &target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
765 int i;
766
767 for (i = 0; i < ARRAY_SIZE(file_names); i++)
768 debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
769}
770
771static void dbgfs_before_terminate(struct damon_ctx *ctx)
772{
773 struct damon_target *t, *next;
774
775 if (!damon_target_has_pid(ctx))
776 return;
777
778 mutex_lock(&ctx->kdamond_lock);
779 damon_for_each_target_safe(t, next, ctx) {
780 put_pid(t->pid);
781 damon_destroy_target(t);
782 }
783 mutex_unlock(&ctx->kdamond_lock);
784}
785
786static struct damon_ctx *dbgfs_new_ctx(void)
787{
788 struct damon_ctx *ctx;
789
790 ctx = damon_new_ctx();
791 if (!ctx)
792 return NULL;
793
794 if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
795 damon_select_ops(ctx, DAMON_OPS_PADDR)) {
796 damon_destroy_ctx(ctx);
797 return NULL;
798 }
799 ctx->callback.before_terminate = dbgfs_before_terminate;
800 return ctx;
801}
802
803static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
804{
805 damon_destroy_ctx(ctx);
806}
807
808/*
809 * Make a context of @name and create a debugfs directory for it.
810 *
811 * This function should be called while holding damon_dbgfs_lock.
812 *
813 * Returns 0 on success, negative error code otherwise.
814 */
815static int dbgfs_mk_context(char *name)
816{
817 struct dentry *root, **new_dirs, *new_dir;
818 struct damon_ctx **new_ctxs, *new_ctx;
819
820 if (damon_nr_running_ctxs())
821 return -EBUSY;
822
823 new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
824 (dbgfs_nr_ctxs + 1), GFP_KERNEL);
825 if (!new_ctxs)
826 return -ENOMEM;
827 dbgfs_ctxs = new_ctxs;
828
829 new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
830 (dbgfs_nr_ctxs + 1), GFP_KERNEL);
831 if (!new_dirs)
832 return -ENOMEM;
833 dbgfs_dirs = new_dirs;
834
835 root = dbgfs_dirs[0];
836 if (!root)
837 return -ENOENT;
838
839 new_dir = debugfs_create_dir(name, root);
840 /* Below check is required for a potential duplicated name case */
841 if (IS_ERR(new_dir))
842 return PTR_ERR(new_dir);
843 dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;
844
845 new_ctx = dbgfs_new_ctx();
846 if (!new_ctx) {
847 debugfs_remove(new_dir);
848 dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
849 return -ENOMEM;
850 }
851
852 dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
853 dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
854 dbgfs_ctxs[dbgfs_nr_ctxs]);
855 dbgfs_nr_ctxs++;
856
857 return 0;
858}
859
860static ssize_t dbgfs_mk_context_write(struct file *file,
861 const char __user *buf, size_t count, loff_t *ppos)
862{
863 char *kbuf;
864 char *ctx_name;
865 ssize_t ret;
866
867 kbuf = user_input_str(buf, count, ppos);
868 if (IS_ERR(kbuf))
869 return PTR_ERR(kbuf);
870 ctx_name = kmalloc(count + 1, GFP_KERNEL);
871 if (!ctx_name) {
872 kfree(kbuf);
873 return -ENOMEM;
874 }
875
876 /* Trim white space */
877 if (sscanf(kbuf, "%s", ctx_name) != 1) {
878 ret = -EINVAL;
879 goto out;
880 }
881
882 mutex_lock(&damon_dbgfs_lock);
883 ret = dbgfs_mk_context(ctx_name);
884 if (!ret)
885 ret = count;
886 mutex_unlock(&damon_dbgfs_lock);
887
888out:
889 kfree(kbuf);
890 kfree(ctx_name);
891 return ret;
892}
893
894/*
895 * Remove a context of @name and its debugfs directory.
896 *
897 * This function should be called while holding damon_dbgfs_lock.
898 *
899 * Return 0 on success, negative error code otherwise.
900 */
901static int dbgfs_rm_context(char *name)
902{
903 struct dentry *root, *dir, **new_dirs;
904 struct inode *inode;
905 struct damon_ctx **new_ctxs;
906 int i, j;
907 int ret = 0;
908
909 if (damon_nr_running_ctxs())
910 return -EBUSY;
911
912 root = dbgfs_dirs[0];
913 if (!root)
914 return -ENOENT;
915
916 dir = debugfs_lookup(name, root);
917 if (!dir)
918 return -ENOENT;
919
920 inode = d_inode(dir);
921 if (!S_ISDIR(inode->i_mode)) {
922 ret = -EINVAL;
923 goto out_dput;
924 }
925
926 new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
927 GFP_KERNEL);
928 if (!new_dirs) {
929 ret = -ENOMEM;
930 goto out_dput;
931 }
932
933 new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
934 GFP_KERNEL);
935 if (!new_ctxs) {
936 ret = -ENOMEM;
937 goto out_new_dirs;
938 }
939
940 for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
941 if (dbgfs_dirs[i] == dir) {
942 debugfs_remove(dbgfs_dirs[i]);
943 dbgfs_destroy_ctx(dbgfs_ctxs[i]);
944 continue;
945 }
946 new_dirs[j] = dbgfs_dirs[i];
947 new_ctxs[j++] = dbgfs_ctxs[i];
948 }
949
950 kfree(dbgfs_dirs);
951 kfree(dbgfs_ctxs);
952
953 dbgfs_dirs = new_dirs;
954 dbgfs_ctxs = new_ctxs;
955 dbgfs_nr_ctxs--;
956
957 goto out_dput;
958
959out_new_dirs:
960 kfree(new_dirs);
961out_dput:
962 dput(dir);
963 return ret;
964}
965
966static ssize_t dbgfs_rm_context_write(struct file *file,
967 const char __user *buf, size_t count, loff_t *ppos)
968{
969 char *kbuf;
970 ssize_t ret;
971 char *ctx_name;
972
973 kbuf = user_input_str(buf, count, ppos);
974 if (IS_ERR(kbuf))
975 return PTR_ERR(kbuf);
976 ctx_name = kmalloc(count + 1, GFP_KERNEL);
977 if (!ctx_name) {
978 kfree(kbuf);
979 return -ENOMEM;
980 }
981
982 /* Trim white space */
983 if (sscanf(kbuf, "%s", ctx_name) != 1) {
984 ret = -EINVAL;
985 goto out;
986 }
987
988 mutex_lock(&damon_dbgfs_lock);
989 ret = dbgfs_rm_context(ctx_name);
990 if (!ret)
991 ret = count;
992 mutex_unlock(&damon_dbgfs_lock);
993
994out:
995 kfree(kbuf);
996 kfree(ctx_name);
997 return ret;
998}
999
1000static ssize_t dbgfs_monitor_on_read(struct file *file,
1001 char __user *buf, size_t count, loff_t *ppos)
1002{
1003 char monitor_on_buf[5];
1004 bool monitor_on = damon_nr_running_ctxs() != 0;
1005 int len;
1006
1007 len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");
1008
1009 return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
1010}
1011
1012static ssize_t dbgfs_monitor_on_write(struct file *file,
1013 const char __user *buf, size_t count, loff_t *ppos)
1014{
1015 ssize_t ret;
1016 char *kbuf;
1017
1018 kbuf = user_input_str(buf, count, ppos);
1019 if (IS_ERR(kbuf))
1020 return PTR_ERR(kbuf);
1021
1022 /* Remove white space */
1023 if (sscanf(kbuf, "%s", kbuf) != 1) {
1024 kfree(kbuf);
1025 return -EINVAL;
1026 }
1027
1028 mutex_lock(&damon_dbgfs_lock);
1029 if (!strncmp(kbuf, "on", count)) {
1030 int i;
1031
1032 for (i = 0; i < dbgfs_nr_ctxs; i++) {
1033 if (damon_targets_empty(dbgfs_ctxs[i])) {
1034 kfree(kbuf);
1035 mutex_unlock(&damon_dbgfs_lock);
1036 return -EINVAL;
1037 }
1038 }
1039 ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
1040 } else if (!strncmp(kbuf, "off", count)) {
1041 ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
1042 } else {
1043 ret = -EINVAL;
1044 }
1045 mutex_unlock(&damon_dbgfs_lock);
1046
1047 if (!ret)
1048 ret = count;
1049 kfree(kbuf);
1050 return ret;
1051}
1052
1053static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file)
1054{
1055 damon_dbgfs_warn_deprecation();
1056 return nonseekable_open(inode, file);
1057}
1058
1059static const struct file_operations mk_contexts_fops = {
1060 .open = damon_dbgfs_static_file_open,
1061 .write = dbgfs_mk_context_write,
1062};
1063
1064static const struct file_operations rm_contexts_fops = {
1065 .open = damon_dbgfs_static_file_open,
1066 .write = dbgfs_rm_context_write,
1067};
1068
1069static const struct file_operations monitor_on_fops = {
1070 .open = damon_dbgfs_static_file_open,
1071 .read = dbgfs_monitor_on_read,
1072 .write = dbgfs_monitor_on_write,
1073};
1074
1075static int __init __damon_dbgfs_init(void)
1076{
1077 struct dentry *dbgfs_root;
1078 const char * const file_names[] = {"mk_contexts", "rm_contexts",
1079 "monitor_on"};
1080 const struct file_operations *fops[] = {&mk_contexts_fops,
1081 &rm_contexts_fops, &monitor_on_fops};
1082 int i;
1083
1084 dbgfs_root = debugfs_create_dir("damon", NULL);
1085
1086 for (i = 0; i < ARRAY_SIZE(file_names); i++)
1087 debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
1088 fops[i]);
1089 dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);
1090
1091 dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
1092 if (!dbgfs_dirs) {
1093 debugfs_remove(dbgfs_root);
1094 return -ENOMEM;
1095 }
1096 dbgfs_dirs[0] = dbgfs_root;
1097
1098 return 0;
1099}
1100
1101/*
1102 * Functions for the initialization
1103 */
1104
1105static int __init damon_dbgfs_init(void)
1106{
1107 int rc = -ENOMEM;
1108
1109 mutex_lock(&damon_dbgfs_lock);
1110 dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
1111 if (!dbgfs_ctxs)
1112 goto out;
1113 dbgfs_ctxs[0] = dbgfs_new_ctx();
1114 if (!dbgfs_ctxs[0]) {
1115 kfree(dbgfs_ctxs);
1116 goto out;
1117 }
1118 dbgfs_nr_ctxs = 1;
1119
1120 rc = __damon_dbgfs_init();
1121 if (rc) {
1122 kfree(dbgfs_ctxs[0]);
1123 kfree(dbgfs_ctxs);
1124 pr_err("%s: dbgfs init failed\n", __func__);
1125 }
1126
1127out:
1128 mutex_unlock(&damon_dbgfs_lock);
1129 return rc;
1130}
1131
1132module_init(damon_dbgfs_init);
1133
1134#include "dbgfs-test.h"