1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Data Access Monitor
4 *
5 * Author: SeongJae Park <sjpark@amazon.de>
6 */
7
8#define pr_fmt(fmt) "damon: " fmt
9
10#include <linux/damon.h>
11#include <linux/delay.h>
12#include <linux/kthread.h>
13#include <linux/mm.h>
14#include <linux/slab.h>
15#include <linux/string.h>
16
17#define CREATE_TRACE_POINTS
18#include <trace/events/damon.h>
19
20#ifdef CONFIG_DAMON_KUNIT_TEST
21#undef DAMON_MIN_REGION
22#define DAMON_MIN_REGION 1
23#endif
24
25static DEFINE_MUTEX(damon_lock);
26static int nr_running_ctxs;
27static bool running_exclusive_ctxs;
28
29static DEFINE_MUTEX(damon_ops_lock);
30static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
31
32static struct kmem_cache *damon_region_cache __ro_after_init;
33
34/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
35static bool __damon_is_registered_ops(enum damon_ops_id id)
36{
37 struct damon_operations empty_ops = {};
38
39 if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
40 return false;
41 return true;
42}
43
44/**
45 * damon_is_registered_ops() - Check if a given damon_operations is registered.
46 * @id: Id of the damon_operations to check if registered.
47 *
48 * Return: true if the ops is set, false otherwise.
49 */
50bool damon_is_registered_ops(enum damon_ops_id id)
51{
52 bool registered;
53
54 if (id >= NR_DAMON_OPS)
55 return false;
56 mutex_lock(&damon_ops_lock);
57 registered = __damon_is_registered_ops(id);
58 mutex_unlock(&damon_ops_lock);
59 return registered;
60}
61
62/**
63 * damon_register_ops() - Register a monitoring operations set to DAMON.
64 * @ops: monitoring operations set to register.
65 *
66 * This function registers a monitoring operations set of valid &struct
67 * damon_operations->id so that others can find and use them later.
68 *
69 * Return: 0 on success, negative error code otherwise.
70 */
71int damon_register_ops(struct damon_operations *ops)
72{
73 int err = 0;
74
75 if (ops->id >= NR_DAMON_OPS)
76 return -EINVAL;
77 mutex_lock(&damon_ops_lock);
78 /* Fail for already registered ops */
79 if (__damon_is_registered_ops(ops->id)) {
80 err = -EINVAL;
81 goto out;
82 }
83 damon_registered_ops[ops->id] = *ops;
84out:
85 mutex_unlock(&damon_ops_lock);
86 return err;
87}
88
89/**
90 * damon_select_ops() - Select a monitoring operations set to use with the context.
91 * @ctx: monitoring context to use the operations.
92 * @id: id of the registered monitoring operations to select.
93 *
94 * This function finds the registered monitoring operations set of @id and
95 * makes @ctx use it.
96 *
97 * Return: 0 on success, negative error code otherwise.
98 */
99int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
100{
101 int err = 0;
102
103 if (id >= NR_DAMON_OPS)
104 return -EINVAL;
105
106 mutex_lock(&damon_ops_lock);
107 if (!__damon_is_registered_ops(id))
108 err = -EINVAL;
109 else
110 ctx->ops = damon_registered_ops[id];
111 mutex_unlock(&damon_ops_lock);
112 return err;
113}
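/*
 * Example: a minimal sketch of picking a registered operations set for a new
 * context.  DAMON_OPS_PADDR is assumed here to be a valid 'enum damon_ops_id'
 * value from <linux/damon.h>.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
 *		damon_destroy_ctx(ctx);
 *		return -EINVAL;
 *	}
 */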
114
115/*
116 * Construct a damon_region struct
117 *
118 * Returns the pointer to the new struct on success, or NULL otherwise
119 */
120struct damon_region *damon_new_region(unsigned long start, unsigned long end)
121{
122 struct damon_region *region;
123
124 region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
125 if (!region)
126 return NULL;
127
128 region->ar.start = start;
129 region->ar.end = end;
130 region->nr_accesses = 0;
131 INIT_LIST_HEAD(&region->list);
132
133 region->age = 0;
134 region->last_nr_accesses = 0;
135
136 return region;
137}
138
139void damon_add_region(struct damon_region *r, struct damon_target *t)
140{
141 list_add_tail(&r->list, &t->regions_list);
142 t->nr_regions++;
143}
144
145static void damon_del_region(struct damon_region *r, struct damon_target *t)
146{
147 list_del(&r->list);
148 t->nr_regions--;
149}
150
151static void damon_free_region(struct damon_region *r)
152{
153 kmem_cache_free(damon_region_cache, r);
154}
155
156void damon_destroy_region(struct damon_region *r, struct damon_target *t)
157{
158 damon_del_region(r, t);
159 damon_free_region(r);
160}
161
162/*
163 * Check whether a region is intersecting an address range
164 *
165 * Returns true if it is.
166 */
167static bool damon_intersect(struct damon_region *r,
168 struct damon_addr_range *re)
169{
170 return !(r->ar.end <= re->start || re->end <= r->ar.start);
171}
172
173/*
174 * Fill holes in regions with new regions.
175 */
176static int damon_fill_regions_holes(struct damon_region *first,
177 struct damon_region *last, struct damon_target *t)
178{
179 struct damon_region *r = first;
180
181 damon_for_each_region_from(r, t) {
182 struct damon_region *next, *newr;
183
184 if (r == last)
185 break;
186 next = damon_next_region(r);
187 if (r->ar.end != next->ar.start) {
188 newr = damon_new_region(r->ar.end, next->ar.start);
189 if (!newr)
190 return -ENOMEM;
191 damon_insert_region(newr, r, next, t);
192 }
193 }
194 return 0;
195}
196
197/*
198 * damon_set_regions() - Set regions of a target for given address ranges.
199 * @t: the given target.
200 * @ranges: array of new monitoring target ranges.
201 * @nr_ranges: length of @ranges.
202 *
203 * This function adds new regions to, or modifies existing regions of, a
204 * monitoring target so that they fit in the given ranges.
205 *
206 * Return: 0 on success, or negative error code otherwise.
207 */
208int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
209 unsigned int nr_ranges)
210{
211 struct damon_region *r, *next;
212 unsigned int i;
213 int err;
214
215 /* Remove regions which are not in the new ranges */
216 damon_for_each_region_safe(r, next, t) {
217 for (i = 0; i < nr_ranges; i++) {
218 if (damon_intersect(r, &ranges[i]))
219 break;
220 }
221 if (i == nr_ranges)
222 damon_destroy_region(r, t);
223 }
224
225 r = damon_first_region(t);
226 /* Add new regions or resize existing regions to fit in the ranges */
227 for (i = 0; i < nr_ranges; i++) {
228 struct damon_region *first = NULL, *last, *newr;
229 struct damon_addr_range *range;
230
231 range = &ranges[i];
232 /* Get the first/last regions intersecting with the range */
233 damon_for_each_region_from(r, t) {
234 if (damon_intersect(r, range)) {
235 if (!first)
236 first = r;
237 last = r;
238 }
239 if (r->ar.start >= range->end)
240 break;
241 }
242 if (!first) {
243 /* no region intersects with this range */
244 newr = damon_new_region(
245 ALIGN_DOWN(range->start,
246 DAMON_MIN_REGION),
247 ALIGN(range->end, DAMON_MIN_REGION));
248 if (!newr)
249 return -ENOMEM;
250 damon_insert_region(newr, damon_prev_region(r), r, t);
251 } else {
252 /* resize intersecting regions to fit in this range */
253 first->ar.start = ALIGN_DOWN(range->start,
254 DAMON_MIN_REGION);
255 last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
256
257 /* fill possible holes in the range */
258 err = damon_fill_regions_holes(first, last, t);
259 if (err)
260 return err;
261 }
262 }
263 return 0;
264}
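/*
 * Example: a minimal sketch of restricting a target to a single address
 * range.  The boundaries below are hypothetical; damon_set_regions() aligns
 * them to DAMON_MIN_REGION internally.
 *
 *	struct damon_addr_range range = {
 *		.start = 0x100000000UL,
 *		.end = 0x200000000UL,
 *	};
 *
 *	if (damon_set_regions(t, &range, 1))
 *		return -ENOMEM;
 */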
265
266/* initialize private fields of damos_quota and return the pointer */
267static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
268{
269 quota->total_charged_sz = 0;
270 quota->total_charged_ns = 0;
271 quota->esz = 0;
272 quota->charged_sz = 0;
273 quota->charged_from = 0;
274 quota->charge_target_from = NULL;
275 quota->charge_addr_from = 0;
276 return quota;
277}
278
279struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
280 enum damos_action action, struct damos_quota *quota,
281 struct damos_watermarks *wmarks)
282{
283 struct damos *scheme;
284
285 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
286 if (!scheme)
287 return NULL;
288 scheme->pattern = *pattern;
289 scheme->action = action;
290 scheme->stat = (struct damos_stat){};
291 INIT_LIST_HEAD(&scheme->list);
292
293 scheme->quota = *(damos_quota_init_priv(quota));
294
295 scheme->wmarks = *wmarks;
296 scheme->wmarks.activated = true;
297
298 return scheme;
299}
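/*
 * Example: a minimal construction sketch for a scheme that pages out regions
 * not accessed for at least ten aggregation intervals, with no quota and no
 * watermark-based deactivation.  DAMOS_PAGEOUT is assumed to be a valid
 * 'enum damos_action' value, and the pattern numbers are arbitrary.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = PAGE_SIZE,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = 0,
 *		.min_age_region = 10,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, &quota,
 *			&wmarks);
 *
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);
 */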
300
301void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
302{
303 list_add_tail(&s->list, &ctx->schemes);
304}
305
306static void damon_del_scheme(struct damos *s)
307{
308 list_del(&s->list);
309}
310
311static void damon_free_scheme(struct damos *s)
312{
313 kfree(s);
314}
315
316void damon_destroy_scheme(struct damos *s)
317{
318 damon_del_scheme(s);
319 damon_free_scheme(s);
320}
321
322/*
323 * Construct a damon_target struct
324 *
325 * Returns the pointer to the new struct on success, or NULL otherwise
326 */
327struct damon_target *damon_new_target(void)
328{
329 struct damon_target *t;
330
331 t = kmalloc(sizeof(*t), GFP_KERNEL);
332 if (!t)
333 return NULL;
334
335 t->pid = NULL;
336 t->nr_regions = 0;
337 INIT_LIST_HEAD(&t->regions_list);
338 INIT_LIST_HEAD(&t->list);
339
340 return t;
341}
342
343void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
344{
345 list_add_tail(&t->list, &ctx->adaptive_targets);
346}
347
348bool damon_targets_empty(struct damon_ctx *ctx)
349{
350 return list_empty(&ctx->adaptive_targets);
351}
352
353static void damon_del_target(struct damon_target *t)
354{
355 list_del(&t->list);
356}
357
358void damon_free_target(struct damon_target *t)
359{
360 struct damon_region *r, *next;
361
362 damon_for_each_region_safe(r, next, t)
363 damon_free_region(r);
364 kfree(t);
365}
366
367void damon_destroy_target(struct damon_target *t)
368{
369 damon_del_target(t);
370 damon_free_target(t);
371}
372
373unsigned int damon_nr_regions(struct damon_target *t)
374{
375 return t->nr_regions;
376}
377
378struct damon_ctx *damon_new_ctx(void)
379{
380 struct damon_ctx *ctx;
381
382 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
383 if (!ctx)
384 return NULL;
385
386 ctx->attrs.sample_interval = 5 * 1000;
387 ctx->attrs.aggr_interval = 100 * 1000;
388 ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
389
390 ktime_get_coarse_ts64(&ctx->last_aggregation);
391 ctx->last_ops_update = ctx->last_aggregation;
392
393 mutex_init(&ctx->kdamond_lock);
394
395 ctx->attrs.min_nr_regions = 10;
396 ctx->attrs.max_nr_regions = 1000;
397
398 INIT_LIST_HEAD(&ctx->adaptive_targets);
399 INIT_LIST_HEAD(&ctx->schemes);
400
401 return ctx;
402}
403
404static void damon_destroy_targets(struct damon_ctx *ctx)
405{
406 struct damon_target *t, *next_t;
407
408 if (ctx->ops.cleanup) {
409 ctx->ops.cleanup(ctx);
410 return;
411 }
412
413 damon_for_each_target_safe(t, next_t, ctx)
414 damon_destroy_target(t);
415}
416
417void damon_destroy_ctx(struct damon_ctx *ctx)
418{
419 struct damos *s, *next_s;
420
421 damon_destroy_targets(ctx);
422
423 damon_for_each_scheme_safe(s, next_s, ctx)
424 damon_destroy_scheme(s);
425
426 kfree(ctx);
427}
428
429/**
430 * damon_set_attrs() - Set attributes for the monitoring.
431 * @ctx: monitoring context
432 * @attrs: monitoring attributes
433 *
434 * This function should not be called while the kdamond is running.
435 * Every time interval is in micro-seconds.
436 *
437 * Return: 0 on success, negative error code otherwise.
438 */
439int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
440{
441 if (attrs->min_nr_regions < 3)
442 return -EINVAL;
443 if (attrs->min_nr_regions > attrs->max_nr_regions)
444 return -EINVAL;
445
446 ctx->attrs = *attrs;
447 return 0;
448}
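/*
 * Example: a minimal sketch of setting 5 ms sampling, 100 ms aggregation,
 * 60 s operations update, and 10 to 1000 regions.  All intervals are in
 * microseconds, as noted above.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,
 *		.aggr_interval = 100000,
 *		.ops_update_interval = 60000000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *
 *	if (damon_set_attrs(ctx, &attrs))
 *		return -EINVAL;
 */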
449
450/**
451 * damon_set_schemes() - Set data access monitoring based operation schemes.
452 * @ctx: monitoring context
453 * @schemes: array of the schemes
454 * @nr_schemes: number of entries in @schemes
455 *
456 * This function should not be called while the kdamond of the context is
457 * running.
458 */
459void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
460 ssize_t nr_schemes)
461{
462 struct damos *s, *next;
463 ssize_t i;
464
465 damon_for_each_scheme_safe(s, next, ctx)
466 damon_destroy_scheme(s);
467 for (i = 0; i < nr_schemes; i++)
468 damon_add_scheme(ctx, schemes[i]);
469}
470
471/**
472 * damon_nr_running_ctxs() - Return number of currently running contexts.
473 */
474int damon_nr_running_ctxs(void)
475{
476 int nr_ctxs;
477
478 mutex_lock(&damon_lock);
479 nr_ctxs = nr_running_ctxs;
480 mutex_unlock(&damon_lock);
481
482 return nr_ctxs;
483}
484
485/* Returns the size upper limit for each monitoring region */
486static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
487{
488 struct damon_target *t;
489 struct damon_region *r;
490 unsigned long sz = 0;
491
492 damon_for_each_target(t, ctx) {
493 damon_for_each_region(r, t)
494 sz += damon_sz_region(r);
495 }
496
497 if (ctx->attrs.min_nr_regions)
498 sz /= ctx->attrs.min_nr_regions;
499 if (sz < DAMON_MIN_REGION)
500 sz = DAMON_MIN_REGION;
501
502 return sz;
503}
504
505static int kdamond_fn(void *data);
506
507/*
508 * __damon_start() - Starts monitoring with given context.
509 * @ctx: monitoring context
510 *
511 * This function should be called while damon_lock is held.
512 *
513 * Return: 0 on success, negative error code otherwise.
514 */
515static int __damon_start(struct damon_ctx *ctx)
516{
517 int err = -EBUSY;
518
519 mutex_lock(&ctx->kdamond_lock);
520 if (!ctx->kdamond) {
521 err = 0;
522 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
523 nr_running_ctxs);
524 if (IS_ERR(ctx->kdamond)) {
525 err = PTR_ERR(ctx->kdamond);
526 ctx->kdamond = NULL;
527 }
528 }
529 mutex_unlock(&ctx->kdamond_lock);
530
531 return err;
532}
533
534/**
535 * damon_start() - Starts monitoring for a given group of contexts.
536 * @ctxs: an array of the pointers for contexts to start monitoring
537 * @nr_ctxs: size of @ctxs
538 * @exclusive: exclusiveness of this contexts group
539 *
540 * This function starts a group of monitoring threads for a group of monitoring
541 * contexts. One thread per context is created and run in parallel. The
542 * caller should handle synchronization between the threads by itself. If
543 * @exclusive is true and a group of threads created by another
544 * 'damon_start()' call is currently running, this function does nothing but
545 * returns -EBUSY.
546 *
547 * Return: 0 on success, negative error code otherwise.
548 */
549int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
550{
551 int i;
552 int err = 0;
553
554 mutex_lock(&damon_lock);
555 if ((exclusive && nr_running_ctxs) ||
556 (!exclusive && running_exclusive_ctxs)) {
557 mutex_unlock(&damon_lock);
558 return -EBUSY;
559 }
560
561 for (i = 0; i < nr_ctxs; i++) {
562 err = __damon_start(ctxs[i]);
563 if (err)
564 break;
565 nr_running_ctxs++;
566 }
567 if (exclusive && nr_running_ctxs)
568 running_exclusive_ctxs = true;
569 mutex_unlock(&damon_lock);
570
571 return err;
572}
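/*
 * Example: a minimal sketch of starting a single, non-exclusive context and
 * stopping it later, assuming the context is already fully configured
 * (operations set, targets, and attributes).
 *
 *	int err = damon_start(&ctx, 1, false);
 *
 *	if (err)
 *		return err;
 *	...	(monitoring runs in a "kdamond.<N>" kernel thread)
 *	err = damon_stop(&ctx, 1);
 */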
573
574/*
575 * __damon_stop() - Stops monitoring of a given context.
576 * @ctx: monitoring context
577 *
578 * Return: 0 on success, negative error code otherwise.
579 */
580static int __damon_stop(struct damon_ctx *ctx)
581{
582 struct task_struct *tsk;
583
584 mutex_lock(&ctx->kdamond_lock);
585 tsk = ctx->kdamond;
586 if (tsk) {
587 get_task_struct(tsk);
588 mutex_unlock(&ctx->kdamond_lock);
589 kthread_stop(tsk);
590 put_task_struct(tsk);
591 return 0;
592 }
593 mutex_unlock(&ctx->kdamond_lock);
594
595 return -EPERM;
596}
597
598/**
599 * damon_stop() - Stops monitoring for a given group of contexts.
600 * @ctxs: an array of the pointers for contexts to stop monitoring
601 * @nr_ctxs: size of @ctxs
602 *
603 * Return: 0 on success, negative error code otherwise.
604 */
605int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
606{
607 int i, err = 0;
608
609 for (i = 0; i < nr_ctxs; i++) {
610 /* nr_running_ctxs is decremented in kdamond_fn */
611 err = __damon_stop(ctxs[i]);
612 if (err)
613 break;
614 }
615 return err;
616}
617
618/*
619 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
620 * @baseline: the time to check whether the interval has elapsed since
621 * @interval: the time interval (microseconds)
622 *
623 * See whether the given time interval has passed since the given baseline
624 * time. If so, it also updates the baseline to current time for next check.
625 *
626 * Return: true if the time interval has passed, or false otherwise.
627 */
628static bool damon_check_reset_time_interval(struct timespec64 *baseline,
629 unsigned long interval)
630{
631 struct timespec64 now;
632
633 ktime_get_coarse_ts64(&now);
634 if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
635 interval * 1000)
636 return false;
637 *baseline = now;
638 return true;
639}
640
641/*
642 * Check whether it is time to flush the aggregated information
643 */
644static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
645{
646 return damon_check_reset_time_interval(&ctx->last_aggregation,
647 ctx->attrs.aggr_interval);
648}
649
650/*
651 * Reset the aggregated monitoring results ('nr_accesses' of each region).
652 */
653static void kdamond_reset_aggregated(struct damon_ctx *c)
654{
655 struct damon_target *t;
656 unsigned int ti = 0; /* target's index */
657
658 damon_for_each_target(t, c) {
659 struct damon_region *r;
660
661 damon_for_each_region(r, t) {
662 trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
663 r->last_nr_accesses = r->nr_accesses;
664 r->nr_accesses = 0;
665 }
666 ti++;
667 }
668}
669
670static void damon_split_region_at(struct damon_target *t,
671 struct damon_region *r, unsigned long sz_r);
672
673static bool __damos_valid_target(struct damon_region *r, struct damos *s)
674{
675 unsigned long sz;
676
677 sz = damon_sz_region(r);
678 return s->pattern.min_sz_region <= sz &&
679 sz <= s->pattern.max_sz_region &&
680 s->pattern.min_nr_accesses <= r->nr_accesses &&
681 r->nr_accesses <= s->pattern.max_nr_accesses &&
682 s->pattern.min_age_region <= r->age &&
683 r->age <= s->pattern.max_age_region;
684}
685
686static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
687 struct damon_region *r, struct damos *s)
688{
689 bool ret = __damos_valid_target(r, s);
690
691 if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
692 return ret;
693
694 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
695}
696
697/*
698 * damos_skip_charged_region() - Check if the given region or starting part of
699 * it is already charged for the DAMOS quota.
700 * @t: The target of the region.
701 * @rp: The pointer to the region.
702 * @s: The scheme to be applied.
703 *
704 * If a quota of a scheme has been exceeded in a quota charge window, the
705 * scheme's action would be applied to only a part of the target access
706 * pattern fulfilling regions. To avoid applying the scheme action only to
707 * already applied regions, DAMON skips applying the scheme action to the
708 * regions that were charged in the previous charge window.
709 *
710 * This function checks if a given region should be skipped or not for the
711 * reason. If only the starting part of the region has previously been
712 * charged, this function splits the region into two so that the second one
713 * covers the area that was not charged in the previous charge window, saves
714 * the second region in *rp, and returns false, so that the caller can apply
715 * the DAMON action to the second one.
716 *
717 * Return: true if the region should be entirely skipped, false otherwise.
718 */
719static bool damos_skip_charged_region(struct damon_target *t,
720 struct damon_region **rp, struct damos *s)
721{
722 struct damon_region *r = *rp;
723 struct damos_quota *quota = &s->quota;
724 unsigned long sz_to_skip;
725
726 /* Skip previously charged regions */
727 if (quota->charge_target_from) {
728 if (t != quota->charge_target_from)
729 return true;
730 if (r == damon_last_region(t)) {
731 quota->charge_target_from = NULL;
732 quota->charge_addr_from = 0;
733 return true;
734 }
735 if (quota->charge_addr_from &&
736 r->ar.end <= quota->charge_addr_from)
737 return true;
738
739 if (quota->charge_addr_from && r->ar.start <
740 quota->charge_addr_from) {
741 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
742 r->ar.start, DAMON_MIN_REGION);
743 if (!sz_to_skip) {
744 if (damon_sz_region(r) <= DAMON_MIN_REGION)
745 return true;
746 sz_to_skip = DAMON_MIN_REGION;
747 }
748 damon_split_region_at(t, r, sz_to_skip);
749 r = damon_next_region(r);
750 *rp = r;
751 }
752 quota->charge_target_from = NULL;
753 quota->charge_addr_from = 0;
754 }
755 return false;
756}
757
758static void damos_update_stat(struct damos *s,
759 unsigned long sz_tried, unsigned long sz_applied)
760{
761 s->stat.nr_tried++;
762 s->stat.sz_tried += sz_tried;
763 if (sz_applied)
764 s->stat.nr_applied++;
765 s->stat.sz_applied += sz_applied;
766}
767
768static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
769 struct damon_region *r, struct damos *s)
770{
771 struct damos_quota *quota = &s->quota;
772 unsigned long sz = damon_sz_region(r);
773 struct timespec64 begin, end;
774 unsigned long sz_applied = 0;
775 int err = 0;
776
777 if (c->ops.apply_scheme) {
778 if (quota->esz && quota->charged_sz + sz > quota->esz) {
779 sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
780 DAMON_MIN_REGION);
781 if (!sz)
782 goto update_stat;
783 damon_split_region_at(t, r, sz);
784 }
785 ktime_get_coarse_ts64(&begin);
786 if (c->callback.before_damos_apply)
787 err = c->callback.before_damos_apply(c, t, r, s);
788 if (!err)
789 sz_applied = c->ops.apply_scheme(c, t, r, s);
790 ktime_get_coarse_ts64(&end);
791 quota->total_charged_ns += timespec64_to_ns(&end) -
792 timespec64_to_ns(&begin);
793 quota->charged_sz += sz;
794 if (quota->esz && quota->charged_sz >= quota->esz) {
795 quota->charge_target_from = t;
796 quota->charge_addr_from = r->ar.end + 1;
797 }
798 }
799 if (s->action != DAMOS_STAT)
800 r->age = 0;
801
802update_stat:
803 damos_update_stat(s, sz, sz_applied);
804}
805
806static void damon_do_apply_schemes(struct damon_ctx *c,
807 struct damon_target *t,
808 struct damon_region *r)
809{
810 struct damos *s;
811
812 damon_for_each_scheme(s, c) {
813 struct damos_quota *quota = &s->quota;
814
815 if (!s->wmarks.activated)
816 continue;
817
818 /* Check the quota */
819 if (quota->esz && quota->charged_sz >= quota->esz)
820 continue;
821
822 if (damos_skip_charged_region(t, &r, s))
823 continue;
824
825 if (!damos_valid_target(c, t, r, s))
826 continue;
827
828 damos_apply_scheme(c, t, r, s);
829 }
830}
831
832/* Shouldn't be called if quota->ms and quota->sz are zero */
833static void damos_set_effective_quota(struct damos_quota *quota)
834{
835 unsigned long throughput;
836 unsigned long esz;
837
838 if (!quota->ms) {
839 quota->esz = quota->sz;
840 return;
841 }
842
843 if (quota->total_charged_ns)
844 throughput = quota->total_charged_sz * 1000000 /
845 quota->total_charged_ns;
846 else
847 throughput = PAGE_SIZE * 1024;
848 esz = throughput * quota->ms;
849
850 if (quota->sz && quota->sz < esz)
851 esz = quota->sz;
852 quota->esz = esz;
853}
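/*
 * For example, if 4 MiB (4194304 bytes) were charged over 20 ms
 * (20000000 ns) of total apply time, the measured throughput is
 * 4194304 * 1000000 / 20000000, i.e. roughly 205 KiB per millisecond, so a
 * 10 ms time quota (quota->ms) translates to an effective size quota
 * (quota->esz) of about 2 MiB, further capped by quota->sz when that is set.
 */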
854
855static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
856{
857 struct damos_quota *quota = &s->quota;
858 struct damon_target *t;
859 struct damon_region *r;
860 unsigned long cumulated_sz;
861 unsigned int score, max_score = 0;
862
863 if (!quota->ms && !quota->sz)
864 return;
865
866 /* New charge window starts */
867 if (time_after_eq(jiffies, quota->charged_from +
868 msecs_to_jiffies(quota->reset_interval))) {
869 if (quota->esz && quota->charged_sz >= quota->esz)
870 s->stat.qt_exceeds++;
871 quota->total_charged_sz += quota->charged_sz;
872 quota->charged_from = jiffies;
873 quota->charged_sz = 0;
874 damos_set_effective_quota(quota);
875 }
876
877 if (!c->ops.get_scheme_score)
878 return;
879
880 /* Fill up the score histogram */
881 memset(quota->histogram, 0, sizeof(quota->histogram));
882 damon_for_each_target(t, c) {
883 damon_for_each_region(r, t) {
884 if (!__damos_valid_target(r, s))
885 continue;
886 score = c->ops.get_scheme_score(c, t, r, s);
887 quota->histogram[score] += damon_sz_region(r);
888 if (score > max_score)
889 max_score = score;
890 }
891 }
892
893 /* Set the min score limit */
894 for (cumulated_sz = 0, score = max_score; ; score--) {
895 cumulated_sz += quota->histogram[score];
896 if (cumulated_sz >= quota->esz || !score)
897 break;
898 }
899 quota->min_score = score;
900}
901
902static void kdamond_apply_schemes(struct damon_ctx *c)
903{
904 struct damon_target *t;
905 struct damon_region *r, *next_r;
906 struct damos *s;
907
908 damon_for_each_scheme(s, c) {
909 if (!s->wmarks.activated)
910 continue;
911
912 damos_adjust_quota(c, s);
913 }
914
915 damon_for_each_target(t, c) {
916 damon_for_each_region_safe(r, next_r, t)
917 damon_do_apply_schemes(c, t, r);
918 }
919}
920
921/*
922 * Merge two adjacent regions into one region
923 */
924static void damon_merge_two_regions(struct damon_target *t,
925 struct damon_region *l, struct damon_region *r)
926{
927 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
928
929 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
930 (sz_l + sz_r);
931 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
932 l->ar.end = r->ar.end;
933 damon_destroy_region(r, t);
934}
935
936/*
937 * Merge adjacent regions having similar access frequencies
938 *
939 * t target affected by this merge operation
940 * thres '->nr_accesses' diff threshold for the merge
941 * sz_limit size upper limit of each region
942 */
943static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
944 unsigned long sz_limit)
945{
946 struct damon_region *r, *prev = NULL, *next;
947
948 damon_for_each_region_safe(r, next, t) {
949 if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
950 r->age = 0;
951 else
952 r->age++;
953
954 if (prev && prev->ar.end == r->ar.start &&
955 abs(prev->nr_accesses - r->nr_accesses) <= thres &&
956 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
957 damon_merge_two_regions(t, prev, r);
958 else
959 prev = r;
960 }
961}
962
963/*
964 * Merge adjacent regions having similar access frequencies
965 *
966 * threshold '->nr_accesses' diff threshold for the merge
967 * sz_limit size upper limit of each region
968 *
969 * This function merges adjacent monitoring target regions whose access
970 * frequencies are similar. This is for minimizing the monitoring
971 * overhead under the dynamically changeable access pattern. If a merge was
972 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
973 */
974static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
975 unsigned long sz_limit)
976{
977 struct damon_target *t;
978
979 damon_for_each_target(t, c)
980 damon_merge_regions_of(t, threshold, sz_limit);
981}
982
983/*
984 * Split a region in two
985 *
986 * r the region to be split
987 * sz_r size of the first sub-region that will be made
988 */
989static void damon_split_region_at(struct damon_target *t,
990 struct damon_region *r, unsigned long sz_r)
991{
992 struct damon_region *new;
993
994 new = damon_new_region(r->ar.start + sz_r, r->ar.end);
995 if (!new)
996 return;
997
998 r->ar.end = new->ar.start;
999
1000 new->age = r->age;
1001 new->last_nr_accesses = r->last_nr_accesses;
1002
1003 damon_insert_region(new, r, damon_next_region(r), t);
1004}
1005
1006/* Split every region in the given target into 'nr_subs' regions */
1007static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1008{
1009 struct damon_region *r, *next;
1010 unsigned long sz_region, sz_sub = 0;
1011 int i;
1012
1013 damon_for_each_region_safe(r, next, t) {
1014 sz_region = damon_sz_region(r);
1015
1016 for (i = 0; i < nr_subs - 1 &&
1017 sz_region > 2 * DAMON_MIN_REGION; i++) {
1018 /*
1019 * Randomly select the size of the left sub-region to be
1020 * at least 10% and at most 90% of the original region.
1021 */
1022 sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1023 sz_region / 10, DAMON_MIN_REGION);
1024 /* Do not allow blank region */
1025 if (sz_sub == 0 || sz_sub >= sz_region)
1026 continue;
1027
1028 damon_split_region_at(t, r, sz_sub);
1029 sz_region = sz_sub;
1030 }
1031 }
1032}
1033
1034/*
1035 * Split every target region into randomly-sized small regions
1036 *
1037 * This function splits every target region into random-sized small regions if
1038 * the current total number of regions is equal to or smaller than half of the
1039 * user-specified maximum number of regions. This is for maximizing the
1040 * monitoring accuracy under the dynamically changeable access patterns. If a
1041 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1042 * it.
1043 */
1044static void kdamond_split_regions(struct damon_ctx *ctx)
1045{
1046 struct damon_target *t;
1047 unsigned int nr_regions = 0;
1048 static unsigned int last_nr_regions;
1049 int nr_subregions = 2;
1050
1051 damon_for_each_target(t, ctx)
1052 nr_regions += damon_nr_regions(t);
1053
1054 if (nr_regions > ctx->attrs.max_nr_regions / 2)
1055 return;
1056
1057 /* Maybe the middle of the region has different access frequency */
1058 if (last_nr_regions == nr_regions &&
1059 nr_regions < ctx->attrs.max_nr_regions / 3)
1060 nr_subregions = 3;
1061
1062 damon_for_each_target(t, ctx)
1063 damon_split_regions_of(t, nr_subregions);
1064
1065 last_nr_regions = nr_regions;
1066}
1067
1068/*
1069 * Check whether it is time to check and apply the operations-related data
1070 * structures.
1071 *
1072 * Returns true if it is.
1073 */
1074static bool kdamond_need_update_operations(struct damon_ctx *ctx)
1075{
1076 return damon_check_reset_time_interval(&ctx->last_ops_update,
1077 ctx->attrs.ops_update_interval);
1078}
1079
1080/*
1081 * Check whether current monitoring should be stopped
1082 *
1083 * The monitoring is stopped when either the user requested to stop, or all
1084 * monitoring targets are invalid.
1085 *
1086 * Returns true if need to stop current monitoring.
1087 */
1088static bool kdamond_need_stop(struct damon_ctx *ctx)
1089{
1090 struct damon_target *t;
1091
1092 if (kthread_should_stop())
1093 return true;
1094
1095 if (!ctx->ops.target_valid)
1096 return false;
1097
1098 damon_for_each_target(t, ctx) {
1099 if (ctx->ops.target_valid(t))
1100 return false;
1101 }
1102
1103 return true;
1104}
1105
1106static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
1107{
1108 struct sysinfo i;
1109
1110 switch (metric) {
1111 case DAMOS_WMARK_FREE_MEM_RATE:
1112 si_meminfo(&i);
1113 return i.freeram * 1000 / i.totalram;
1114 default:
1115 break;
1116 }
1117 return -EINVAL;
1118}
1119
1120/*
1121 * Returns zero if the scheme is active. Else, returns time to wait for next
1122 * watermark check in micro-seconds.
1123 */
1124static unsigned long damos_wmark_wait_us(struct damos *scheme)
1125{
1126 unsigned long metric;
1127
1128 if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
1129 return 0;
1130
1131 metric = damos_wmark_metric_value(scheme->wmarks.metric);
1132 /* higher than high watermark or lower than low watermark */
1133 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
1134 if (scheme->wmarks.activated)
1135 pr_debug("deactivate a scheme (%d) for %s wmark\n",
1136 scheme->action,
1137 metric > scheme->wmarks.high ?
1138 "high" : "low");
1139 scheme->wmarks.activated = false;
1140 return scheme->wmarks.interval;
1141 }
1142
1143 /* inactive and higher than middle watermark */
1144 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
1145 !scheme->wmarks.activated)
1146 return scheme->wmarks.interval;
1147
1148 if (!scheme->wmarks.activated)
1149 pr_debug("activate a scheme (%d)\n", scheme->action);
1150 scheme->wmarks.activated = true;
1151 return 0;
1152}
1153
1154static void kdamond_usleep(unsigned long usecs)
1155{
1156 /* See Documentation/timers/timers-howto.rst for the thresholds */
1157 if (usecs > 20 * USEC_PER_MSEC)
1158 schedule_timeout_idle(usecs_to_jiffies(usecs));
1159 else
1160 usleep_idle_range(usecs, usecs + 1);
1161}
1162
1163/* Returns negative error code if kdamond should stop while waiting for a scheme activation */
1164static int kdamond_wait_activation(struct damon_ctx *ctx)
1165{
1166 struct damos *s;
1167 unsigned long wait_time;
1168 unsigned long min_wait_time = 0;
1169 bool init_wait_time = false;
1170
1171 while (!kdamond_need_stop(ctx)) {
1172 damon_for_each_scheme(s, ctx) {
1173 wait_time = damos_wmark_wait_us(s);
1174 if (!init_wait_time || wait_time < min_wait_time) {
1175 init_wait_time = true;
1176 min_wait_time = wait_time;
1177 }
1178 }
1179 if (!min_wait_time)
1180 return 0;
1181
1182 kdamond_usleep(min_wait_time);
1183
1184 if (ctx->callback.after_wmarks_check &&
1185 ctx->callback.after_wmarks_check(ctx))
1186 break;
1187 }
1188 return -EBUSY;
1189}
1190
1191/*
1192 * The monitoring daemon that runs as a kernel thread
1193 */
1194static int kdamond_fn(void *data)
1195{
1196 struct damon_ctx *ctx = data;
1197 struct damon_target *t;
1198 struct damon_region *r, *next;
1199 unsigned int max_nr_accesses = 0;
1200 unsigned long sz_limit = 0;
1201
1202 pr_debug("kdamond (%d) starts\n", current->pid);
1203
1204 if (ctx->ops.init)
1205 ctx->ops.init(ctx);
1206 if (ctx->callback.before_start && ctx->callback.before_start(ctx))
1207 goto done;
1208
1209 sz_limit = damon_region_sz_limit(ctx);
1210
1211 while (!kdamond_need_stop(ctx)) {
1212 if (kdamond_wait_activation(ctx))
1213 break;
1214
1215 if (ctx->ops.prepare_access_checks)
1216 ctx->ops.prepare_access_checks(ctx);
1217 if (ctx->callback.after_sampling &&
1218 ctx->callback.after_sampling(ctx))
1219 break;
1220
1221 kdamond_usleep(ctx->attrs.sample_interval);
1222
1223 if (ctx->ops.check_accesses)
1224 max_nr_accesses = ctx->ops.check_accesses(ctx);
1225
1226 if (kdamond_aggregate_interval_passed(ctx)) {
1227 kdamond_merge_regions(ctx,
1228 max_nr_accesses / 10,
1229 sz_limit);
1230 if (ctx->callback.after_aggregation &&
1231 ctx->callback.after_aggregation(ctx))
1232 break;
1233 kdamond_apply_schemes(ctx);
1234 kdamond_reset_aggregated(ctx);
1235 kdamond_split_regions(ctx);
1236 if (ctx->ops.reset_aggregated)
1237 ctx->ops.reset_aggregated(ctx);
1238 }
1239
1240 if (kdamond_need_update_operations(ctx)) {
1241 if (ctx->ops.update)
1242 ctx->ops.update(ctx);
1243 sz_limit = damon_region_sz_limit(ctx);
1244 }
1245 }
1246done:
1247 damon_for_each_target(t, ctx) {
1248 damon_for_each_region_safe(r, next, t)
1249 damon_destroy_region(r, t);
1250 }
1251
1252 if (ctx->callback.before_terminate)
1253 ctx->callback.before_terminate(ctx);
1254 if (ctx->ops.cleanup)
1255 ctx->ops.cleanup(ctx);
1256
1257 pr_debug("kdamond (%d) finishes\n", current->pid);
1258 mutex_lock(&ctx->kdamond_lock);
1259 ctx->kdamond = NULL;
1260 mutex_unlock(&ctx->kdamond_lock);
1261
1262 mutex_lock(&damon_lock);
1263 nr_running_ctxs--;
1264 if (!nr_running_ctxs && running_exclusive_ctxs)
1265 running_exclusive_ctxs = false;
1266 mutex_unlock(&damon_lock);
1267
1268 return 0;
1269}
1270
1271/*
1272 * struct damon_system_ram_region - System RAM resource address region of
1273 * [@start, @end).
1274 * @start: Start address of the region (inclusive).
1275 * @end: End address of the region (exclusive).
1276 */
1277struct damon_system_ram_region {
1278 unsigned long start;
1279 unsigned long end;
1280};
1281
1282static int walk_system_ram(struct resource *res, void *arg)
1283{
1284 struct damon_system_ram_region *a = arg;
1285
1286 if (a->end - a->start < resource_size(res)) {
1287 a->start = res->start;
1288 a->end = res->end;
1289 }
1290 return 0;
1291}
1292
1293/*
1294 * Find biggest 'System RAM' resource and store its start and end address in
1295 * @start and @end, respectively. If no System RAM is found, returns false.
1296 */
1297static bool damon_find_biggest_system_ram(unsigned long *start,
1298 unsigned long *end)
1299
1300{
1301 struct damon_system_ram_region arg = {};
1302
1303 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
1304 if (arg.end <= arg.start)
1305 return false;
1306
1307 *start = arg.start;
1308 *end = arg.end;
1309 return true;
1310}
1311
1312/**
1313 * damon_set_region_biggest_system_ram_default() - Set the region of the given
1314 * monitoring target as requested, or biggest 'System RAM'.
1315 * @t: The monitoring target to set the region.
1316 * @start: The pointer to the start address of the region.
1317 * @end: The pointer to the end address of the region.
1318 *
1319 * This function sets the region of @t as requested by @start and @end. If the
1320 * values of @start and @end are zero, however, this function finds the biggest
1321 * 'System RAM' resource and sets the region to cover the resource. In the
1322 * latter case, this function saves the start and end addresses of the resource
1323 * in @start and @end, respectively.
1324 *
1325 * Return: 0 on success, negative error code otherwise.
1326 */
1327int damon_set_region_biggest_system_ram_default(struct damon_target *t,
1328 unsigned long *start, unsigned long *end)
1329{
1330 struct damon_addr_range addr_range;
1331
1332 if (*start > *end)
1333 return -EINVAL;
1334
1335 if (!*start && !*end &&
1336 !damon_find_biggest_system_ram(start, end))
1337 return -EINVAL;
1338
1339 addr_range.start = *start;
1340 addr_range.end = *end;
1341 return damon_set_regions(t, &addr_range, 1);
1342}
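/*
 * Example: a minimal sketch of letting DAMON pick the biggest 'System RAM'
 * resource when the caller did not request an explicit range.
 *
 *	unsigned long start = 0, end = 0;
 *
 *	if (damon_set_region_biggest_system_ram_default(t, &start, &end))
 *		return -EINVAL;
 *	pr_debug("monitoring [%lu, %lu)\n", start, end);
 */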
1343
1344static int __init damon_init(void)
1345{
1346 damon_region_cache = KMEM_CACHE(damon_region, 0);
1347 if (unlikely(!damon_region_cache)) {
1348 pr_err("creating damon_region_cache fails\n");
1349 return -ENOMEM;
1350 }
1351
1352 return 0;
1353}
1354
1355subsys_initcall(damon_init);
1356
1357#include "core-test.h"
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Data Access Monitor
4 *
5 * Author: SeongJae Park <sj@kernel.org>
6 */
7
8#define pr_fmt(fmt) "damon: " fmt
9
10#include <linux/damon.h>
11#include <linux/delay.h>
12#include <linux/kthread.h>
13#include <linux/mm.h>
14#include <linux/psi.h>
15#include <linux/slab.h>
16#include <linux/string.h>
17
18#define CREATE_TRACE_POINTS
19#include <trace/events/damon.h>
20
21#ifdef CONFIG_DAMON_KUNIT_TEST
22#undef DAMON_MIN_REGION
23#define DAMON_MIN_REGION 1
24#endif
25
26static DEFINE_MUTEX(damon_lock);
27static int nr_running_ctxs;
28static bool running_exclusive_ctxs;
29
30static DEFINE_MUTEX(damon_ops_lock);
31static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
32
33static struct kmem_cache *damon_region_cache __ro_after_init;
34
35/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
36static bool __damon_is_registered_ops(enum damon_ops_id id)
37{
38 struct damon_operations empty_ops = {};
39
40 if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
41 return false;
42 return true;
43}
44
45/**
46 * damon_is_registered_ops() - Check if a given damon_operations is registered.
47 * @id: Id of the damon_operations to check if registered.
48 *
49 * Return: true if the ops is set, false otherwise.
50 */
51bool damon_is_registered_ops(enum damon_ops_id id)
52{
53 bool registered;
54
55 if (id >= NR_DAMON_OPS)
56 return false;
57 mutex_lock(&damon_ops_lock);
58 registered = __damon_is_registered_ops(id);
59 mutex_unlock(&damon_ops_lock);
60 return registered;
61}
62
63/**
64 * damon_register_ops() - Register a monitoring operations set to DAMON.
65 * @ops: monitoring operations set to register.
66 *
67 * This function registers a monitoring operations set of valid &struct
68 * damon_operations->id so that others can find and use them later.
69 *
70 * Return: 0 on success, negative error code otherwise.
71 */
72int damon_register_ops(struct damon_operations *ops)
73{
74 int err = 0;
75
76 if (ops->id >= NR_DAMON_OPS)
77 return -EINVAL;
78 mutex_lock(&damon_ops_lock);
79 /* Fail for already registered ops */
80 if (__damon_is_registered_ops(ops->id)) {
81 err = -EINVAL;
82 goto out;
83 }
84 damon_registered_ops[ops->id] = *ops;
85out:
86 mutex_unlock(&damon_ops_lock);
87 return err;
88}
89
90/**
91 * damon_select_ops() - Select a monitoring operations set to use with the context.
92 * @ctx: monitoring context to use the operations.
93 * @id: id of the registered monitoring operations to select.
94 *
95 * This function finds the registered monitoring operations set of @id and
96 * makes @ctx use it.
97 *
98 * Return: 0 on success, negative error code otherwise.
99 */
100int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
101{
102 int err = 0;
103
104 if (id >= NR_DAMON_OPS)
105 return -EINVAL;
106
107 mutex_lock(&damon_ops_lock);
108 if (!__damon_is_registered_ops(id))
109 err = -EINVAL;
110 else
111 ctx->ops = damon_registered_ops[id];
112 mutex_unlock(&damon_ops_lock);
113 return err;
114}
115
116/*
117 * Construct a damon_region struct
118 *
119 * Returns the pointer to the new struct on success, or NULL otherwise
120 */
121struct damon_region *damon_new_region(unsigned long start, unsigned long end)
122{
123 struct damon_region *region;
124
125 region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
126 if (!region)
127 return NULL;
128
129 region->ar.start = start;
130 region->ar.end = end;
131 region->nr_accesses = 0;
132 region->nr_accesses_bp = 0;
133 INIT_LIST_HEAD(&region->list);
134
135 region->age = 0;
136 region->last_nr_accesses = 0;
137
138 return region;
139}
140
141void damon_add_region(struct damon_region *r, struct damon_target *t)
142{
143 list_add_tail(&r->list, &t->regions_list);
144 t->nr_regions++;
145}
146
147static void damon_del_region(struct damon_region *r, struct damon_target *t)
148{
149 list_del(&r->list);
150 t->nr_regions--;
151}
152
153static void damon_free_region(struct damon_region *r)
154{
155 kmem_cache_free(damon_region_cache, r);
156}
157
158void damon_destroy_region(struct damon_region *r, struct damon_target *t)
159{
160 damon_del_region(r, t);
161 damon_free_region(r);
162}
163
164/*
165 * Check whether a region is intersecting an address range
166 *
167 * Returns true if it is.
168 */
169static bool damon_intersect(struct damon_region *r,
170 struct damon_addr_range *re)
171{
172 return !(r->ar.end <= re->start || re->end <= r->ar.start);
173}
174
175/*
176 * Fill holes in regions with new regions.
177 */
178static int damon_fill_regions_holes(struct damon_region *first,
179 struct damon_region *last, struct damon_target *t)
180{
181 struct damon_region *r = first;
182
183 damon_for_each_region_from(r, t) {
184 struct damon_region *next, *newr;
185
186 if (r == last)
187 break;
188 next = damon_next_region(r);
189 if (r->ar.end != next->ar.start) {
190 newr = damon_new_region(r->ar.end, next->ar.start);
191 if (!newr)
192 return -ENOMEM;
193 damon_insert_region(newr, r, next, t);
194 }
195 }
196 return 0;
197}
198
199/*
200 * damon_set_regions() - Set regions of a target for given address ranges.
201 * @t: the given target.
202 * @ranges: array of new monitoring target ranges.
203 * @nr_ranges: length of @ranges.
204 *
205 * This function adds new regions to, or modifies existing regions of, a
206 * monitoring target so that they fit in the given ranges.
207 *
208 * Return: 0 on success, or negative error code otherwise.
209 */
210int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
211 unsigned int nr_ranges)
212{
213 struct damon_region *r, *next;
214 unsigned int i;
215 int err;
216
217 /* Remove regions which are not in the new ranges */
218 damon_for_each_region_safe(r, next, t) {
219 for (i = 0; i < nr_ranges; i++) {
220 if (damon_intersect(r, &ranges[i]))
221 break;
222 }
223 if (i == nr_ranges)
224 damon_destroy_region(r, t);
225 }
226
227 r = damon_first_region(t);
228 /* Add new regions or resize existing regions to fit in the ranges */
229 for (i = 0; i < nr_ranges; i++) {
230 struct damon_region *first = NULL, *last, *newr;
231 struct damon_addr_range *range;
232
233 range = &ranges[i];
234 /* Get the first/last regions intersecting with the range */
235 damon_for_each_region_from(r, t) {
236 if (damon_intersect(r, range)) {
237 if (!first)
238 first = r;
239 last = r;
240 }
241 if (r->ar.start >= range->end)
242 break;
243 }
244 if (!first) {
245 /* no region intersects with this range */
246 newr = damon_new_region(
247 ALIGN_DOWN(range->start,
248 DAMON_MIN_REGION),
249 ALIGN(range->end, DAMON_MIN_REGION));
250 if (!newr)
251 return -ENOMEM;
252 damon_insert_region(newr, damon_prev_region(r), r, t);
253 } else {
254 /* resize intersecting regions to fit in this range */
255 first->ar.start = ALIGN_DOWN(range->start,
256 DAMON_MIN_REGION);
257 last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
258
259 /* fill possible holes in the range */
260 err = damon_fill_regions_holes(first, last, t);
261 if (err)
262 return err;
263 }
264 }
265 return 0;
266}
267
268struct damos_filter *damos_new_filter(enum damos_filter_type type,
269 bool matching)
270{
271 struct damos_filter *filter;
272
273 filter = kmalloc(sizeof(*filter), GFP_KERNEL);
274 if (!filter)
275 return NULL;
276 filter->type = type;
277 filter->matching = matching;
278 INIT_LIST_HEAD(&filter->list);
279 return filter;
280}
281
282void damos_add_filter(struct damos *s, struct damos_filter *f)
283{
284 list_add_tail(&f->list, &s->filters);
285}
286
287static void damos_del_filter(struct damos_filter *f)
288{
289 list_del(&f->list);
290}
291
292static void damos_free_filter(struct damos_filter *f)
293{
294 kfree(f);
295}
296
297void damos_destroy_filter(struct damos_filter *f)
298{
299 damos_del_filter(f);
300 damos_free_filter(f);
301}
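/*
 * Example: a minimal sketch of attaching an address range filter to a scheme.
 * The range values are hypothetical, and whether matching memory is included
 * or excluded follows the 'matching' semantics defined in <linux/damon.h>.
 *
 *	struct damos_filter *f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	f->addr_range = (struct damon_addr_range){
 *		.start = 0x100000000UL,
 *		.end = 0x200000000UL,
 *	};
 *	damos_add_filter(s, f);
 */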
302
303struct damos_quota_goal *damos_new_quota_goal(
304 enum damos_quota_goal_metric metric,
305 unsigned long target_value)
306{
307 struct damos_quota_goal *goal;
308
309 goal = kmalloc(sizeof(*goal), GFP_KERNEL);
310 if (!goal)
311 return NULL;
312 goal->metric = metric;
313 goal->target_value = target_value;
314 INIT_LIST_HEAD(&goal->list);
315 return goal;
316}
317
318void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
319{
320 list_add_tail(&g->list, &q->goals);
321}
322
323static void damos_del_quota_goal(struct damos_quota_goal *g)
324{
325 list_del(&g->list);
326}
327
328static void damos_free_quota_goal(struct damos_quota_goal *g)
329{
330 kfree(g);
331}
332
333void damos_destroy_quota_goal(struct damos_quota_goal *g)
334{
335 damos_del_quota_goal(g);
336 damos_free_quota_goal(g);
337}
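/*
 * Example: a minimal sketch of adding a user-fed quota goal to a scheme's
 * quota.  The target value of 10000 is arbitrary; for DAMOS_QUOTA_USER_INPUT
 * the caller keeps goal->current_value up to date, as
 * damos_commit_quota_goal() below implies.
 *
 *	struct damos_quota_goal *g;
 *
 *	g = damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000);
 *	if (!g)
 *		return -ENOMEM;
 *	damos_add_quota_goal(&s->quota, g);
 */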
338
339/* initialize fields of @quota that normally API users wouldn't set */
340static struct damos_quota *damos_quota_init(struct damos_quota *quota)
341{
342 quota->esz = 0;
343 quota->total_charged_sz = 0;
344 quota->total_charged_ns = 0;
345 quota->charged_sz = 0;
346 quota->charged_from = 0;
347 quota->charge_target_from = NULL;
348 quota->charge_addr_from = 0;
349 quota->esz_bp = 0;
350 return quota;
351}
352
353struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
354 enum damos_action action,
355 unsigned long apply_interval_us,
356 struct damos_quota *quota,
357 struct damos_watermarks *wmarks,
358 int target_nid)
359{
360 struct damos *scheme;
361
362 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
363 if (!scheme)
364 return NULL;
365 scheme->pattern = *pattern;
366 scheme->action = action;
367 scheme->apply_interval_us = apply_interval_us;
368 /*
369 * next_apply_sis will be set when kdamond starts. While kdamond is
370 * running, it will also be updated when it is added to the DAMON context,
371 * or damon_attrs are updated.
372 */
373 scheme->next_apply_sis = 0;
374 INIT_LIST_HEAD(&scheme->filters);
375 scheme->stat = (struct damos_stat){};
376 INIT_LIST_HEAD(&scheme->list);
377
378 scheme->quota = *(damos_quota_init(quota));
379 /* quota.goals should be separately set by caller */
380 INIT_LIST_HEAD(&scheme->quota.goals);
381
382 scheme->wmarks = *wmarks;
383 scheme->wmarks.activated = true;
384
385 scheme->target_nid = target_nid;
386
387 return scheme;
388}
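/*
 * Example: a minimal sketch for the extended constructor, which adds a
 * per-scheme apply interval (0 falls back to the aggregation interval, see
 * damos_set_next_apply_sis() below) and a migration target node.  DAMOS_STAT
 * only collects statistics, and NUMA_NO_NODE is assumed for schemes that do
 * not migrate memory.
 *
 *	struct damos_access_pattern pattern = {
 *		.max_sz_region = ULONG_MAX,
 *		.max_nr_accesses = UINT_MAX,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_STAT, 0, &quota, &wmarks,
 *			NUMA_NO_NODE);
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);
 */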
389
390static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
391{
392 unsigned long sample_interval = ctx->attrs.sample_interval ?
393 ctx->attrs.sample_interval : 1;
394 unsigned long apply_interval = s->apply_interval_us ?
395 s->apply_interval_us : ctx->attrs.aggr_interval;
396
397 s->next_apply_sis = ctx->passed_sample_intervals +
398 apply_interval / sample_interval;
399}
400
401void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
402{
403 list_add_tail(&s->list, &ctx->schemes);
404 damos_set_next_apply_sis(s, ctx);
405}
406
407static void damon_del_scheme(struct damos *s)
408{
409 list_del(&s->list);
410}
411
412static void damon_free_scheme(struct damos *s)
413{
414 kfree(s);
415}
416
417void damon_destroy_scheme(struct damos *s)
418{
419 struct damos_quota_goal *g, *g_next;
420 struct damos_filter *f, *next;
421
422 damos_for_each_quota_goal_safe(g, g_next, &s->quota)
423 damos_destroy_quota_goal(g);
424
425 damos_for_each_filter_safe(f, next, s)
426 damos_destroy_filter(f);
427 damon_del_scheme(s);
428 damon_free_scheme(s);
429}
430
431/*
432 * Construct a damon_target struct
433 *
434 * Returns the pointer to the new struct on success, or NULL otherwise
435 */
436struct damon_target *damon_new_target(void)
437{
438 struct damon_target *t;
439
440 t = kmalloc(sizeof(*t), GFP_KERNEL);
441 if (!t)
442 return NULL;
443
444 t->pid = NULL;
445 t->nr_regions = 0;
446 INIT_LIST_HEAD(&t->regions_list);
447 INIT_LIST_HEAD(&t->list);
448
449 return t;
450}
451
452void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
453{
454 list_add_tail(&t->list, &ctx->adaptive_targets);
455}
456
457bool damon_targets_empty(struct damon_ctx *ctx)
458{
459 return list_empty(&ctx->adaptive_targets);
460}
461
462static void damon_del_target(struct damon_target *t)
463{
464 list_del(&t->list);
465}
466
467void damon_free_target(struct damon_target *t)
468{
469 struct damon_region *r, *next;
470
471 damon_for_each_region_safe(r, next, t)
472 damon_free_region(r);
473 kfree(t);
474}
475
476void damon_destroy_target(struct damon_target *t)
477{
478 damon_del_target(t);
479 damon_free_target(t);
480}
481
482unsigned int damon_nr_regions(struct damon_target *t)
483{
484 return t->nr_regions;
485}
486
487struct damon_ctx *damon_new_ctx(void)
488{
489 struct damon_ctx *ctx;
490
491 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
492 if (!ctx)
493 return NULL;
494
495 init_completion(&ctx->kdamond_started);
496
497 ctx->attrs.sample_interval = 5 * 1000;
498 ctx->attrs.aggr_interval = 100 * 1000;
499 ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
500
501 ctx->passed_sample_intervals = 0;
502 /* These will be set from kdamond_init_intervals_sis() */
503 ctx->next_aggregation_sis = 0;
504 ctx->next_ops_update_sis = 0;
505
506 mutex_init(&ctx->kdamond_lock);
507
508 ctx->attrs.min_nr_regions = 10;
509 ctx->attrs.max_nr_regions = 1000;
510
511 INIT_LIST_HEAD(&ctx->adaptive_targets);
512 INIT_LIST_HEAD(&ctx->schemes);
513
514 return ctx;
515}
516
517static void damon_destroy_targets(struct damon_ctx *ctx)
518{
519 struct damon_target *t, *next_t;
520
521 if (ctx->ops.cleanup) {
522 ctx->ops.cleanup(ctx);
523 return;
524 }
525
526 damon_for_each_target_safe(t, next_t, ctx)
527 damon_destroy_target(t);
528}
529
530void damon_destroy_ctx(struct damon_ctx *ctx)
531{
532 struct damos *s, *next_s;
533
534 damon_destroy_targets(ctx);
535
536 damon_for_each_scheme_safe(s, next_s, ctx)
537 damon_destroy_scheme(s);
538
539 kfree(ctx);
540}
541
542static unsigned int damon_age_for_new_attrs(unsigned int age,
543 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
544{
545 return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
546}
547
548/* convert access ratio in bp (per 10,000) to nr_accesses */
549static unsigned int damon_accesses_bp_to_nr_accesses(
550 unsigned int accesses_bp, struct damon_attrs *attrs)
551{
552 return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
553}
554
555/*
556 * Convert nr_accesses to access ratio in bp (per 10,000).
557 *
558 * Callers should ensure attrs.aggr_interval is not zero, like
559 * damon_update_monitoring_results() does. Otherwise, a divide-by-zero would
560 * happen.
561 */
562static unsigned int damon_nr_accesses_to_accesses_bp(
563 unsigned int nr_accesses, struct damon_attrs *attrs)
564{
565 return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
566}
567
568static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
569 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
570{
571 return damon_accesses_bp_to_nr_accesses(
572 damon_nr_accesses_to_accesses_bp(
573 nr_accesses, old_attrs),
574 new_attrs);
575}
576
577static void damon_update_monitoring_result(struct damon_region *r,
578 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
579{
580 r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
581 old_attrs, new_attrs);
582 r->nr_accesses_bp = r->nr_accesses * 10000;
583 r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
584}
585
586/*
587 * region->nr_accesses is the number of sampling intervals in the last
588 * aggregation interval in which access to the region has been found, and
589 * region->age is the number of aggregation intervals for which its access
590 * pattern has been maintained. For this reason, the real meaning of the two
591 * fields depends on the current sampling and aggregation intervals. This
592 * function updates ->nr_accesses and ->age of @ctx's regions for @new_attrs.
593 */
594static void damon_update_monitoring_results(struct damon_ctx *ctx,
595 struct damon_attrs *new_attrs)
596{
597 struct damon_attrs *old_attrs = &ctx->attrs;
598 struct damon_target *t;
599 struct damon_region *r;
600
601 /* if any interval is zero, simply forgo the conversion */
602 if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
603 !new_attrs->sample_interval ||
604 !new_attrs->aggr_interval)
605 return;
606
607 damon_for_each_target(t, ctx)
608 damon_for_each_region(r, t)
609 damon_update_monitoring_result(
610 r, old_attrs, new_attrs);
611}
612
613/**
614 * damon_set_attrs() - Set attributes for the monitoring.
615 * @ctx: monitoring context
616 * @attrs: monitoring attributes
617 *
618 * This function should be called while the kdamond is not running, or an
619 * access check results aggregation is not ongoing (e.g., from
620 * &struct damon_callback->after_aggregation or
621 * &struct damon_callback->after_wmarks_check callbacks).
622 *
623 * Every time interval is in micro-seconds.
624 *
625 * Return: 0 on success, negative error code otherwise.
626 */
627int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
628{
629 unsigned long sample_interval = attrs->sample_interval ?
630 attrs->sample_interval : 1;
631 struct damos *s;
632
633 if (attrs->min_nr_regions < 3)
634 return -EINVAL;
635 if (attrs->min_nr_regions > attrs->max_nr_regions)
636 return -EINVAL;
637 if (attrs->sample_interval > attrs->aggr_interval)
638 return -EINVAL;
639
640 ctx->next_aggregation_sis = ctx->passed_sample_intervals +
641 attrs->aggr_interval / sample_interval;
642 ctx->next_ops_update_sis = ctx->passed_sample_intervals +
643 attrs->ops_update_interval / sample_interval;
644
645 damon_update_monitoring_results(ctx, attrs);
646 ctx->attrs = *attrs;
647
648 damon_for_each_scheme(s, ctx)
649 damos_set_next_apply_sis(s, ctx);
650
651 return 0;
652}
653
654/**
655 * damon_set_schemes() - Set data access monitoring based operation schemes.
656 * @ctx: monitoring context
657 * @schemes: array of the schemes
658 * @nr_schemes: number of entries in @schemes
659 *
660 * This function should not be called while the kdamond of the context is
661 * running.
662 */
663void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
664 ssize_t nr_schemes)
665{
666 struct damos *s, *next;
667 ssize_t i;
668
669 damon_for_each_scheme_safe(s, next, ctx)
670 damon_destroy_scheme(s);
671 for (i = 0; i < nr_schemes; i++)
672 damon_add_scheme(ctx, schemes[i]);
673}
674
675static struct damos_quota_goal *damos_nth_quota_goal(
676 int n, struct damos_quota *q)
677{
678 struct damos_quota_goal *goal;
679 int i = 0;
680
681 damos_for_each_quota_goal(goal, q) {
682 if (i++ == n)
683 return goal;
684 }
685 return NULL;
686}
687
688static void damos_commit_quota_goal(
689 struct damos_quota_goal *dst, struct damos_quota_goal *src)
690{
691 dst->metric = src->metric;
692 dst->target_value = src->target_value;
693 if (dst->metric == DAMOS_QUOTA_USER_INPUT)
694 dst->current_value = src->current_value;
695 /* keep last_psi_total as is, since it will be updated in next cycle */
696}
697
698/**
699 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
700 * @dst: The commit destination DAMOS quota.
701 * @src: The commit source DAMOS quota.
702 *
703 * Copies user-specified parameters for quota goals from @src to @dst. Users
704 * should use this function to update quota goal-level parameters of running
705 * DAMON contexts, instead of manual in-place updates.
706 *
707 * This function should be called from parameters-update safe context, like
708 * DAMON callbacks.
709 */
710int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
711{
712 struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
713 int i = 0, j = 0;
714
715 damos_for_each_quota_goal_safe(dst_goal, next, dst) {
716 src_goal = damos_nth_quota_goal(i++, src);
717 if (src_goal)
718 damos_commit_quota_goal(dst_goal, src_goal);
719 else
720 damos_destroy_quota_goal(dst_goal);
721 }
722 damos_for_each_quota_goal_safe(src_goal, next, src) {
723 if (j++ < i)
724 continue;
725 new_goal = damos_new_quota_goal(
726 src_goal->metric, src_goal->target_value);
727 if (!new_goal)
728 return -ENOMEM;
729 damos_add_quota_goal(dst, new_goal);
730 }
731 return 0;
732}
733
734static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
735{
736 int err;
737
738 dst->reset_interval = src->reset_interval;
739 dst->ms = src->ms;
740 dst->sz = src->sz;
741 err = damos_commit_quota_goals(dst, src);
742 if (err)
743 return err;
744 dst->weight_sz = src->weight_sz;
745 dst->weight_nr_accesses = src->weight_nr_accesses;
746 dst->weight_age = src->weight_age;
747 return 0;
748}
749
750static struct damos_filter *damos_nth_filter(int n, struct damos *s)
751{
752 struct damos_filter *filter;
753 int i = 0;
754
755 damos_for_each_filter(filter, s) {
756 if (i++ == n)
757 return filter;
758 }
759 return NULL;
760}
761
762static void damos_commit_filter_arg(
763 struct damos_filter *dst, struct damos_filter *src)
764{
765 switch (dst->type) {
766 case DAMOS_FILTER_TYPE_MEMCG:
767 dst->memcg_id = src->memcg_id;
768 break;
769 case DAMOS_FILTER_TYPE_ADDR:
770 dst->addr_range = src->addr_range;
771 break;
772 case DAMOS_FILTER_TYPE_TARGET:
773 dst->target_idx = src->target_idx;
774 break;
775 default:
776 break;
777 }
778}
779
780static void damos_commit_filter(
781 struct damos_filter *dst, struct damos_filter *src)
782{
783 dst->type = src->type;
784 dst->matching = src->matching;
785 damos_commit_filter_arg(dst, src);
786}
787
788static int damos_commit_filters(struct damos *dst, struct damos *src)
789{
790 struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
791 int i = 0, j = 0;
792
793 damos_for_each_filter_safe(dst_filter, next, dst) {
794 src_filter = damos_nth_filter(i++, src);
795 if (src_filter)
796 damos_commit_filter(dst_filter, src_filter);
797 else
798 damos_destroy_filter(dst_filter);
799 }
800
801 damos_for_each_filter_safe(src_filter, next, src) {
802 if (j++ < i)
803 continue;
804
805 new_filter = damos_new_filter(
806 src_filter->type, src_filter->matching);
807 if (!new_filter)
808 return -ENOMEM;
809 damos_commit_filter_arg(new_filter, src_filter);
810 damos_add_filter(dst, new_filter);
811 }
812 return 0;
813}
814
815static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
816{
817 struct damos *s;
818 int i = 0;
819
820 damon_for_each_scheme(s, ctx) {
821 if (i++ == n)
822 return s;
823 }
824 return NULL;
825}
826
827static int damos_commit(struct damos *dst, struct damos *src)
828{
829 int err;
830
831 dst->pattern = src->pattern;
832 dst->action = src->action;
833 dst->apply_interval_us = src->apply_interval_us;
834
835 err = damos_commit_quota(&dst->quota, &src->quota);
836 if (err)
837 return err;
838
839 dst->wmarks = src->wmarks;
840
841 err = damos_commit_filters(dst, src);
842 return err;
843}
844
845static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
846{
847 struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
848 int i = 0, j = 0, err;
849
850 damon_for_each_scheme_safe(dst_scheme, next, dst) {
851 src_scheme = damon_nth_scheme(i++, src);
852 if (src_scheme) {
853 err = damos_commit(dst_scheme, src_scheme);
854 if (err)
855 return err;
856 } else {
857 damon_destroy_scheme(dst_scheme);
858 }
859 }
860
861 damon_for_each_scheme_safe(src_scheme, next, src) {
862 if (j++ < i)
863 continue;
864 new_scheme = damon_new_scheme(&src_scheme->pattern,
865 src_scheme->action,
866 src_scheme->apply_interval_us,
867 &src_scheme->quota, &src_scheme->wmarks,
868 NUMA_NO_NODE);
869 if (!new_scheme)
870 return -ENOMEM;
871 err = damos_commit(new_scheme, src_scheme);
872 if (err) {
873 damon_destroy_scheme(new_scheme);
874 return err;
875 }
876 damon_add_scheme(dst, new_scheme);
877 }
878 return 0;
879}
880
881static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
882{
883 struct damon_target *t;
884 int i = 0;
885
886 damon_for_each_target(t, ctx) {
887 if (i++ == n)
888 return t;
889 }
890 return NULL;
891}
892
893/*
894 * The caller should ensure the regions of @src are
 * 1. valid (end >= start) and
896 * 2. sorted by starting address.
897 *
898 * If @src has no region, @dst keeps current regions.
899 */
900static int damon_commit_target_regions(
901 struct damon_target *dst, struct damon_target *src)
902{
903 struct damon_region *src_region;
904 struct damon_addr_range *ranges;
905 int i = 0, err;
906
907 damon_for_each_region(src_region, src)
908 i++;
909 if (!i)
910 return 0;
911
912 ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
913 if (!ranges)
914 return -ENOMEM;
915 i = 0;
916 damon_for_each_region(src_region, src)
917 ranges[i++] = src_region->ar;
918 err = damon_set_regions(dst, ranges, i);
919 kfree(ranges);
920 return err;
921}
922
923static int damon_commit_target(
924 struct damon_target *dst, bool dst_has_pid,
925 struct damon_target *src, bool src_has_pid)
926{
927 int err;
928
929 err = damon_commit_target_regions(dst, src);
930 if (err)
931 return err;
932 if (dst_has_pid)
933 put_pid(dst->pid);
934 if (src_has_pid)
935 get_pid(src->pid);
936 dst->pid = src->pid;
937 return 0;
938}
939
940static int damon_commit_targets(
941 struct damon_ctx *dst, struct damon_ctx *src)
942{
943 struct damon_target *dst_target, *next, *src_target, *new_target;
944 int i = 0, j = 0, err;
945
946 damon_for_each_target_safe(dst_target, next, dst) {
947 src_target = damon_nth_target(i++, src);
948 if (src_target) {
949 err = damon_commit_target(
950 dst_target, damon_target_has_pid(dst),
951 src_target, damon_target_has_pid(src));
952 if (err)
953 return err;
954 } else {
955 if (damon_target_has_pid(dst))
956 put_pid(dst_target->pid);
957 damon_destroy_target(dst_target);
958 }
959 }
960
961 damon_for_each_target_safe(src_target, next, src) {
962 if (j++ < i)
963 continue;
964 new_target = damon_new_target();
965 if (!new_target)
966 return -ENOMEM;
967 err = damon_commit_target(new_target, false,
968 src_target, damon_target_has_pid(src));
969 if (err) {
970 damon_destroy_target(new_target);
971 return err;
972 }
973 damon_add_target(dst, new_target);
974 }
975 return 0;
976}
977
978/**
979 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
980 * @dst: The commit destination DAMON context.
981 * @src: The commit source DAMON context.
982 *
 * This function copies user-specified parameters from @src to @dst and
 * updates the internal status and results accordingly. Users should use this
 * function for updating context-level parameters of a running context,
 * instead of manual in-place updates.
 *
 * This function should be called from a parameters-update safe context, like
 * DAMON callbacks.
 *
 * Return: 0 on success, negative error code otherwise.
 */
991int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
992{
993 int err;
994
995 err = damon_commit_schemes(dst, src);
996 if (err)
997 return err;
998 err = damon_commit_targets(dst, src);
999 if (err)
1000 return err;
1001 /*
1002 * schemes and targets should be updated first, since
1003 * 1. damon_set_attrs() updates monitoring results of targets and
1004 * next_apply_sis of schemes, and
	 * 2. ops update should be done after pid handling is done (target
	 * committing requires putting pids).
1007 */
1008 err = damon_set_attrs(dst, &src->attrs);
1009 if (err)
1010 return err;
1011 dst->ops = src->ops;
1012
1013 return 0;
1014}
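
/*
 * A minimal usage sketch (illustrative only): to change parameters of a
 * running context, a caller could build a separate context holding only the
 * new parameters and commit it from a parameters-update safe context, e.g.,
 *
 *	err = damon_commit_ctx(running_ctx, new_parameters_ctx);
 *
 * where 'running_ctx' and 'new_parameters_ctx' are placeholder names.
 */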
1015
1016/**
1017 * damon_nr_running_ctxs() - Return number of currently running contexts.
1018 */
1019int damon_nr_running_ctxs(void)
1020{
1021 int nr_ctxs;
1022
1023 mutex_lock(&damon_lock);
1024 nr_ctxs = nr_running_ctxs;
1025 mutex_unlock(&damon_lock);
1026
1027 return nr_ctxs;
1028}
1029
1030/* Returns the size upper limit for each monitoring region */
1031static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1032{
1033 struct damon_target *t;
1034 struct damon_region *r;
1035 unsigned long sz = 0;
1036
1037 damon_for_each_target(t, ctx) {
1038 damon_for_each_region(r, t)
1039 sz += damon_sz_region(r);
1040 }
1041
1042 if (ctx->attrs.min_nr_regions)
1043 sz /= ctx->attrs.min_nr_regions;
1044 if (sz < DAMON_MIN_REGION)
1045 sz = DAMON_MIN_REGION;
1046
1047 return sz;
1048}
1049
1050static int kdamond_fn(void *data);
1051
1052/*
 * __damon_start() - Starts monitoring with the given context.
 * @ctx: monitoring context
 *
 * This function should be called while damon_lock is held.
1057 *
1058 * Return: 0 on success, negative error code otherwise.
1059 */
1060static int __damon_start(struct damon_ctx *ctx)
1061{
1062 int err = -EBUSY;
1063
1064 mutex_lock(&ctx->kdamond_lock);
1065 if (!ctx->kdamond) {
1066 err = 0;
1067 reinit_completion(&ctx->kdamond_started);
1068 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1069 nr_running_ctxs);
1070 if (IS_ERR(ctx->kdamond)) {
1071 err = PTR_ERR(ctx->kdamond);
1072 ctx->kdamond = NULL;
1073 } else {
1074 wait_for_completion(&ctx->kdamond_started);
1075 }
1076 }
1077 mutex_unlock(&ctx->kdamond_lock);
1078
1079 return err;
1080}
1081
1082/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs: an array of pointers to the contexts to start monitoring
 * @nr_ctxs: size of @ctxs
 * @exclusive: exclusiveness of this group of contexts
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts. One thread per context is created and run in parallel.
 * The caller should handle synchronization between the threads by itself. If
 * @exclusive is true and a group of threads created by another damon_start()
 * call is currently running, this function does nothing but returns -EBUSY.
1094 *
1095 * Return: 0 on success, negative error code otherwise.
1096 */
1097int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1098{
1099 int i;
1100 int err = 0;
1101
1102 mutex_lock(&damon_lock);
1103 if ((exclusive && nr_running_ctxs) ||
1104 (!exclusive && running_exclusive_ctxs)) {
1105 mutex_unlock(&damon_lock);
1106 return -EBUSY;
1107 }
1108
1109 for (i = 0; i < nr_ctxs; i++) {
1110 err = __damon_start(ctxs[i]);
1111 if (err)
1112 break;
1113 nr_running_ctxs++;
1114 }
1115 if (exclusive && nr_running_ctxs)
1116 running_exclusive_ctxs = true;
1117 mutex_unlock(&damon_lock);
1118
1119 return err;
1120}
1121
1122/*
1123 * __damon_stop() - Stops monitoring of a given context.
1124 * @ctx: monitoring context
1125 *
1126 * Return: 0 on success, negative error code otherwise.
1127 */
1128static int __damon_stop(struct damon_ctx *ctx)
1129{
1130 struct task_struct *tsk;
1131
1132 mutex_lock(&ctx->kdamond_lock);
1133 tsk = ctx->kdamond;
1134 if (tsk) {
1135 get_task_struct(tsk);
1136 mutex_unlock(&ctx->kdamond_lock);
1137 kthread_stop_put(tsk);
1138 return 0;
1139 }
1140 mutex_unlock(&ctx->kdamond_lock);
1141
1142 return -EPERM;
1143}
1144
1145/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs: an array of pointers to the contexts to stop monitoring
1148 * @nr_ctxs: size of @ctxs
1149 *
1150 * Return: 0 on success, negative error code otherwise.
1151 */
1152int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1153{
1154 int i, err = 0;
1155
1156 for (i = 0; i < nr_ctxs; i++) {
1157 /* nr_running_ctxs is decremented in kdamond_fn */
1158 err = __damon_stop(ctxs[i]);
1159 if (err)
1160 break;
1161 }
1162 return err;
1163}
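
/*
 * A minimal start/stop usage sketch (illustrative only; target and attribute
 * setup, and error handling, are omitted):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	int err;
 *
 *	err = damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	if (!err)
 *		err = damon_start(&ctx, 1, true);
 *	...
 *	if (!err)
 *		damon_stop(&ctx, 1);
 */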
1164
1165/*
1166 * Reset the aggregated monitoring results ('nr_accesses' of each region).
1167 */
1168static void kdamond_reset_aggregated(struct damon_ctx *c)
1169{
1170 struct damon_target *t;
1171 unsigned int ti = 0; /* target's index */
1172
1173 damon_for_each_target(t, c) {
1174 struct damon_region *r;
1175
1176 damon_for_each_region(r, t) {
1177 trace_damon_aggregated(ti, r, damon_nr_regions(t));
1178 r->last_nr_accesses = r->nr_accesses;
1179 r->nr_accesses = 0;
1180 }
1181 ti++;
1182 }
1183}
1184
1185static void damon_split_region_at(struct damon_target *t,
1186 struct damon_region *r, unsigned long sz_r);
1187
1188static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1189{
1190 unsigned long sz;
1191 unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1192
1193 sz = damon_sz_region(r);
1194 return s->pattern.min_sz_region <= sz &&
1195 sz <= s->pattern.max_sz_region &&
1196 s->pattern.min_nr_accesses <= nr_accesses &&
1197 nr_accesses <= s->pattern.max_nr_accesses &&
1198 s->pattern.min_age_region <= r->age &&
1199 r->age <= s->pattern.max_age_region;
1200}
1201
1202static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1203 struct damon_region *r, struct damos *s)
1204{
1205 bool ret = __damos_valid_target(r, s);
1206
1207 if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1208 return ret;
1209
1210 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1211}
1212
1213/*
1214 * damos_skip_charged_region() - Check if the given region or starting part of
1215 * it is already charged for the DAMOS quota.
1216 * @t: The target of the region.
1217 * @rp: The pointer to the region.
1218 * @s: The scheme to be applied.
1219 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions fulfilling
 * the target access pattern. To avoid applying the scheme action only to
 * already applied regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped for that reason.
 * If only the starting part of the region was previously charged, this
 * function splits the region into two so that the second one covers the area
 * that was not charged in the previous charge window, saves the second region
 * in *rp, and returns false, so that the caller can apply the DAMON action to
 * the second one.
1232 *
1233 * Return: true if the region should be entirely skipped, false otherwise.
1234 */
1235static bool damos_skip_charged_region(struct damon_target *t,
1236 struct damon_region **rp, struct damos *s)
1237{
1238 struct damon_region *r = *rp;
1239 struct damos_quota *quota = &s->quota;
1240 unsigned long sz_to_skip;
1241
1242 /* Skip previously charged regions */
1243 if (quota->charge_target_from) {
1244 if (t != quota->charge_target_from)
1245 return true;
1246 if (r == damon_last_region(t)) {
1247 quota->charge_target_from = NULL;
1248 quota->charge_addr_from = 0;
1249 return true;
1250 }
1251 if (quota->charge_addr_from &&
1252 r->ar.end <= quota->charge_addr_from)
1253 return true;
1254
1255 if (quota->charge_addr_from && r->ar.start <
1256 quota->charge_addr_from) {
1257 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1258 r->ar.start, DAMON_MIN_REGION);
1259 if (!sz_to_skip) {
1260 if (damon_sz_region(r) <= DAMON_MIN_REGION)
1261 return true;
1262 sz_to_skip = DAMON_MIN_REGION;
1263 }
1264 damon_split_region_at(t, r, sz_to_skip);
1265 r = damon_next_region(r);
1266 *rp = r;
1267 }
1268 quota->charge_target_from = NULL;
1269 quota->charge_addr_from = 0;
1270 }
1271 return false;
1272}
1273
1274static void damos_update_stat(struct damos *s,
1275 unsigned long sz_tried, unsigned long sz_applied)
1276{
1277 s->stat.nr_tried++;
1278 s->stat.sz_tried += sz_tried;
1279 if (sz_applied)
1280 s->stat.nr_applied++;
1281 s->stat.sz_applied += sz_applied;
1282}
1283
1284static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1285 struct damon_region *r, struct damos_filter *filter)
1286{
1287 bool matched = false;
1288 struct damon_target *ti;
1289 int target_idx = 0;
1290 unsigned long start, end;
1291
1292 switch (filter->type) {
1293 case DAMOS_FILTER_TYPE_TARGET:
1294 damon_for_each_target(ti, ctx) {
1295 if (ti == t)
1296 break;
1297 target_idx++;
1298 }
1299 matched = target_idx == filter->target_idx;
1300 break;
1301 case DAMOS_FILTER_TYPE_ADDR:
1302 start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
1303 end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
1304
1305 /* inside the range */
1306 if (start <= r->ar.start && r->ar.end <= end) {
1307 matched = true;
1308 break;
1309 }
1310 /* outside of the range */
1311 if (r->ar.end <= start || end <= r->ar.start) {
1312 matched = false;
1313 break;
1314 }
1315 /* start before the range and overlap */
1316 if (r->ar.start < start) {
1317 damon_split_region_at(t, r, start - r->ar.start);
1318 matched = false;
1319 break;
1320 }
1321 /* start inside the range */
1322 damon_split_region_at(t, r, end - r->ar.start);
1323 matched = true;
1324 break;
1325 default:
1326 return false;
1327 }
1328
1329 return matched == filter->matching;
1330}
1331
1332static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1333 struct damon_region *r, struct damos *s)
1334{
1335 struct damos_filter *filter;
1336
1337 damos_for_each_filter(filter, s) {
1338 if (__damos_filter_out(ctx, t, r, filter))
1339 return true;
1340 }
1341 return false;
1342}
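
/*
 * For example (illustrative only), a filter of DAMOS_FILTER_TYPE_ADDR with
 * 'matching' set to true and an address range covering a whole region makes
 * damos_filter_out() return true for that region, so the scheme action is not
 * applied to it.
 */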
1343
1344static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
1345 struct damon_region *r, struct damos *s)
1346{
1347 struct damos_quota *quota = &s->quota;
1348 unsigned long sz = damon_sz_region(r);
1349 struct timespec64 begin, end;
1350 unsigned long sz_applied = 0;
1351 int err = 0;
1352 /*
	 * We plan to support multiple contexts per kdamond, as the DAMON sysfs
	 * interface implies with its 'nr_contexts' file. Nevertheless, only a
	 * single context per kdamond is supported for now. So, we can simply
	 * use context index '0' here.
1357 */
1358 unsigned int cidx = 0;
1359 struct damos *siter; /* schemes iterator */
1360 unsigned int sidx = 0;
1361 struct damon_target *titer; /* targets iterator */
1362 unsigned int tidx = 0;
1363 bool do_trace = false;
1364
1365 /* get indices for trace_damos_before_apply() */
1366 if (trace_damos_before_apply_enabled()) {
1367 damon_for_each_scheme(siter, c) {
1368 if (siter == s)
1369 break;
1370 sidx++;
1371 }
1372 damon_for_each_target(titer, c) {
1373 if (titer == t)
1374 break;
1375 tidx++;
1376 }
1377 do_trace = true;
1378 }
1379
1380 if (c->ops.apply_scheme) {
1381 if (quota->esz && quota->charged_sz + sz > quota->esz) {
1382 sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1383 DAMON_MIN_REGION);
1384 if (!sz)
1385 goto update_stat;
1386 damon_split_region_at(t, r, sz);
1387 }
1388 if (damos_filter_out(c, t, r, s))
1389 return;
1390 ktime_get_coarse_ts64(&begin);
1391 if (c->callback.before_damos_apply)
1392 err = c->callback.before_damos_apply(c, t, r, s);
1393 if (!err) {
1394 trace_damos_before_apply(cidx, sidx, tidx, r,
1395 damon_nr_regions(t), do_trace);
1396 sz_applied = c->ops.apply_scheme(c, t, r, s);
1397 }
1398 ktime_get_coarse_ts64(&end);
1399 quota->total_charged_ns += timespec64_to_ns(&end) -
1400 timespec64_to_ns(&begin);
1401 quota->charged_sz += sz;
1402 if (quota->esz && quota->charged_sz >= quota->esz) {
1403 quota->charge_target_from = t;
1404 quota->charge_addr_from = r->ar.end + 1;
1405 }
1406 }
1407 if (s->action != DAMOS_STAT)
1408 r->age = 0;
1409
1410update_stat:
1411 damos_update_stat(s, sz, sz_applied);
1412}
1413
1414static void damon_do_apply_schemes(struct damon_ctx *c,
1415 struct damon_target *t,
1416 struct damon_region *r)
1417{
1418 struct damos *s;
1419
1420 damon_for_each_scheme(s, c) {
1421 struct damos_quota *quota = &s->quota;
1422
1423 if (c->passed_sample_intervals < s->next_apply_sis)
1424 continue;
1425
1426 if (!s->wmarks.activated)
1427 continue;
1428
1429 /* Check the quota */
1430 if (quota->esz && quota->charged_sz >= quota->esz)
1431 continue;
1432
1433 if (damos_skip_charged_region(t, &r, s))
1434 continue;
1435
1436 if (!damos_valid_target(c, t, r, s))
1437 continue;
1438
1439 damos_apply_scheme(c, t, r, s);
1440 }
1441}
1442
1443/*
 * damon_feed_loop_next_input() - Get next input to achieve a target score.
 * @last_input:	The last input.
 * @score:	Current score that was made with @last_input.
 *
 * Calculate the next input to achieve the target score, based on the last
 * input and the current score. Assuming the input and the score are
 * positively proportional, calculate how much compensation should be added to
 * or subtracted from the last input, as a proportion of the last input. To
 * avoid the next input becoming zero, it is always kept non-zero. In short
 * form (assuming support of float and signed calculations), the algorithm is
 * as below.
 *
 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 *
 * For a simple implementation, we assume the target score is always 10,000.
 * The caller should adjust @score for this.
 *
 * Returns the next input that is assumed to achieve the target score.
1462 */
1463static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1464 unsigned long score)
1465{
1466 const unsigned long goal = 10000;
	/* Set the minimum input as 10000 to avoid the compensation being zero */
1468 const unsigned long min_input = 10000;
1469 unsigned long score_goal_diff, compensation;
1470 bool over_achieving = score > goal;
1471
1472 if (score == goal)
1473 return last_input;
1474 if (score >= goal * 2)
1475 return min_input;
1476
1477 if (over_achieving)
1478 score_goal_diff = score - goal;
1479 else
1480 score_goal_diff = goal - score;
1481
1482 if (last_input < ULONG_MAX / score_goal_diff)
1483 compensation = last_input * score_goal_diff / goal;
1484 else
1485 compensation = last_input / goal * score_goal_diff;
1486
1487 if (over_achieving)
1488 return max(last_input - compensation, min_input);
1489 if (last_input < ULONG_MAX - compensation)
1490 return last_input + compensation;
1491 return ULONG_MAX;
1492}
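
/*
 * Worked example (illustrative only): with @last_input of 1,000,000 and
 * @score of 5,000 (half of the 10,000 goal), score_goal_diff is 5,000 and the
 * compensation is 1,000,000 * 5,000 / 10,000 = 500,000, so the next input is
 * 1,500,000. With @score of 15,000, the same compensation is subtracted
 * instead, and the next input is 500,000.
 */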
1493
1494#ifdef CONFIG_PSI
1495
1496static u64 damos_get_some_mem_psi_total(void)
1497{
1498 if (static_branch_likely(&psi_disabled))
1499 return 0;
1500 return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
1501 NSEC_PER_USEC);
1502}
1503
1504#else /* CONFIG_PSI */
1505
1506static inline u64 damos_get_some_mem_psi_total(void)
1507{
1508 return 0;
1509};
1510
1511#endif /* CONFIG_PSI */
1512
1513static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
1514{
1515 u64 now_psi_total;
1516
1517 switch (goal->metric) {
1518 case DAMOS_QUOTA_USER_INPUT:
1519 /* User should already set goal->current_value */
1520 break;
1521 case DAMOS_QUOTA_SOME_MEM_PSI_US:
1522 now_psi_total = damos_get_some_mem_psi_total();
1523 goal->current_value = now_psi_total - goal->last_psi_total;
1524 goal->last_psi_total = now_psi_total;
1525 break;
1526 default:
1527 break;
1528 }
1529}
1530
1531/* Return the highest score since it makes schemes least aggressive */
1532static unsigned long damos_quota_score(struct damos_quota *quota)
1533{
1534 struct damos_quota_goal *goal;
1535 unsigned long highest_score = 0;
1536
1537 damos_for_each_quota_goal(goal, quota) {
1538 damos_set_quota_goal_current_value(goal);
1539 highest_score = max(highest_score,
1540 goal->current_value * 10000 /
1541 goal->target_value);
1542 }
1543
1544 return highest_score;
1545}
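
/*
 * For example (illustrative only), with two DAMOS_QUOTA_USER_INPUT goals
 * having current/target values of 4,000/10,000 and 600/500, the per-goal
 * scores are 4,000 and 12,000 respectively, so 12,000 is returned.
 */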
1546
1547/*
 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
1549 */
1550static void damos_set_effective_quota(struct damos_quota *quota)
1551{
1552 unsigned long throughput;
1553 unsigned long esz;
1554
1555 if (!quota->ms && list_empty("a->goals)) {
1556 quota->esz = quota->sz;
1557 return;
1558 }
1559
1560 if (!list_empty("a->goals)) {
1561 unsigned long score = damos_quota_score(quota);
1562
1563 quota->esz_bp = damon_feed_loop_next_input(
1564 max(quota->esz_bp, 10000UL),
1565 score);
1566 esz = quota->esz_bp / 10000;
1567 }
1568
1569 if (quota->ms) {
1570 if (quota->total_charged_ns)
1571 throughput = quota->total_charged_sz * 1000000 /
1572 quota->total_charged_ns;
1573 else
1574 throughput = PAGE_SIZE * 1024;
1575 if (!list_empty("a->goals))
1576 esz = min(throughput * quota->ms, esz);
1577 else
1578 esz = throughput * quota->ms;
1579 }
1580
1581 if (quota->sz && quota->sz < esz)
1582 esz = quota->sz;
1583
1584 quota->esz = esz;
1585}
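
/*
 * Worked example (illustrative only, assuming no goals are set): if 4 MiB
 * were charged over 2,000,000 ns so far, the estimated throughput is
 * 4,194,304 * 1,000,000 / 2,000,000 = 2,097,152 bytes per millisecond. With a
 * time quota (->ms) of 10, esz becomes about 20 MiB, and a size quota (->sz)
 * of 16 MiB would further cap it to 16 MiB.
 */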
1586
1587static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
1588{
1589 struct damos_quota *quota = &s->quota;
1590 struct damon_target *t;
1591 struct damon_region *r;
1592 unsigned long cumulated_sz;
1593 unsigned int score, max_score = 0;
1594
1595 if (!quota->ms && !quota->sz && list_empty("a->goals))
1596 return;
1597
1598 /* New charge window starts */
1599 if (time_after_eq(jiffies, quota->charged_from +
1600 msecs_to_jiffies(quota->reset_interval))) {
1601 if (quota->esz && quota->charged_sz >= quota->esz)
1602 s->stat.qt_exceeds++;
1603 quota->total_charged_sz += quota->charged_sz;
1604 quota->charged_from = jiffies;
1605 quota->charged_sz = 0;
1606 damos_set_effective_quota(quota);
1607 }
1608
1609 if (!c->ops.get_scheme_score)
1610 return;
1611
1612 /* Fill up the score histogram */
1613 memset(c->regions_score_histogram, 0,
1614 sizeof(*c->regions_score_histogram) *
1615 (DAMOS_MAX_SCORE + 1));
1616 damon_for_each_target(t, c) {
1617 damon_for_each_region(r, t) {
1618 if (!__damos_valid_target(r, s))
1619 continue;
1620 score = c->ops.get_scheme_score(c, t, r, s);
1621 c->regions_score_histogram[score] +=
1622 damon_sz_region(r);
1623 if (score > max_score)
1624 max_score = score;
1625 }
1626 }
1627
1628 /* Set the min score limit */
1629 for (cumulated_sz = 0, score = max_score; ; score--) {
1630 cumulated_sz += c->regions_score_histogram[score];
1631 if (cumulated_sz >= quota->esz || !score)
1632 break;
1633 }
1634 quota->min_score = score;
1635}
1636
1637static void kdamond_apply_schemes(struct damon_ctx *c)
1638{
1639 struct damon_target *t;
1640 struct damon_region *r, *next_r;
1641 struct damos *s;
1642 unsigned long sample_interval = c->attrs.sample_interval ?
1643 c->attrs.sample_interval : 1;
1644 bool has_schemes_to_apply = false;
1645
1646 damon_for_each_scheme(s, c) {
1647 if (c->passed_sample_intervals < s->next_apply_sis)
1648 continue;
1649
1650 if (!s->wmarks.activated)
1651 continue;
1652
1653 has_schemes_to_apply = true;
1654
1655 damos_adjust_quota(c, s);
1656 }
1657
1658 if (!has_schemes_to_apply)
1659 return;
1660
1661 damon_for_each_target(t, c) {
1662 damon_for_each_region_safe(r, next_r, t)
1663 damon_do_apply_schemes(c, t, r);
1664 }
1665
1666 damon_for_each_scheme(s, c) {
1667 if (c->passed_sample_intervals < s->next_apply_sis)
1668 continue;
1669 s->next_apply_sis = c->passed_sample_intervals +
1670 (s->apply_interval_us ? s->apply_interval_us :
1671 c->attrs.aggr_interval) / sample_interval;
1672 }
1673}
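
/*
 * For example (illustrative only), with a sampling interval of 5,000 us and
 * an apply interval of 1,000,000 us, next_apply_sis above advances by
 * 1,000,000 / 5,000 = 200 passed sample intervals per application.
 */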
1674
1675/*
1676 * Merge two adjacent regions into one region
1677 */
1678static void damon_merge_two_regions(struct damon_target *t,
1679 struct damon_region *l, struct damon_region *r)
1680{
1681 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
1682
1683 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
1684 (sz_l + sz_r);
1685 l->nr_accesses_bp = l->nr_accesses * 10000;
1686 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
1687 l->ar.end = r->ar.end;
1688 damon_destroy_region(r, t);
1689}
1690
1691/*
1692 * Merge adjacent regions having similar access frequencies
1693 *
1694 * t target affected by this merge operation
1695 * thres '->nr_accesses' diff threshold for the merge
1696 * sz_limit size upper limit of each region
1697 */
1698static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
1699 unsigned long sz_limit)
1700{
1701 struct damon_region *r, *prev = NULL, *next;
1702
1703 damon_for_each_region_safe(r, next, t) {
1704 if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
1705 r->age = 0;
1706 else
1707 r->age++;
1708
1709 if (prev && prev->ar.end == r->ar.start &&
1710 abs(prev->nr_accesses - r->nr_accesses) <= thres &&
1711 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
1712 damon_merge_two_regions(t, prev, r);
1713 else
1714 prev = r;
1715 }
1716}
1717
1718/*
1719 * Merge adjacent regions having similar access frequencies
1720 *
1721 * threshold '->nr_accesses' diff threshold for the merge
1722 * sz_limit size upper limit of each region
1723 *
1724 * This function merges monitoring target regions which are adjacent and their
1725 * access frequencies are similar. This is for minimizing the monitoring
1726 * overhead under the dynamically changeable access pattern. If a merge was
1727 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
1728 *
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, in some cases. For example, the user can update
 * max_nr_regions to a number lower than the current number of regions while
 * DAMON is running. For such a case, repeat merging until the limit is met
 * while increasing @threshold up to the possible maximum level.
1734 */
1735static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
1736 unsigned long sz_limit)
1737{
1738 struct damon_target *t;
1739 unsigned int nr_regions;
1740 unsigned int max_thres;
1741
1742 max_thres = c->attrs.aggr_interval /
1743 (c->attrs.sample_interval ? c->attrs.sample_interval : 1);
1744 do {
1745 nr_regions = 0;
1746 damon_for_each_target(t, c) {
1747 damon_merge_regions_of(t, threshold, sz_limit);
1748 nr_regions += damon_nr_regions(t);
1749 }
1750 threshold = max(1, threshold * 2);
1751 } while (nr_regions > c->attrs.max_nr_regions &&
1752 threshold / 2 < max_thres);
1753}
1754
1755/*
1756 * Split a region in two
1757 *
1758 * r the region to be split
1759 * sz_r size of the first sub-region that will be made
1760 */
1761static void damon_split_region_at(struct damon_target *t,
1762 struct damon_region *r, unsigned long sz_r)
1763{
1764 struct damon_region *new;
1765
1766 new = damon_new_region(r->ar.start + sz_r, r->ar.end);
1767 if (!new)
1768 return;
1769
1770 r->ar.end = new->ar.start;
1771
1772 new->age = r->age;
1773 new->last_nr_accesses = r->last_nr_accesses;
1774 new->nr_accesses_bp = r->nr_accesses_bp;
1775 new->nr_accesses = r->nr_accesses;
1776
1777 damon_insert_region(new, r, damon_next_region(r), t);
1778}
1779
1780/* Split every region in the given target into 'nr_subs' regions */
1781static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1782{
1783 struct damon_region *r, *next;
1784 unsigned long sz_region, sz_sub = 0;
1785 int i;
1786
1787 damon_for_each_region_safe(r, next, t) {
1788 sz_region = damon_sz_region(r);
1789
1790 for (i = 0; i < nr_subs - 1 &&
1791 sz_region > 2 * DAMON_MIN_REGION; i++) {
1792 /*
			 * Randomly select the size of the left sub-region to
			 * be at least 10 percent and at most 90 percent of
			 * the original region
1795 */
1796 sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1797 sz_region / 10, DAMON_MIN_REGION);
1798 /* Do not allow blank region */
1799 if (sz_sub == 0 || sz_sub >= sz_region)
1800 continue;
1801
1802 damon_split_region_at(t, r, sz_sub);
1803 sz_region = sz_sub;
1804 }
1805 }
1806}
1807
1808/*
1809 * Split every target region into randomly-sized small regions
1810 *
 * This function splits every target region into randomly-sized small regions
 * if the current total number of the regions is equal to or smaller than half
 * of the user-specified maximum number of regions. This is for maximizing the
1814 * monitoring accuracy under the dynamically changeable access patterns. If a
1815 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1816 * it.
1817 */
1818static void kdamond_split_regions(struct damon_ctx *ctx)
1819{
1820 struct damon_target *t;
1821 unsigned int nr_regions = 0;
1822 static unsigned int last_nr_regions;
1823 int nr_subregions = 2;
1824
1825 damon_for_each_target(t, ctx)
1826 nr_regions += damon_nr_regions(t);
1827
1828 if (nr_regions > ctx->attrs.max_nr_regions / 2)
1829 return;
1830
1831 /* Maybe the middle of the region has different access frequency */
1832 if (last_nr_regions == nr_regions &&
1833 nr_regions < ctx->attrs.max_nr_regions / 3)
1834 nr_subregions = 3;
1835
1836 damon_for_each_target(t, ctx)
1837 damon_split_regions_of(t, nr_subregions);
1838
1839 last_nr_regions = nr_regions;
1840}
1841
1842/*
1843 * Check whether current monitoring should be stopped
1844 *
 * The monitoring is stopped when either the user has requested it to stop, or
 * all monitoring targets are invalid.
1847 *
1848 * Returns true if need to stop current monitoring.
1849 */
1850static bool kdamond_need_stop(struct damon_ctx *ctx)
1851{
1852 struct damon_target *t;
1853
1854 if (kthread_should_stop())
1855 return true;
1856
1857 if (!ctx->ops.target_valid)
1858 return false;
1859
1860 damon_for_each_target(t, ctx) {
1861 if (ctx->ops.target_valid(t))
1862 return false;
1863 }
1864
1865 return true;
1866}
1867
1868static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
1869 unsigned long *metric_value)
1870{
1871 switch (metric) {
1872 case DAMOS_WMARK_FREE_MEM_RATE:
1873 *metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
1874 totalram_pages();
1875 return 0;
1876 default:
1877 break;
1878 }
1879 return -EINVAL;
1880}
1881
1882/*
 * Returns zero if the scheme is active. Else, returns the time to wait for
 * the next watermark check in microseconds.
1885 */
1886static unsigned long damos_wmark_wait_us(struct damos *scheme)
1887{
1888 unsigned long metric;
1889
1890 if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
1891 return 0;
1892
1893 /* higher than high watermark or lower than low watermark */
1894 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
1895 if (scheme->wmarks.activated)
1896 pr_debug("deactivate a scheme (%d) for %s wmark\n",
1897 scheme->action,
1898 metric > scheme->wmarks.high ?
1899 "high" : "low");
1900 scheme->wmarks.activated = false;
1901 return scheme->wmarks.interval;
1902 }
1903
1904 /* inactive and higher than middle watermark */
1905 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
1906 !scheme->wmarks.activated)
1907 return scheme->wmarks.interval;
1908
1909 if (!scheme->wmarks.activated)
1910 pr_debug("activate a scheme (%d)\n", scheme->action);
1911 scheme->wmarks.activated = true;
1912 return 0;
1913}
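
/*
 * For example (illustrative only), with high/mid/low watermarks of
 * 500/400/200 for DAMOS_WMARK_FREE_MEM_RATE (per-thousand of free memory), a
 * metric value of 600 or 100 deactivates the scheme, a value of 450 keeps a
 * deactivated scheme deactivated, and a value of 300 activates it.
 */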
1914
1915static void kdamond_usleep(unsigned long usecs)
1916{
1917 if (usecs >= USLEEP_RANGE_UPPER_BOUND)
1918 schedule_timeout_idle(usecs_to_jiffies(usecs));
1919 else
1920 usleep_range_idle(usecs, usecs + 1);
1921}
1922
1923/* Returns negative error code if it's not activated but should return */
1924static int kdamond_wait_activation(struct damon_ctx *ctx)
1925{
1926 struct damos *s;
1927 unsigned long wait_time;
1928 unsigned long min_wait_time = 0;
1929 bool init_wait_time = false;
1930
1931 while (!kdamond_need_stop(ctx)) {
1932 damon_for_each_scheme(s, ctx) {
1933 wait_time = damos_wmark_wait_us(s);
1934 if (!init_wait_time || wait_time < min_wait_time) {
1935 init_wait_time = true;
1936 min_wait_time = wait_time;
1937 }
1938 }
1939 if (!min_wait_time)
1940 return 0;
1941
1942 kdamond_usleep(min_wait_time);
1943
1944 if (ctx->callback.after_wmarks_check &&
1945 ctx->callback.after_wmarks_check(ctx))
1946 break;
1947 }
1948 return -EBUSY;
1949}
1950
1951static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
1952{
1953 unsigned long sample_interval = ctx->attrs.sample_interval ?
1954 ctx->attrs.sample_interval : 1;
1955 unsigned long apply_interval;
1956 struct damos *scheme;
1957
1958 ctx->passed_sample_intervals = 0;
1959 ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
1960 ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
1961 sample_interval;
1962
1963 damon_for_each_scheme(scheme, ctx) {
1964 apply_interval = scheme->apply_interval_us ?
1965 scheme->apply_interval_us : ctx->attrs.aggr_interval;
1966 scheme->next_apply_sis = apply_interval / sample_interval;
1967 }
1968}
1969
1970/*
1971 * The monitoring daemon that runs as a kernel thread
1972 */
1973static int kdamond_fn(void *data)
1974{
1975 struct damon_ctx *ctx = data;
1976 struct damon_target *t;
1977 struct damon_region *r, *next;
1978 unsigned int max_nr_accesses = 0;
1979 unsigned long sz_limit = 0;
1980
1981 pr_debug("kdamond (%d) starts\n", current->pid);
1982
1983 complete(&ctx->kdamond_started);
1984 kdamond_init_intervals_sis(ctx);
1985
1986 if (ctx->ops.init)
1987 ctx->ops.init(ctx);
1988 if (ctx->callback.before_start && ctx->callback.before_start(ctx))
1989 goto done;
1990 ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
1991 sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
1992 if (!ctx->regions_score_histogram)
1993 goto done;
1994
1995 sz_limit = damon_region_sz_limit(ctx);
1996
1997 while (!kdamond_need_stop(ctx)) {
1998 /*
1999 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2000 * be changed from after_wmarks_check() or after_aggregation()
2001 * callbacks. Read the values here, and use those for this
		 * iteration. That is, values newly set by damon_set_attrs()
		 * are respected from the next iteration.
2004 */
2005 unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2006 unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2007 unsigned long sample_interval = ctx->attrs.sample_interval;
2008
2009 if (kdamond_wait_activation(ctx))
2010 break;
2011
2012 if (ctx->ops.prepare_access_checks)
2013 ctx->ops.prepare_access_checks(ctx);
2014 if (ctx->callback.after_sampling &&
2015 ctx->callback.after_sampling(ctx))
2016 break;
2017
2018 kdamond_usleep(sample_interval);
2019 ctx->passed_sample_intervals++;
2020
2021 if (ctx->ops.check_accesses)
2022 max_nr_accesses = ctx->ops.check_accesses(ctx);
2023
2024 if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2025 kdamond_merge_regions(ctx,
2026 max_nr_accesses / 10,
2027 sz_limit);
2028 if (ctx->callback.after_aggregation &&
2029 ctx->callback.after_aggregation(ctx))
2030 break;
2031 }
2032
2033 /*
2034 * do kdamond_apply_schemes() after kdamond_merge_regions() if
2035 * possible, to reduce overhead
2036 */
2037 if (!list_empty(&ctx->schemes))
2038 kdamond_apply_schemes(ctx);
2039
2040 sample_interval = ctx->attrs.sample_interval ?
2041 ctx->attrs.sample_interval : 1;
2042 if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2043 ctx->next_aggregation_sis = next_aggregation_sis +
2044 ctx->attrs.aggr_interval / sample_interval;
2045
2046 kdamond_reset_aggregated(ctx);
2047 kdamond_split_regions(ctx);
2048 if (ctx->ops.reset_aggregated)
2049 ctx->ops.reset_aggregated(ctx);
2050 }
2051
2052 if (ctx->passed_sample_intervals >= next_ops_update_sis) {
2053 ctx->next_ops_update_sis = next_ops_update_sis +
2054 ctx->attrs.ops_update_interval /
2055 sample_interval;
2056 if (ctx->ops.update)
2057 ctx->ops.update(ctx);
2058 sz_limit = damon_region_sz_limit(ctx);
2059 }
2060 }
2061done:
2062 damon_for_each_target(t, ctx) {
2063 damon_for_each_region_safe(r, next, t)
2064 damon_destroy_region(r, t);
2065 }
2066
2067 if (ctx->callback.before_terminate)
2068 ctx->callback.before_terminate(ctx);
2069 if (ctx->ops.cleanup)
2070 ctx->ops.cleanup(ctx);
2071 kfree(ctx->regions_score_histogram);
2072
2073 pr_debug("kdamond (%d) finishes\n", current->pid);
2074 mutex_lock(&ctx->kdamond_lock);
2075 ctx->kdamond = NULL;
2076 mutex_unlock(&ctx->kdamond_lock);
2077
2078 mutex_lock(&damon_lock);
2079 nr_running_ctxs--;
2080 if (!nr_running_ctxs && running_exclusive_ctxs)
2081 running_exclusive_ctxs = false;
2082 mutex_unlock(&damon_lock);
2083
2084 return 0;
2085}
2086
2087/*
2088 * struct damon_system_ram_region - System RAM resource address region of
2089 * [@start, @end).
2090 * @start: Start address of the region (inclusive).
2091 * @end: End address of the region (exclusive).
2092 */
2093struct damon_system_ram_region {
2094 unsigned long start;
2095 unsigned long end;
2096};
2097
2098static int walk_system_ram(struct resource *res, void *arg)
2099{
2100 struct damon_system_ram_region *a = arg;
2101
2102 if (a->end - a->start < resource_size(res)) {
2103 a->start = res->start;
2104 a->end = res->end;
2105 }
2106 return 0;
2107}
2108
2109/*
 * Find the biggest 'System RAM' resource and store its start and end
 * addresses in @start and @end, respectively. If no System RAM is found,
 * returns false.
2112 */
2113static bool damon_find_biggest_system_ram(unsigned long *start,
2114 unsigned long *end)
2115
2116{
2117 struct damon_system_ram_region arg = {};
2118
2119 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2120 if (arg.end <= arg.start)
2121 return false;
2122
2123 *start = arg.start;
2124 *end = arg.end;
2125 return true;
2126}
2127
2128/**
2129 * damon_set_region_biggest_system_ram_default() - Set the region of the given
2130 * monitoring target as requested, or biggest 'System RAM'.
2131 * @t: The monitoring target to set the region.
2132 * @start: The pointer to the start address of the region.
2133 * @end: The pointer to the end address of the region.
2134 *
2135 * This function sets the region of @t as requested by @start and @end. If the
2136 * values of @start and @end are zero, however, this function finds the biggest
2137 * 'System RAM' resource and sets the region to cover the resource. In the
2138 * latter case, this function saves the start and end addresses of the resource
2139 * in @start and @end, respectively.
2140 *
2141 * Return: 0 on success, negative error code otherwise.
2142 */
2143int damon_set_region_biggest_system_ram_default(struct damon_target *t,
2144 unsigned long *start, unsigned long *end)
2145{
2146 struct damon_addr_range addr_range;
2147
2148 if (*start > *end)
2149 return -EINVAL;
2150
2151 if (!*start && !*end &&
2152 !damon_find_biggest_system_ram(start, end))
2153 return -EINVAL;
2154
2155 addr_range.start = *start;
2156 addr_range.end = *end;
2157 return damon_set_regions(t, &addr_range, 1);
2158}
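
/*
 * A minimal usage sketch (illustrative only): passing zeroed addresses lets
 * the function pick the biggest 'System RAM' resource by itself.
 *
 *	unsigned long start = 0, end = 0;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 */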
2159
2160/*
2161 * damon_moving_sum() - Calculate an inferred moving sum value.
2162 * @mvsum: Inferred sum of the last @len_window values.
2163 * @nomvsum: Non-moving sum of the last discrete @len_window window values.
2164 * @len_window: The number of last values to take care of.
2165 * @new_value: New value that will be added to the pseudo moving sum.
2166 *
2167 * Moving sum (moving average * window size) is good for handling noise, but
2168 * the cost of keeping past values can be high for arbitrary window size. This
2169 * function implements a lightweight pseudo moving sum function that doesn't
2170 * keep the past window values.
2171 *
 * It simply assumes there was no noise in the past, and derives the assumed
 * no-noise past value to drop from @nomvsum and @len_window. @nomvsum is a
 * non-moving sum of the last window. For example, if @len_window is 10 and we
 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
 * values. Hence, this function simply drops @nomvsum / @len_window from the
 * given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For
 * calculating the next moving sum with a new value, we should drop 0 from 50
 * and add the new value. However, this function assumes it got the value 5
 * for each of the last ten times. Based on that assumption, when the next
 * value is measured, it drops the assumed past value, 5, from the current sum
 * and adds the new value to get the updated pseudo moving sum.
 *
 * This means the value could have errors, but the errors disappear for every
 * @len_window-aligned calls. For example, if @len_window is 10, the pseudo
 * moving sum with the 11th to 19th values would have an error, but the sum
 * with the 20th value will not.
 *
 * Return: Pseudo moving sum after getting the @new_value.
2193 */
2194static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
2195 unsigned int len_window, unsigned int new_value)
2196{
2197 return mvsum - nomvsum / len_window + new_value;
2198}
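
/*
 * For example (illustrative only), with @mvsum of 50, @nomvsum of 50,
 * @len_window of 10, and @new_value of 10, the updated pseudo moving sum is
 * 50 - 50 / 10 + 10 = 55.
 */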
2199
2200/**
2201 * damon_update_region_access_rate() - Update the access rate of a region.
2202 * @r: The DAMON region to update for its access check result.
 * @accessed: Whether the region was accessed during the last sampling interval.
2204 * @attrs: The damon_attrs of the DAMON context.
2205 *
2206 * Update the access rate of a region with the region's last sampling interval
2207 * access check result.
2208 *
2209 * Usually this will be called by &damon_operations->check_accesses callback.
2210 */
2211void damon_update_region_access_rate(struct damon_region *r, bool accessed,
2212 struct damon_attrs *attrs)
2213{
2214 unsigned int len_window = 1;
2215
2216 /*
2217 * sample_interval can be zero, but cannot be larger than
2218 * aggr_interval, owing to validation of damon_set_attrs().
2219 */
2220 if (attrs->sample_interval)
2221 len_window = damon_max_nr_accesses(attrs);
2222 r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2223 r->last_nr_accesses * 10000, len_window,
2224 accessed ? 10000 : 0);
2225
2226 if (accessed)
2227 r->nr_accesses++;
2228}
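
/*
 * For example (illustrative only), assuming damon_max_nr_accesses() gives a
 * window length of 20 (e.g., 100 ms aggregation with 5 ms sampling), an
 * accessed region drops last_nr_accesses * 10000 / 20 from nr_accesses_bp and
 * adds 10000 to it for the sample.
 */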
2229
2230static int __init damon_init(void)
2231{
2232 damon_region_cache = KMEM_CACHE(damon_region, 0);
2233 if (unlikely(!damon_region_cache)) {
2234 pr_err("creating damon_region_cache fails\n");
2235 return -ENOMEM;
2236 }
2237
2238 return 0;
2239}
2240
2241subsys_initcall(damon_init);
2242
2243#include "tests/core-kunit.h"