1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Data Access Monitor
4 *
5 * Author: SeongJae Park <sj@kernel.org>
6 */
7
8#define pr_fmt(fmt) "damon: " fmt
9
10#include <linux/damon.h>
11#include <linux/delay.h>
12#include <linux/kthread.h>
13#include <linux/mm.h>
14#include <linux/slab.h>
15#include <linux/string.h>
16
17#define CREATE_TRACE_POINTS
18#include <trace/events/damon.h>
19
20#ifdef CONFIG_DAMON_KUNIT_TEST
21#undef DAMON_MIN_REGION
22#define DAMON_MIN_REGION 1
23#endif
24
25static DEFINE_MUTEX(damon_lock);
26static int nr_running_ctxs;
27static bool running_exclusive_ctxs;
28
29static DEFINE_MUTEX(damon_ops_lock);
30static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
31
32static struct kmem_cache *damon_region_cache __ro_after_init;
33
34/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
35static bool __damon_is_registered_ops(enum damon_ops_id id)
36{
37 struct damon_operations empty_ops = {};
38
39 if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
40 return false;
41 return true;
42}
43
44/**
45 * damon_is_registered_ops() - Check if a given damon_operations is registered.
46 * @id: Id of the damon_operations to check if registered.
47 *
48 * Return: true if the ops is set, false otherwise.
49 */
50bool damon_is_registered_ops(enum damon_ops_id id)
51{
52 bool registered;
53
54 if (id >= NR_DAMON_OPS)
55 return false;
56 mutex_lock(&damon_ops_lock);
57 registered = __damon_is_registered_ops(id);
58 mutex_unlock(&damon_ops_lock);
59 return registered;
60}
61
62/**
63 * damon_register_ops() - Register a monitoring operations set to DAMON.
64 * @ops: monitoring operations set to register.
65 *
66 * This function registers a monitoring operations set of valid &struct
67 * damon_operations->id so that others can find and use them later.
68 *
69 * Return: 0 on success, negative error code otherwise.
70 */
71int damon_register_ops(struct damon_operations *ops)
72{
73 int err = 0;
74
75 if (ops->id >= NR_DAMON_OPS)
76 return -EINVAL;
77 mutex_lock(&damon_ops_lock);
78 /* Fail for already registered ops */
79 if (__damon_is_registered_ops(ops->id)) {
80 err = -EINVAL;
81 goto out;
82 }
83 damon_registered_ops[ops->id] = *ops;
84out:
85 mutex_unlock(&damon_ops_lock);
86 return err;
87}
88
89/**
90 * damon_select_ops() - Select a monitoring operations to use with the context.
91 * @ctx: monitoring context to use the operations.
92 * @id: id of the registered monitoring operations to select.
93 *
94 * This function finds the registered monitoring operations set of @id and
95 * makes @ctx use it.
96 *
97 * Return: 0 on success, negative error code otherwise.
98 */
99int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
100{
101 int err = 0;
102
103 if (id >= NR_DAMON_OPS)
104 return -EINVAL;
105
106 mutex_lock(&damon_ops_lock);
107 if (!__damon_is_registered_ops(id))
108 err = -EINVAL;
109 else
110 ctx->ops = damon_registered_ops[id];
111 mutex_unlock(&damon_ops_lock);
112 return err;
113}
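
/*
 * Usage sketch (illustrative, not part of the original file): a hypothetical
 * caller could register its own operations set and make a context use it
 * roughly as below.  The DAMON_OPS_PADDR id and the 'my_ops' callbacks are
 * assumptions for the example only.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_PADDR,
 *		... set init, prepare_access_checks, check_accesses, etc ...
 *	};
 *
 *	if (!damon_register_ops(&my_ops))
 *		err = damon_select_ops(ctx, DAMON_OPS_PADDR);
 */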
114
115/*
116 * Construct a damon_region struct
117 *
118 * Returns the pointer to the new struct if success, or NULL otherwise
119 */
120struct damon_region *damon_new_region(unsigned long start, unsigned long end)
121{
122 struct damon_region *region;
123
124 region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
125 if (!region)
126 return NULL;
127
128 region->ar.start = start;
129 region->ar.end = end;
130 region->nr_accesses = 0;
131 region->nr_accesses_bp = 0;
132 INIT_LIST_HEAD(&region->list);
133
134 region->age = 0;
135 region->last_nr_accesses = 0;
136
137 return region;
138}
139
140void damon_add_region(struct damon_region *r, struct damon_target *t)
141{
142 list_add_tail(&r->list, &t->regions_list);
143 t->nr_regions++;
144}
145
146static void damon_del_region(struct damon_region *r, struct damon_target *t)
147{
148 list_del(&r->list);
149 t->nr_regions--;
150}
151
152static void damon_free_region(struct damon_region *r)
153{
154 kmem_cache_free(damon_region_cache, r);
155}
156
157void damon_destroy_region(struct damon_region *r, struct damon_target *t)
158{
159 damon_del_region(r, t);
160 damon_free_region(r);
161}
162
163/*
164 * Check whether a region is intersecting an address range
165 *
166 * Returns true if it is.
167 */
168static bool damon_intersect(struct damon_region *r,
169 struct damon_addr_range *re)
170{
171 return !(r->ar.end <= re->start || re->end <= r->ar.start);
172}
173
174/*
175 * Fill holes in regions with new regions.
176 */
177static int damon_fill_regions_holes(struct damon_region *first,
178 struct damon_region *last, struct damon_target *t)
179{
180 struct damon_region *r = first;
181
182 damon_for_each_region_from(r, t) {
183 struct damon_region *next, *newr;
184
185 if (r == last)
186 break;
187 next = damon_next_region(r);
188 if (r->ar.end != next->ar.start) {
189 newr = damon_new_region(r->ar.end, next->ar.start);
190 if (!newr)
191 return -ENOMEM;
192 damon_insert_region(newr, r, next, t);
193 }
194 }
195 return 0;
196}
197
198/*
199 * damon_set_regions() - Set regions of a target for given address ranges.
200 * @t: the given target.
201 * @ranges: array of new monitoring target ranges.
202 * @nr_ranges: length of @ranges.
203 *
204 * This function adds new regions to, or modifies existing regions of, a
205 * monitoring target to fit in the specific ranges.
206 *
207 * Return: 0 if success, or negative error code otherwise.
208 */
209int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
210 unsigned int nr_ranges)
211{
212 struct damon_region *r, *next;
213 unsigned int i;
214 int err;
215
216 /* Remove regions which are not in the new ranges */
217 damon_for_each_region_safe(r, next, t) {
218 for (i = 0; i < nr_ranges; i++) {
219 if (damon_intersect(r, &ranges[i]))
220 break;
221 }
222 if (i == nr_ranges)
223 damon_destroy_region(r, t);
224 }
225
226 r = damon_first_region(t);
227 /* Add new regions or resize existing regions to fit in the ranges */
228 for (i = 0; i < nr_ranges; i++) {
229 struct damon_region *first = NULL, *last, *newr;
230 struct damon_addr_range *range;
231
232 range = &ranges[i];
233 /* Get the first/last regions intersecting with the range */
234 damon_for_each_region_from(r, t) {
235 if (damon_intersect(r, range)) {
236 if (!first)
237 first = r;
238 last = r;
239 }
240 if (r->ar.start >= range->end)
241 break;
242 }
243 if (!first) {
244 /* no region intersects with this range */
245 newr = damon_new_region(
246 ALIGN_DOWN(range->start,
247 DAMON_MIN_REGION),
248 ALIGN(range->end, DAMON_MIN_REGION));
249 if (!newr)
250 return -ENOMEM;
251 damon_insert_region(newr, damon_prev_region(r), r, t);
252 } else {
253 /* resize intersecting regions to fit in this range */
254 first->ar.start = ALIGN_DOWN(range->start,
255 DAMON_MIN_REGION);
256 last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
257
258 /* fill possible holes in the range */
259 err = damon_fill_regions_holes(first, last, t);
260 if (err)
261 return err;
262 }
263 }
264 return 0;
265}
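
/*
 * Usage sketch (illustrative, assumed address values): making an existing
 * target 't' cover two fixed ranges.  Ranges are [start, end) and are aligned
 * to DAMON_MIN_REGION by damon_set_regions().
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000000, .end = 0x180000000 },
 *		{ .start = 0x200000000, .end = 0x240000000 },
 *	};
 *
 *	err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 */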
266
267struct damos_filter *damos_new_filter(enum damos_filter_type type,
268 bool matching)
269{
270 struct damos_filter *filter;
271
272 filter = kmalloc(sizeof(*filter), GFP_KERNEL);
273 if (!filter)
274 return NULL;
275 filter->type = type;
276 filter->matching = matching;
277 INIT_LIST_HEAD(&filter->list);
278 return filter;
279}
280
281void damos_add_filter(struct damos *s, struct damos_filter *f)
282{
283 list_add_tail(&f->list, &s->filters);
284}
285
286static void damos_del_filter(struct damos_filter *f)
287{
288 list_del(&f->list);
289}
290
291static void damos_free_filter(struct damos_filter *f)
292{
293 kfree(f);
294}
295
296void damos_destroy_filter(struct damos_filter *f)
297{
298 damos_del_filter(f);
299 damos_free_filter(f);
300}
301
302/* initialize private fields of damos_quota and return the pointer */
303static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
304{
305 quota->total_charged_sz = 0;
306 quota->total_charged_ns = 0;
307 quota->esz = 0;
308 quota->charged_sz = 0;
309 quota->charged_from = 0;
310 quota->charge_target_from = NULL;
311 quota->charge_addr_from = 0;
312 return quota;
313}
314
315struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
316 enum damos_action action,
317 unsigned long apply_interval_us,
318 struct damos_quota *quota,
319 struct damos_watermarks *wmarks)
320{
321 struct damos *scheme;
322
323 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
324 if (!scheme)
325 return NULL;
326 scheme->pattern = *pattern;
327 scheme->action = action;
328 scheme->apply_interval_us = apply_interval_us;
329 /*
330 * next_apply_sis will be set when kdamond starts. While kdamond is
331 * running, it will also be updated when the scheme is added to the DAMON
332 * context, or damon_attrs are updated.
333 */
334 scheme->next_apply_sis = 0;
335 INIT_LIST_HEAD(&scheme->filters);
336 scheme->stat = (struct damos_stat){};
337 INIT_LIST_HEAD(&scheme->list);
338
339 scheme->quota = *(damos_quota_init_priv(quota));
340
341 scheme->wmarks = *wmarks;
342 scheme->wmarks.activated = true;
343
344 return scheme;
345}
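
/*
 * Construction sketch (illustrative values only): a scheme that pages out
 * regions of at least 4 KiB that showed no access for five or more
 * aggregation intervals could be built roughly as below.  The concrete
 * pattern, quota and watermark values are assumptions for the example.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 4096, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = 0,
 *		.min_age_region = 5, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = { .reset_interval = 1000 };
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *
 *	struct damos *s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0,
 *			&quota, &wmarks);
 */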
346
347static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
348{
349 unsigned long sample_interval = ctx->attrs.sample_interval ?
350 ctx->attrs.sample_interval : 1;
351 unsigned long apply_interval = s->apply_interval_us ?
352 s->apply_interval_us : ctx->attrs.aggr_interval;
353
354 s->next_apply_sis = ctx->passed_sample_intervals +
355 apply_interval / sample_interval;
356}
357
358void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
359{
360 list_add_tail(&s->list, &ctx->schemes);
361 damos_set_next_apply_sis(s, ctx);
362}
363
364static void damon_del_scheme(struct damos *s)
365{
366 list_del(&s->list);
367}
368
369static void damon_free_scheme(struct damos *s)
370{
371 kfree(s);
372}
373
374void damon_destroy_scheme(struct damos *s)
375{
376 struct damos_filter *f, *next;
377
378 damos_for_each_filter_safe(f, next, s)
379 damos_destroy_filter(f);
380 damon_del_scheme(s);
381 damon_free_scheme(s);
382}
383
384/*
385 * Construct a damon_target struct
386 *
387 * Returns the pointer to the new struct if success, or NULL otherwise
388 */
389struct damon_target *damon_new_target(void)
390{
391 struct damon_target *t;
392
393 t = kmalloc(sizeof(*t), GFP_KERNEL);
394 if (!t)
395 return NULL;
396
397 t->pid = NULL;
398 t->nr_regions = 0;
399 INIT_LIST_HEAD(&t->regions_list);
400 INIT_LIST_HEAD(&t->list);
401
402 return t;
403}
404
405void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
406{
407 list_add_tail(&t->list, &ctx->adaptive_targets);
408}
409
410bool damon_targets_empty(struct damon_ctx *ctx)
411{
412 return list_empty(&ctx->adaptive_targets);
413}
414
415static void damon_del_target(struct damon_target *t)
416{
417 list_del(&t->list);
418}
419
420void damon_free_target(struct damon_target *t)
421{
422 struct damon_region *r, *next;
423
424 damon_for_each_region_safe(r, next, t)
425 damon_free_region(r);
426 kfree(t);
427}
428
429void damon_destroy_target(struct damon_target *t)
430{
431 damon_del_target(t);
432 damon_free_target(t);
433}
434
435unsigned int damon_nr_regions(struct damon_target *t)
436{
437 return t->nr_regions;
438}
439
440struct damon_ctx *damon_new_ctx(void)
441{
442 struct damon_ctx *ctx;
443
444 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
445 if (!ctx)
446 return NULL;
447
448 init_completion(&ctx->kdamond_started);
449
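 /*
  * Default intervals below are in microseconds: 5 ms sampling,
  * 100 ms aggregation, and 60 s operations update.
  */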
450 ctx->attrs.sample_interval = 5 * 1000;
451 ctx->attrs.aggr_interval = 100 * 1000;
452 ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
453
454 ctx->passed_sample_intervals = 0;
455 /* These will be set from kdamond_init_intervals_sis() */
456 ctx->next_aggregation_sis = 0;
457 ctx->next_ops_update_sis = 0;
458
459 mutex_init(&ctx->kdamond_lock);
460
461 ctx->attrs.min_nr_regions = 10;
462 ctx->attrs.max_nr_regions = 1000;
463
464 INIT_LIST_HEAD(&ctx->adaptive_targets);
465 INIT_LIST_HEAD(&ctx->schemes);
466
467 return ctx;
468}
469
470static void damon_destroy_targets(struct damon_ctx *ctx)
471{
472 struct damon_target *t, *next_t;
473
474 if (ctx->ops.cleanup) {
475 ctx->ops.cleanup(ctx);
476 return;
477 }
478
479 damon_for_each_target_safe(t, next_t, ctx)
480 damon_destroy_target(t);
481}
482
483void damon_destroy_ctx(struct damon_ctx *ctx)
484{
485 struct damos *s, *next_s;
486
487 damon_destroy_targets(ctx);
488
489 damon_for_each_scheme_safe(s, next_s, ctx)
490 damon_destroy_scheme(s);
491
492 kfree(ctx);
493}
494
495static unsigned int damon_age_for_new_attrs(unsigned int age,
496 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
497{
498 return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
499}
500
501/* convert access ratio in bp (per 10,000) to nr_accesses */
502static unsigned int damon_accesses_bp_to_nr_accesses(
503 unsigned int accesses_bp, struct damon_attrs *attrs)
504{
505 return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
506}
507
508/* convert nr_accesses to access ratio in bp (per 10,000) */
509static unsigned int damon_nr_accesses_to_accesses_bp(
510 unsigned int nr_accesses, struct damon_attrs *attrs)
511{
512 return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
513}
514
515static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
516 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
517{
518 return damon_accesses_bp_to_nr_accesses(
519 damon_nr_accesses_to_accesses_bp(
520 nr_accesses, old_attrs),
521 new_attrs);
522}
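
/*
 * Worked example (illustrative numbers, assuming damon_max_nr_accesses() is
 * aggr_interval / sample_interval): with old attrs of 5 ms sampling and
 * 100 ms aggregation, the maximum nr_accesses is 20, so nr_accesses == 10
 * converts to 10 * 10000 / 20 = 5000 bp (a 50% access rate).  If the new
 * attrs use 10 ms sampling and 100 ms aggregation (maximum 10), the value
 * becomes 5000 * 10 / 10000 = 5, preserving the 50% rate.
 */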
523
524static void damon_update_monitoring_result(struct damon_region *r,
525 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
526{
527 r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
528 old_attrs, new_attrs);
529 r->nr_accesses_bp = r->nr_accesses * 10000;
530 r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
531}
532
533/*
534 * region->nr_accesses is the number of sampling intervals in the last
535 * aggregation interval in which access to the region was found, and region->age
536 * is the number of aggregation intervals for which its access pattern has been
537 * maintained.  Hence, the real meaning of the two fields depends on the current
538 * sampling interval and aggregation interval.  This function updates
539 * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
540 */
541static void damon_update_monitoring_results(struct damon_ctx *ctx,
542 struct damon_attrs *new_attrs)
543{
544 struct damon_attrs *old_attrs = &ctx->attrs;
545 struct damon_target *t;
546 struct damon_region *r;
547
548 /* if any interval is zero, simply skip the conversion */
549 if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
550 !new_attrs->sample_interval ||
551 !new_attrs->aggr_interval)
552 return;
553
554 damon_for_each_target(t, ctx)
555 damon_for_each_region(r, t)
556 damon_update_monitoring_result(
557 r, old_attrs, new_attrs);
558}
559
560/**
561 * damon_set_attrs() - Set attributes for the monitoring.
562 * @ctx: monitoring context
563 * @attrs: monitoring attributes
564 *
565 * This function should be called while the kdamond is not running, or while an
566 * aggregation of access check results is not ongoing (e.g., from
567 * &struct damon_callback->after_aggregation or
568 * &struct damon_callback->after_wmarks_check callbacks).
569 *
570 * Every time interval is in micro-seconds.
571 *
572 * Return: 0 on success, negative error code otherwise.
573 */
574int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
575{
576 unsigned long sample_interval = attrs->sample_interval ?
577 attrs->sample_interval : 1;
578 struct damos *s;
579
580 if (attrs->min_nr_regions < 3)
581 return -EINVAL;
582 if (attrs->min_nr_regions > attrs->max_nr_regions)
583 return -EINVAL;
584 if (attrs->sample_interval > attrs->aggr_interval)
585 return -EINVAL;
586
587 ctx->next_aggregation_sis = ctx->passed_sample_intervals +
588 attrs->aggr_interval / sample_interval;
589 ctx->next_ops_update_sis = ctx->passed_sample_intervals +
590 attrs->ops_update_interval / sample_interval;
591
592 damon_update_monitoring_results(ctx, attrs);
593 ctx->attrs = *attrs;
594
595 damon_for_each_scheme(s, ctx)
596 damos_set_next_apply_sis(s, ctx);
597
598 return 0;
599}
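
/*
 * Usage sketch (illustrative values): re-tuning a context to 10 ms sampling
 * and 200 ms aggregation, e.g., from an after_aggregation() callback.  All
 * intervals are given in microseconds.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 10 * 1000,
 *		.aggr_interval = 200 * 1000,
 *		.ops_update_interval = 60 * 1000 * 1000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *
 *	err = damon_set_attrs(ctx, &attrs);
 */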
600
601/**
602 * damon_set_schemes() - Set data access monitoring based operation schemes.
603 * @ctx: monitoring context
604 * @schemes: array of the schemes
605 * @nr_schemes: number of entries in @schemes
606 *
607 * This function should not be called while the kdamond of the context is
608 * running.
609 */
610void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
611 ssize_t nr_schemes)
612{
613 struct damos *s, *next;
614 ssize_t i;
615
616 damon_for_each_scheme_safe(s, next, ctx)
617 damon_destroy_scheme(s);
618 for (i = 0; i < nr_schemes; i++)
619 damon_add_scheme(ctx, schemes[i]);
620}
621
622/**
623 * damon_nr_running_ctxs() - Return number of currently running contexts.
624 */
625int damon_nr_running_ctxs(void)
626{
627 int nr_ctxs;
628
629 mutex_lock(&damon_lock);
630 nr_ctxs = nr_running_ctxs;
631 mutex_unlock(&damon_lock);
632
633 return nr_ctxs;
634}
635
636/* Returns the size upper limit for each monitoring region */
637static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
638{
639 struct damon_target *t;
640 struct damon_region *r;
641 unsigned long sz = 0;
642
643 damon_for_each_target(t, ctx) {
644 damon_for_each_region(r, t)
645 sz += damon_sz_region(r);
646 }
647
648 if (ctx->attrs.min_nr_regions)
649 sz /= ctx->attrs.min_nr_regions;
650 if (sz < DAMON_MIN_REGION)
651 sz = DAMON_MIN_REGION;
652
653 return sz;
654}
655
656static int kdamond_fn(void *data);
657
658/*
659 * __damon_start() - Starts monitoring with given context.
660 * @ctx: monitoring context
661 *
662 * This function should be called while damon_lock is held.
663 *
664 * Return: 0 on success, negative error code otherwise.
665 */
666static int __damon_start(struct damon_ctx *ctx)
667{
668 int err = -EBUSY;
669
670 mutex_lock(&ctx->kdamond_lock);
671 if (!ctx->kdamond) {
672 err = 0;
673 reinit_completion(&ctx->kdamond_started);
674 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
675 nr_running_ctxs);
676 if (IS_ERR(ctx->kdamond)) {
677 err = PTR_ERR(ctx->kdamond);
678 ctx->kdamond = NULL;
679 } else {
680 wait_for_completion(&ctx->kdamond_started);
681 }
682 }
683 mutex_unlock(&ctx->kdamond_lock);
684
685 return err;
686}
687
688/**
689 * damon_start() - Starts monitoring for a given group of contexts.
690 * @ctxs: an array of the pointers for contexts to start monitoring
691 * @nr_ctxs: size of @ctxs
692 * @exclusive: exclusiveness of this contexts group
693 *
694 * This function starts a group of monitoring threads for a group of monitoring
695 * contexts. One thread per context is created and run in parallel. The
696 * caller should handle synchronization between the threads by itself. If
697 * @exclusive is true and a group of threads created by another
698 * 'damon_start()' call is currently running, this function does nothing but
699 * returns -EBUSY.
700 *
701 * Return: 0 on success, negative error code otherwise.
702 */
703int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
704{
705 int i;
706 int err = 0;
707
708 mutex_lock(&damon_lock);
709 if ((exclusive && nr_running_ctxs) ||
710 (!exclusive && running_exclusive_ctxs)) {
711 mutex_unlock(&damon_lock);
712 return -EBUSY;
713 }
714
715 for (i = 0; i < nr_ctxs; i++) {
716 err = __damon_start(ctxs[i]);
717 if (err)
718 break;
719 nr_running_ctxs++;
720 }
721 if (exclusive && nr_running_ctxs)
722 running_exclusive_ctxs = true;
723 mutex_unlock(&damon_lock);
724
725 return err;
726}
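
/*
 * Usage sketch (illustrative): starting and later stopping a single context
 * exclusively.  Error handling is omitted for brevity.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	... set targets, attrs, and schemes ...
 *	damon_start(&ctx, 1, true);
 *	...
 *	damon_stop(&ctx, 1);
 */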
727
728/*
729 * __damon_stop() - Stops monitoring of a given context.
730 * @ctx: monitoring context
731 *
732 * Return: 0 on success, negative error code otherwise.
733 */
734static int __damon_stop(struct damon_ctx *ctx)
735{
736 struct task_struct *tsk;
737
738 mutex_lock(&ctx->kdamond_lock);
739 tsk = ctx->kdamond;
740 if (tsk) {
741 get_task_struct(tsk);
742 mutex_unlock(&ctx->kdamond_lock);
743 kthread_stop_put(tsk);
744 return 0;
745 }
746 mutex_unlock(&ctx->kdamond_lock);
747
748 return -EPERM;
749}
750
751/**
752 * damon_stop() - Stops monitoring for a given group of contexts.
753 * @ctxs: an array of the pointers for contexts to stop monitoring
754 * @nr_ctxs: size of @ctxs
755 *
756 * Return: 0 on success, negative error code otherwise.
757 */
758int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
759{
760 int i, err = 0;
761
762 for (i = 0; i < nr_ctxs; i++) {
763 /* nr_running_ctxs is decremented in kdamond_fn */
764 err = __damon_stop(ctxs[i]);
765 if (err)
766 break;
767 }
768 return err;
769}
770
771/*
772 * Reset the aggregated monitoring results ('nr_accesses' of each region).
773 */
774static void kdamond_reset_aggregated(struct damon_ctx *c)
775{
776 struct damon_target *t;
777 unsigned int ti = 0; /* target's index */
778
779 damon_for_each_target(t, c) {
780 struct damon_region *r;
781
782 damon_for_each_region(r, t) {
783 trace_damon_aggregated(ti, r, damon_nr_regions(t));
784 r->last_nr_accesses = r->nr_accesses;
785 r->nr_accesses = 0;
786 }
787 ti++;
788 }
789}
790
791static void damon_split_region_at(struct damon_target *t,
792 struct damon_region *r, unsigned long sz_r);
793
794static bool __damos_valid_target(struct damon_region *r, struct damos *s)
795{
796 unsigned long sz;
797 unsigned int nr_accesses = r->nr_accesses_bp / 10000;
798
799 sz = damon_sz_region(r);
800 return s->pattern.min_sz_region <= sz &&
801 sz <= s->pattern.max_sz_region &&
802 s->pattern.min_nr_accesses <= nr_accesses &&
803 nr_accesses <= s->pattern.max_nr_accesses &&
804 s->pattern.min_age_region <= r->age &&
805 r->age <= s->pattern.max_age_region;
806}
807
808static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
809 struct damon_region *r, struct damos *s)
810{
811 bool ret = __damos_valid_target(r, s);
812
813 if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
814 return ret;
815
816 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
817}
818
819/*
820 * damos_skip_charged_region() - Check if the given region or starting part of
821 * it is already charged for the DAMOS quota.
822 * @t: The target of the region.
823 * @rp: The pointer to the region.
824 * @s: The scheme to be applied.
825 *
826 * If the quota of a scheme has been exceeded in a quota charge window, the
827 * scheme's action would be applied to only a part of the regions fulfilling
828 * the target access pattern.  To avoid applying the scheme action to only the
829 * already applied regions, DAMON skips applying the scheme action to regions
830 * that were charged in the previous charge window.
831 *
832 * This function checks if a given region should be skipped for that reason.
833 * If only the starting part of the region has previously been charged, this
834 * function splits the region into two so that the second one covers the area
835 * that was not charged in the previous charge window, saves the second
836 * region in *rp, and returns false, so that the caller can apply the DAMON
837 * action to the second one.
838 *
839 * Return: true if the region should be entirely skipped, false otherwise.
840 */
841static bool damos_skip_charged_region(struct damon_target *t,
842 struct damon_region **rp, struct damos *s)
843{
844 struct damon_region *r = *rp;
845 struct damos_quota *quota = &s->quota;
846 unsigned long sz_to_skip;
847
848 /* Skip previously charged regions */
849 if (quota->charge_target_from) {
850 if (t != quota->charge_target_from)
851 return true;
852 if (r == damon_last_region(t)) {
853 quota->charge_target_from = NULL;
854 quota->charge_addr_from = 0;
855 return true;
856 }
857 if (quota->charge_addr_from &&
858 r->ar.end <= quota->charge_addr_from)
859 return true;
860
861 if (quota->charge_addr_from && r->ar.start <
862 quota->charge_addr_from) {
863 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
864 r->ar.start, DAMON_MIN_REGION);
865 if (!sz_to_skip) {
866 if (damon_sz_region(r) <= DAMON_MIN_REGION)
867 return true;
868 sz_to_skip = DAMON_MIN_REGION;
869 }
870 damon_split_region_at(t, r, sz_to_skip);
871 r = damon_next_region(r);
872 *rp = r;
873 }
874 quota->charge_target_from = NULL;
875 quota->charge_addr_from = 0;
876 }
877 return false;
878}
879
880static void damos_update_stat(struct damos *s,
881 unsigned long sz_tried, unsigned long sz_applied)
882{
883 s->stat.nr_tried++;
884 s->stat.sz_tried += sz_tried;
885 if (sz_applied)
886 s->stat.nr_applied++;
887 s->stat.sz_applied += sz_applied;
888}
889
890static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
891 struct damon_region *r, struct damos_filter *filter)
892{
893 bool matched = false;
894 struct damon_target *ti;
895 int target_idx = 0;
896 unsigned long start, end;
897
898 switch (filter->type) {
899 case DAMOS_FILTER_TYPE_TARGET:
900 damon_for_each_target(ti, ctx) {
901 if (ti == t)
902 break;
903 target_idx++;
904 }
905 matched = target_idx == filter->target_idx;
906 break;
907 case DAMOS_FILTER_TYPE_ADDR:
908 start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
909 end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
910
911 /* inside the range */
912 if (start <= r->ar.start && r->ar.end <= end) {
913 matched = true;
914 break;
915 }
916 /* outside of the range */
917 if (r->ar.end <= start || end <= r->ar.start) {
918 matched = false;
919 break;
920 }
921 /* start before the range and overlap */
922 if (r->ar.start < start) {
923 damon_split_region_at(t, r, start - r->ar.start);
924 matched = false;
925 break;
926 }
927 /* start inside the range */
928 damon_split_region_at(t, r, end - r->ar.start);
929 matched = true;
930 break;
931 default:
932 return false;
933 }
934
935 return matched == filter->matching;
936}
937
938static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
939 struct damon_region *r, struct damos *s)
940{
941 struct damos_filter *filter;
942
943 damos_for_each_filter(filter, s) {
944 if (__damos_filter_out(ctx, t, r, filter))
945 return true;
946 }
947 return false;
948}
949
950static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
951 struct damon_region *r, struct damos *s)
952{
953 struct damos_quota *quota = &s->quota;
954 unsigned long sz = damon_sz_region(r);
955 struct timespec64 begin, end;
956 unsigned long sz_applied = 0;
957 int err = 0;
958 /*
959 * We plan to support multiple contexts per kdamond, as the DAMON sysfs
960 * interface implies with its 'nr_contexts' file. Nevertheless, only a single
961 * context per kdamond is supported for now. So, we can simply use '0' context
962 * index here.
963 */
964 unsigned int cidx = 0;
965 struct damos *siter; /* schemes iterator */
966 unsigned int sidx = 0;
967 struct damon_target *titer; /* targets iterator */
968 unsigned int tidx = 0;
969 bool do_trace = false;
970
971 /* get indices for trace_damos_before_apply() */
972 if (trace_damos_before_apply_enabled()) {
973 damon_for_each_scheme(siter, c) {
974 if (siter == s)
975 break;
976 sidx++;
977 }
978 damon_for_each_target(titer, c) {
979 if (titer == t)
980 break;
981 tidx++;
982 }
983 do_trace = true;
984 }
985
986 if (c->ops.apply_scheme) {
987 if (quota->esz && quota->charged_sz + sz > quota->esz) {
988 sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
989 DAMON_MIN_REGION);
990 if (!sz)
991 goto update_stat;
992 damon_split_region_at(t, r, sz);
993 }
994 if (damos_filter_out(c, t, r, s))
995 return;
996 ktime_get_coarse_ts64(&begin);
997 if (c->callback.before_damos_apply)
998 err = c->callback.before_damos_apply(c, t, r, s);
999 if (!err) {
1000 trace_damos_before_apply(cidx, sidx, tidx, r,
1001 damon_nr_regions(t), do_trace);
1002 sz_applied = c->ops.apply_scheme(c, t, r, s);
1003 }
1004 ktime_get_coarse_ts64(&end);
1005 quota->total_charged_ns += timespec64_to_ns(&end) -
1006 timespec64_to_ns(&begin);
1007 quota->charged_sz += sz;
1008 if (quota->esz && quota->charged_sz >= quota->esz) {
1009 quota->charge_target_from = t;
1010 quota->charge_addr_from = r->ar.end + 1;
1011 }
1012 }
1013 if (s->action != DAMOS_STAT)
1014 r->age = 0;
1015
1016update_stat:
1017 damos_update_stat(s, sz, sz_applied);
1018}
1019
1020static void damon_do_apply_schemes(struct damon_ctx *c,
1021 struct damon_target *t,
1022 struct damon_region *r)
1023{
1024 struct damos *s;
1025
1026 damon_for_each_scheme(s, c) {
1027 struct damos_quota *quota = &s->quota;
1028
1029 if (c->passed_sample_intervals != s->next_apply_sis)
1030 continue;
1031
1032 if (!s->wmarks.activated)
1033 continue;
1034
1035 /* Check the quota */
1036 if (quota->esz && quota->charged_sz >= quota->esz)
1037 continue;
1038
1039 if (damos_skip_charged_region(t, &r, s))
1040 continue;
1041
1042 if (!damos_valid_target(c, t, r, s))
1043 continue;
1044
1045 damos_apply_scheme(c, t, r, s);
1046 }
1047}
1048
1049/*
1050 * damon_feed_loop_next_input() - get next input to achieve a target score.
1051 * @last_input The last input.
1052 * @score Current score obtained with @last_input.
1053 *
1054 * Calculate next input to achieve the target score, based on the last input
1055 * and current score. Assuming the input and the score are positively
1056 * proportional, calculate how much compensation should be added to or
1057 * subtracted from the last input as a proportion of the last input. Avoid
1058 * the next input always being zero by keeping it non-zero. In short form
1059 * (assuming support of float and signed calculations), the algorithm is as
1060 * below.
1061 *
1062 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
1063 *
1064 * For simple implementation, we assume the target score is always 10,000. The
1065 * caller should adjust @score for this.
1066 *
1067 * Returns the next input that is assumed to achieve the target score.
1068 */
1069static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1070 unsigned long score)
1071{
1072 const unsigned long goal = 10000;
1073 unsigned long score_goal_diff = max(goal, score) - min(goal, score);
1074 unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal;
1075 unsigned long compensation = last_input * score_goal_diff_bp / 10000;
1076 /* Set the minimum input as 10000 to avoid the compensation being zero */
1077 const unsigned long min_input = 10000;
1078
1079 if (goal > score)
1080 return last_input + compensation;
1081 if (last_input > compensation + min_input)
1082 return last_input - compensation;
1083 return min_input;
1084}
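
/*
 * Worked example (illustrative numbers): with the assumed target score of
 * 10,000, a last input of 100,000 and a current score of 8,000, the
 * score/goal difference is 2,000 bp, so the compensation is
 * 100,000 * 2000 / 10000 = 20,000 and the next input becomes 120,000.  If the
 * score were 12,000 instead, the next input would be 100,000 - 20,000 =
 * 80,000.  The result never goes below the minimum input of 10,000.
 */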
1085
1086/* Shouldn't be called if quota->ms, quota->sz, and quota->get_score are unset */
1087static void damos_set_effective_quota(struct damos_quota *quota)
1088{
1089 unsigned long throughput;
1090 unsigned long esz;
1091
1092 if (!quota->ms && !quota->get_score) {
1093 quota->esz = quota->sz;
1094 return;
1095 }
1096
1097 if (quota->get_score) {
1098 quota->esz_bp = damon_feed_loop_next_input(
1099 max(quota->esz_bp, 10000UL),
1100 quota->get_score(quota->get_score_arg));
1101 esz = quota->esz_bp / 10000;
1102 }
1103
1104 if (quota->ms) {
1105 if (quota->total_charged_ns)
1106 throughput = quota->total_charged_sz * 1000000 /
1107 quota->total_charged_ns;
1108 else
1109 throughput = PAGE_SIZE * 1024;
1110 if (quota->get_score)
1111 esz = min(throughput * quota->ms, esz);
1112 else
1113 esz = throughput * quota->ms;
1114 }
1115
1116 if (quota->sz && quota->sz < esz)
1117 esz = quota->sz;
1118
1119 quota->esz = esz;
1120}
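
/*
 * Worked example (illustrative numbers, assuming no get_score feedback is
 * installed): if a scheme has charged 200 MiB over 1 second (1e9 ns) of
 * cumulative apply time, the throughput is 200 MiB * 1000000 / 1e9, i.e.,
 * about 0.2 MiB per millisecond.  With quota->ms == 10 the time quota alone
 * allows about 2 MiB per reset interval, and with quota->sz == 1 MiB the
 * effective quota (esz) is further capped to 1 MiB.
 */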
1121
1122static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
1123{
1124 struct damos_quota *quota = &s->quota;
1125 struct damon_target *t;
1126 struct damon_region *r;
1127 unsigned long cumulated_sz;
1128 unsigned int score, max_score = 0;
1129
1130 if (!quota->ms && !quota->sz && !quota->get_score)
1131 return;
1132
1133 /* New charge window starts */
1134 if (time_after_eq(jiffies, quota->charged_from +
1135 msecs_to_jiffies(quota->reset_interval))) {
1136 if (quota->esz && quota->charged_sz >= quota->esz)
1137 s->stat.qt_exceeds++;
1138 quota->total_charged_sz += quota->charged_sz;
1139 quota->charged_from = jiffies;
1140 quota->charged_sz = 0;
1141 damos_set_effective_quota(quota);
1142 }
1143
1144 if (!c->ops.get_scheme_score)
1145 return;
1146
1147 /* Fill up the score histogram */
1148 memset(quota->histogram, 0, sizeof(quota->histogram));
1149 damon_for_each_target(t, c) {
1150 damon_for_each_region(r, t) {
1151 if (!__damos_valid_target(r, s))
1152 continue;
1153 score = c->ops.get_scheme_score(c, t, r, s);
1154 quota->histogram[score] += damon_sz_region(r);
1155 if (score > max_score)
1156 max_score = score;
1157 }
1158 }
1159
1160 /* Set the min score limit */
1161 for (cumulated_sz = 0, score = max_score; ; score--) {
1162 cumulated_sz += quota->histogram[score];
1163 if (cumulated_sz >= quota->esz || !score)
1164 break;
1165 }
1166 quota->min_score = score;
1167}
1168
1169static void kdamond_apply_schemes(struct damon_ctx *c)
1170{
1171 struct damon_target *t;
1172 struct damon_region *r, *next_r;
1173 struct damos *s;
1174 unsigned long sample_interval = c->attrs.sample_interval ?
1175 c->attrs.sample_interval : 1;
1176 bool has_schemes_to_apply = false;
1177
1178 damon_for_each_scheme(s, c) {
1179 if (c->passed_sample_intervals != s->next_apply_sis)
1180 continue;
1181
1182 if (!s->wmarks.activated)
1183 continue;
1184
1185 has_schemes_to_apply = true;
1186
1187 damos_adjust_quota(c, s);
1188 }
1189
1190 if (!has_schemes_to_apply)
1191 return;
1192
1193 damon_for_each_target(t, c) {
1194 damon_for_each_region_safe(r, next_r, t)
1195 damon_do_apply_schemes(c, t, r);
1196 }
1197
1198 damon_for_each_scheme(s, c) {
1199 if (c->passed_sample_intervals != s->next_apply_sis)
1200 continue;
1201 s->next_apply_sis +=
1202 (s->apply_interval_us ? s->apply_interval_us :
1203 c->attrs.aggr_interval) / sample_interval;
1204 }
1205}
1206
1207/*
1208 * Merge two adjacent regions into one region
1209 */
1210static void damon_merge_two_regions(struct damon_target *t,
1211 struct damon_region *l, struct damon_region *r)
1212{
1213 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
1214
1215 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
1216 (sz_l + sz_r);
1217 l->nr_accesses_bp = l->nr_accesses * 10000;
1218 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
1219 l->ar.end = r->ar.end;
1220 damon_destroy_region(r, t);
1221}
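
/*
 * Worked example (illustrative numbers): merging a 4 KiB region with
 * nr_accesses == 8 (left) and an adjacent 12 KiB region with nr_accesses == 4
 * (right) yields a 16 KiB region with the size-weighted average
 * nr_accesses == (8 * 4096 + 4 * 12288) / 16384 == 5, hence
 * nr_accesses_bp == 50000.  The age is averaged the same way.
 */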
1222
1223/*
1224 * Merge adjacent regions having similar access frequencies
1225 *
1226 * t target affected by this merge operation
1227 * thres '->nr_accesses' diff threshold for the merge
1228 * sz_limit size upper limit of each region
1229 */
1230static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
1231 unsigned long sz_limit)
1232{
1233 struct damon_region *r, *prev = NULL, *next;
1234
1235 damon_for_each_region_safe(r, next, t) {
1236 if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
1237 r->age = 0;
1238 else
1239 r->age++;
1240
1241 if (prev && prev->ar.end == r->ar.start &&
1242 abs(prev->nr_accesses - r->nr_accesses) <= thres &&
1243 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
1244 damon_merge_two_regions(t, prev, r);
1245 else
1246 prev = r;
1247 }
1248}
1249
1250/*
1251 * Merge adjacent regions having similar access frequencies
1252 *
1253 * threshold '->nr_accesses' diff threshold for the merge
1254 * sz_limit size upper limit of each region
1255 *
1256 * This function merges monitoring target regions which are adjacent and whose
1257 * access frequencies are similar. This is for minimizing the monitoring
1258 * overhead under the dynamically changeable access pattern. If a merge was
1259 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
1260 */
1261static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
1262 unsigned long sz_limit)
1263{
1264 struct damon_target *t;
1265
1266 damon_for_each_target(t, c)
1267 damon_merge_regions_of(t, threshold, sz_limit);
1268}
1269
1270/*
1271 * Split a region in two
1272 *
1273 * r the region to be split
1274 * sz_r size of the first sub-region that will be made
1275 */
1276static void damon_split_region_at(struct damon_target *t,
1277 struct damon_region *r, unsigned long sz_r)
1278{
1279 struct damon_region *new;
1280
1281 new = damon_new_region(r->ar.start + sz_r, r->ar.end);
1282 if (!new)
1283 return;
1284
1285 r->ar.end = new->ar.start;
1286
1287 new->age = r->age;
1288 new->last_nr_accesses = r->last_nr_accesses;
1289 new->nr_accesses_bp = r->nr_accesses_bp;
1290 new->nr_accesses = r->nr_accesses;
1291
1292 damon_insert_region(new, r, damon_next_region(r), t);
1293}
1294
1295/* Split every region in the given target into 'nr_subs' regions */
1296static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1297{
1298 struct damon_region *r, *next;
1299 unsigned long sz_region, sz_sub = 0;
1300 int i;
1301
1302 damon_for_each_region_safe(r, next, t) {
1303 sz_region = damon_sz_region(r);
1304
1305 for (i = 0; i < nr_subs - 1 &&
1306 sz_region > 2 * DAMON_MIN_REGION; i++) {
1307 /*
1308 * Randomly select the size of the left sub-region to be
1309 * at least 10% and at most 90% of the original region
1310 */
1311 sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1312 sz_region / 10, DAMON_MIN_REGION);
1313 /* Do not allow blank region */
1314 if (sz_sub == 0 || sz_sub >= sz_region)
1315 continue;
1316
1317 damon_split_region_at(t, r, sz_sub);
1318 sz_region = sz_sub;
1319 }
1320 }
1321}
1322
1323/*
1324 * Split every target region into randomly-sized small regions
1325 *
1326 * This function splits every target region into random-sized small regions if
1327 * current total number of the regions is equal to or smaller than half of the
1328 * user-specified maximum number of regions. This is for maximizing the
1329 * monitoring accuracy under the dynamically changeable access patterns. If a
1330 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1331 * it.
1332 */
1333static void kdamond_split_regions(struct damon_ctx *ctx)
1334{
1335 struct damon_target *t;
1336 unsigned int nr_regions = 0;
1337 static unsigned int last_nr_regions;
1338 int nr_subregions = 2;
1339
1340 damon_for_each_target(t, ctx)
1341 nr_regions += damon_nr_regions(t);
1342
1343 if (nr_regions > ctx->attrs.max_nr_regions / 2)
1344 return;
1345
1346 /* Maybe the middle of the region has different access frequency */
1347 if (last_nr_regions == nr_regions &&
1348 nr_regions < ctx->attrs.max_nr_regions / 3)
1349 nr_subregions = 3;
1350
1351 damon_for_each_target(t, ctx)
1352 damon_split_regions_of(t, nr_subregions);
1353
1354 last_nr_regions = nr_regions;
1355}
1356
1357/*
1358 * Check whether current monitoring should be stopped
1359 *
1360 * The monitoring is stopped when either the user requested to stop, or all
1361 * monitoring targets are invalid.
1362 *
1363 * Returns true if the current monitoring needs to be stopped.
1364 */
1365static bool kdamond_need_stop(struct damon_ctx *ctx)
1366{
1367 struct damon_target *t;
1368
1369 if (kthread_should_stop())
1370 return true;
1371
1372 if (!ctx->ops.target_valid)
1373 return false;
1374
1375 damon_for_each_target(t, ctx) {
1376 if (ctx->ops.target_valid(t))
1377 return false;
1378 }
1379
1380 return true;
1381}
1382
1383static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
1384{
1385 switch (metric) {
1386 case DAMOS_WMARK_FREE_MEM_RATE:
1387 return global_zone_page_state(NR_FREE_PAGES) * 1000 /
1388 totalram_pages();
1389 default:
1390 break;
1391 }
1392 return -EINVAL;
1393}
1394
1395/*
1396 * Returns zero if the scheme is active. Else, returns time to wait for next
1397 * watermark check in micro-seconds.
1398 */
1399static unsigned long damos_wmark_wait_us(struct damos *scheme)
1400{
1401 unsigned long metric;
1402
1403 if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
1404 return 0;
1405
1406 metric = damos_wmark_metric_value(scheme->wmarks.metric);
1407 /* higher than high watermark or lower than low watermark */
1408 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
1409 if (scheme->wmarks.activated)
1410 pr_debug("deactivate a scheme (%d) for %s wmark\n",
1411 scheme->action,
1412 metric > scheme->wmarks.high ?
1413 "high" : "low");
1414 scheme->wmarks.activated = false;
1415 return scheme->wmarks.interval;
1416 }
1417
1418 /* inactive and higher than middle watermark */
1419 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
1420 !scheme->wmarks.activated)
1421 return scheme->wmarks.interval;
1422
1423 if (!scheme->wmarks.activated)
1424 pr_debug("activate a scheme (%d)\n", scheme->action);
1425 scheme->wmarks.activated = true;
1426 return 0;
1427}
1428
1429static void kdamond_usleep(unsigned long usecs)
1430{
1431 /* See Documentation/timers/timers-howto.rst for the thresholds */
1432 if (usecs > 20 * USEC_PER_MSEC)
1433 schedule_timeout_idle(usecs_to_jiffies(usecs));
1434 else
1435 usleep_idle_range(usecs, usecs + 1);
1436}
1437
1438/* Returns negative error code if it's not activated but should return */
1439static int kdamond_wait_activation(struct damon_ctx *ctx)
1440{
1441 struct damos *s;
1442 unsigned long wait_time;
1443 unsigned long min_wait_time = 0;
1444 bool init_wait_time = false;
1445
1446 while (!kdamond_need_stop(ctx)) {
1447 damon_for_each_scheme(s, ctx) {
1448 wait_time = damos_wmark_wait_us(s);
1449 if (!init_wait_time || wait_time < min_wait_time) {
1450 init_wait_time = true;
1451 min_wait_time = wait_time;
1452 }
1453 }
1454 if (!min_wait_time)
1455 return 0;
1456
1457 kdamond_usleep(min_wait_time);
1458
1459 if (ctx->callback.after_wmarks_check &&
1460 ctx->callback.after_wmarks_check(ctx))
1461 break;
1462 }
1463 return -EBUSY;
1464}
1465
1466static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
1467{
1468 unsigned long sample_interval = ctx->attrs.sample_interval ?
1469 ctx->attrs.sample_interval : 1;
1470 unsigned long apply_interval;
1471 struct damos *scheme;
1472
1473 ctx->passed_sample_intervals = 0;
1474 ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
1475 ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
1476 sample_interval;
1477
1478 damon_for_each_scheme(scheme, ctx) {
1479 apply_interval = scheme->apply_interval_us ?
1480 scheme->apply_interval_us : ctx->attrs.aggr_interval;
1481 scheme->next_apply_sis = apply_interval / sample_interval;
1482 }
1483}
1484
1485/*
1486 * The monitoring daemon that runs as a kernel thread
1487 */
1488static int kdamond_fn(void *data)
1489{
1490 struct damon_ctx *ctx = data;
1491 struct damon_target *t;
1492 struct damon_region *r, *next;
1493 unsigned int max_nr_accesses = 0;
1494 unsigned long sz_limit = 0;
1495
1496 pr_debug("kdamond (%d) starts\n", current->pid);
1497
1498 complete(&ctx->kdamond_started);
1499 kdamond_init_intervals_sis(ctx);
1500
1501 if (ctx->ops.init)
1502 ctx->ops.init(ctx);
1503 if (ctx->callback.before_start && ctx->callback.before_start(ctx))
1504 goto done;
1505
1506 sz_limit = damon_region_sz_limit(ctx);
1507
1508 while (!kdamond_need_stop(ctx)) {
1509 /*
1510 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
1511 * be changed from after_wmarks_check() or after_aggregation()
1512 * callbacks. Read the values here, and use those for this
1513 * iteration. That is, new values updated via damon_set_attrs() are
1514 * respected from the next iteration.
1515 */
1516 unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
1517 unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
1518 unsigned long sample_interval = ctx->attrs.sample_interval;
1519
1520 if (kdamond_wait_activation(ctx))
1521 break;
1522
1523 if (ctx->ops.prepare_access_checks)
1524 ctx->ops.prepare_access_checks(ctx);
1525 if (ctx->callback.after_sampling &&
1526 ctx->callback.after_sampling(ctx))
1527 break;
1528
1529 kdamond_usleep(sample_interval);
1530 ctx->passed_sample_intervals++;
1531
1532 if (ctx->ops.check_accesses)
1533 max_nr_accesses = ctx->ops.check_accesses(ctx);
1534
1535 if (ctx->passed_sample_intervals == next_aggregation_sis) {
1536 kdamond_merge_regions(ctx,
1537 max_nr_accesses / 10,
1538 sz_limit);
1539 if (ctx->callback.after_aggregation &&
1540 ctx->callback.after_aggregation(ctx))
1541 break;
1542 }
1543
1544 /*
1545 * do kdamond_apply_schemes() after kdamond_merge_regions() if
1546 * possible, to reduce overhead
1547 */
1548 if (!list_empty(&ctx->schemes))
1549 kdamond_apply_schemes(ctx);
1550
1551 sample_interval = ctx->attrs.sample_interval ?
1552 ctx->attrs.sample_interval : 1;
1553 if (ctx->passed_sample_intervals == next_aggregation_sis) {
1554 ctx->next_aggregation_sis = next_aggregation_sis +
1555 ctx->attrs.aggr_interval / sample_interval;
1556
1557 kdamond_reset_aggregated(ctx);
1558 kdamond_split_regions(ctx);
1559 if (ctx->ops.reset_aggregated)
1560 ctx->ops.reset_aggregated(ctx);
1561 }
1562
1563 if (ctx->passed_sample_intervals == next_ops_update_sis) {
1564 ctx->next_ops_update_sis = next_ops_update_sis +
1565 ctx->attrs.ops_update_interval /
1566 sample_interval;
1567 if (ctx->ops.update)
1568 ctx->ops.update(ctx);
1569 sz_limit = damon_region_sz_limit(ctx);
1570 }
1571 }
1572done:
1573 damon_for_each_target(t, ctx) {
1574 damon_for_each_region_safe(r, next, t)
1575 damon_destroy_region(r, t);
1576 }
1577
1578 if (ctx->callback.before_terminate)
1579 ctx->callback.before_terminate(ctx);
1580 if (ctx->ops.cleanup)
1581 ctx->ops.cleanup(ctx);
1582
1583 pr_debug("kdamond (%d) finishes\n", current->pid);
1584 mutex_lock(&ctx->kdamond_lock);
1585 ctx->kdamond = NULL;
1586 mutex_unlock(&ctx->kdamond_lock);
1587
1588 mutex_lock(&damon_lock);
1589 nr_running_ctxs--;
1590 if (!nr_running_ctxs && running_exclusive_ctxs)
1591 running_exclusive_ctxs = false;
1592 mutex_unlock(&damon_lock);
1593
1594 return 0;
1595}
1596
1597/*
1598 * struct damon_system_ram_region - System RAM resource address region of
1599 * [@start, @end).
1600 * @start: Start address of the region (inclusive).
1601 * @end: End address of the region (exclusive).
1602 */
1603struct damon_system_ram_region {
1604 unsigned long start;
1605 unsigned long end;
1606};
1607
1608static int walk_system_ram(struct resource *res, void *arg)
1609{
1610 struct damon_system_ram_region *a = arg;
1611
1612 if (a->end - a->start < resource_size(res)) {
1613 a->start = res->start;
1614 a->end = res->end;
1615 }
1616 return 0;
1617}
1618
1619/*
1620 * Find biggest 'System RAM' resource and store its start and end address in
1621 * @start and @end, respectively. If no System RAM is found, returns false.
1622 */
1623static bool damon_find_biggest_system_ram(unsigned long *start,
1624 unsigned long *end)
1625
1626{
1627 struct damon_system_ram_region arg = {};
1628
1629 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
1630 if (arg.end <= arg.start)
1631 return false;
1632
1633 *start = arg.start;
1634 *end = arg.end;
1635 return true;
1636}
1637
1638/**
1639 * damon_set_region_biggest_system_ram_default() - Set the region of the given
1640 * monitoring target as requested, or biggest 'System RAM'.
1641 * @t: The monitoring target to set the region.
1642 * @start: The pointer to the start address of the region.
1643 * @end: The pointer to the end address of the region.
1644 *
1645 * This function sets the region of @t as requested by @start and @end. If the
1646 * values of @start and @end are zero, however, this function finds the biggest
1647 * 'System RAM' resource and sets the region to cover the resource. In the
1648 * latter case, this function saves the start and end addresses of the resource
1649 * in @start and @end, respectively.
1650 *
1651 * Return: 0 on success, negative error code otherwise.
1652 */
1653int damon_set_region_biggest_system_ram_default(struct damon_target *t,
1654 unsigned long *start, unsigned long *end)
1655{
1656 struct damon_addr_range addr_range;
1657
1658 if (*start > *end)
1659 return -EINVAL;
1660
1661 if (!*start && !*end &&
1662 !damon_find_biggest_system_ram(start, end))
1663 return -EINVAL;
1664
1665 addr_range.start = *start;
1666 addr_range.end = *end;
1667 return damon_set_regions(t, &addr_range, 1);
1668}
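
/*
 * Usage sketch (illustrative): letting DAMON pick the biggest 'System RAM'
 * resource as the monitoring region of a target 't'.
 *
 *	unsigned long start = 0, end = 0;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *	... on success, start and end hold the chosen resource boundaries ...
 */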
1669
1670/*
1671 * damon_moving_sum() - Calculate an inferred moving sum value.
1672 * @mvsum: Inferred sum of the last @len_window values.
1673 * @nomvsum: Non-moving sum of the last discrete @len_window window values.
1674 * @len_window: The number of last values to take care of.
1675 * @new_value: New value that will be added to the pseudo moving sum.
1676 *
1677 * Moving sum (moving average * window size) is good for handling noise, but
1678 * the cost of keeping past values can be high for arbitrary window size. This
1679 * function implements a lightweight pseudo moving sum function that doesn't
1680 * keep the past window values.
1681 *
1682 * It simply assumes there was no noise in the past, and gets the assumed
1683 * no-noise past value to drop from @nomvsum and @len_window. @nomvsum is a
1684 * non-moving sum of the last window. For example, if @len_window is 10 and we
1685 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
1686 * values. Hence, this function simply drops @nomvsum / @len_window from the
1687 * given @mvsum and adds @new_value.
1688 *
1689 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
1690 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For
1691 * calculating next moving sum with a new value, we should drop 0 from 50 and
1692 * add the new value. However, this function assumes it got value 5 for each
1693 * of the last ten times. Based on the assumption, when the next value is
1694 * measured, it drops the assumed past value, 5, from the current sum, and adds
1695 * the new value to get the updated pseudo-moving sum.
1696 *
1697 * This means the value could have errors, but the errors will disappear
1698 * at every @len_window-aligned call. For example, if @len_window is 10, the
1699 * pseudo moving sum with the 11th to 19th values would have an error, but
1700 * the sum with the 20th value will not have the error.
1701 *
1702 * Return: Pseudo-moving sum after getting the @new_value.
1703 */
1704static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
1705 unsigned int len_window, unsigned int new_value)
1706{
1707 return mvsum - nomvsum / len_window + new_value;
1708}
1709
1710/**
1711 * damon_update_region_access_rate() - Update the access rate of a region.
1712 * @r: The DAMON region to update for its access check result.
1713 * @accessed: Whether the region was accessed during the last sampling interval.
1714 * @attrs: The damon_attrs of the DAMON context.
1715 *
1716 * Update the access rate of a region with the region's last sampling interval
1717 * access check result.
1718 *
1719 * Usually this will be called by &damon_operations->check_accesses callback.
1720 */
1721void damon_update_region_access_rate(struct damon_region *r, bool accessed,
1722 struct damon_attrs *attrs)
1723{
1724 unsigned int len_window = 1;
1725
1726 /*
1727 * sample_interval can be zero, but cannot be larger than
1728 * aggr_interval, owing to validation of damon_set_attrs().
1729 */
1730 if (attrs->sample_interval)
1731 len_window = damon_max_nr_accesses(attrs);
1732 r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
1733 r->last_nr_accesses * 10000, len_window,
1734 accessed ? 10000 : 0);
1735
1736 if (accessed)
1737 r->nr_accesses++;
1738}
1739
1740static int __init damon_init(void)
1741{
1742 damon_region_cache = KMEM_CACHE(damon_region, 0);
1743 if (unlikely(!damon_region_cache)) {
1744 pr_err("creating damon_region_cache fails\n");
1745 return -ENOMEM;
1746 }
1747
1748 return 0;
1749}
1750
1751subsys_initcall(damon_init);
1752
1753#include "core-test.h"
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Data Access Monitor
4 *
5 * Author: SeongJae Park <sj@kernel.org>
6 */
7
8#define pr_fmt(fmt) "damon: " fmt
9
10#include <linux/damon.h>
11#include <linux/delay.h>
12#include <linux/kthread.h>
13#include <linux/mm.h>
14#include <linux/psi.h>
15#include <linux/slab.h>
16#include <linux/string.h>
17
18#define CREATE_TRACE_POINTS
19#include <trace/events/damon.h>
20
21#ifdef CONFIG_DAMON_KUNIT_TEST
22#undef DAMON_MIN_REGION
23#define DAMON_MIN_REGION 1
24#endif
25
26static DEFINE_MUTEX(damon_lock);
27static int nr_running_ctxs;
28static bool running_exclusive_ctxs;
29
30static DEFINE_MUTEX(damon_ops_lock);
31static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
32
33static struct kmem_cache *damon_region_cache __ro_after_init;
34
35/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
36static bool __damon_is_registered_ops(enum damon_ops_id id)
37{
38 struct damon_operations empty_ops = {};
39
40 if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
41 return false;
42 return true;
43}
44
45/**
46 * damon_is_registered_ops() - Check if a given damon_operations is registered.
47 * @id: Id of the damon_operations to check if registered.
48 *
49 * Return: true if the ops is set, false otherwise.
50 */
51bool damon_is_registered_ops(enum damon_ops_id id)
52{
53 bool registered;
54
55 if (id >= NR_DAMON_OPS)
56 return false;
57 mutex_lock(&damon_ops_lock);
58 registered = __damon_is_registered_ops(id);
59 mutex_unlock(&damon_ops_lock);
60 return registered;
61}
62
63/**
64 * damon_register_ops() - Register a monitoring operations set to DAMON.
65 * @ops: monitoring operations set to register.
66 *
67 * This function registers a monitoring operations set of valid &struct
68 * damon_operations->id so that others can find and use them later.
69 *
70 * Return: 0 on success, negative error code otherwise.
71 */
72int damon_register_ops(struct damon_operations *ops)
73{
74 int err = 0;
75
76 if (ops->id >= NR_DAMON_OPS)
77 return -EINVAL;
78 mutex_lock(&damon_ops_lock);
79 /* Fail for already registered ops */
80 if (__damon_is_registered_ops(ops->id)) {
81 err = -EINVAL;
82 goto out;
83 }
84 damon_registered_ops[ops->id] = *ops;
85out:
86 mutex_unlock(&damon_ops_lock);
87 return err;
88}
89
90/**
91 * damon_select_ops() - Select a monitoring operations to use with the context.
92 * @ctx: monitoring context to use the operations.
93 * @id: id of the registered monitoring operations to select.
94 *
95 * This function finds registered monitoring operations set of @id and make
96 * @ctx to use it.
97 *
98 * Return: 0 on success, negative error code otherwise.
99 */
100int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
101{
102 int err = 0;
103
104 if (id >= NR_DAMON_OPS)
105 return -EINVAL;
106
107 mutex_lock(&damon_ops_lock);
108 if (!__damon_is_registered_ops(id))
109 err = -EINVAL;
110 else
111 ctx->ops = damon_registered_ops[id];
112 mutex_unlock(&damon_ops_lock);
113 return err;
114}
115
116/*
117 * Construct a damon_region struct
118 *
119 * Returns the pointer to the new struct if success, or NULL otherwise
120 */
121struct damon_region *damon_new_region(unsigned long start, unsigned long end)
122{
123 struct damon_region *region;
124
125 region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
126 if (!region)
127 return NULL;
128
129 region->ar.start = start;
130 region->ar.end = end;
131 region->nr_accesses = 0;
132 region->nr_accesses_bp = 0;
133 INIT_LIST_HEAD(®ion->list);
134
135 region->age = 0;
136 region->last_nr_accesses = 0;
137
138 return region;
139}
140
141void damon_add_region(struct damon_region *r, struct damon_target *t)
142{
143 list_add_tail(&r->list, &t->regions_list);
144 t->nr_regions++;
145}
146
147static void damon_del_region(struct damon_region *r, struct damon_target *t)
148{
149 list_del(&r->list);
150 t->nr_regions--;
151}
152
153static void damon_free_region(struct damon_region *r)
154{
155 kmem_cache_free(damon_region_cache, r);
156}
157
158void damon_destroy_region(struct damon_region *r, struct damon_target *t)
159{
160 damon_del_region(r, t);
161 damon_free_region(r);
162}
163
164/*
165 * Check whether a region is intersecting an address range
166 *
167 * Returns true if it is.
168 */
169static bool damon_intersect(struct damon_region *r,
170 struct damon_addr_range *re)
171{
172 return !(r->ar.end <= re->start || re->end <= r->ar.start);
173}
174
175/*
176 * Fill holes in regions with new regions.
177 */
178static int damon_fill_regions_holes(struct damon_region *first,
179 struct damon_region *last, struct damon_target *t)
180{
181 struct damon_region *r = first;
182
183 damon_for_each_region_from(r, t) {
184 struct damon_region *next, *newr;
185
186 if (r == last)
187 break;
188 next = damon_next_region(r);
189 if (r->ar.end != next->ar.start) {
190 newr = damon_new_region(r->ar.end, next->ar.start);
191 if (!newr)
192 return -ENOMEM;
193 damon_insert_region(newr, r, next, t);
194 }
195 }
196 return 0;
197}
198
199/*
200 * damon_set_regions() - Set regions of a target for given address ranges.
201 * @t: the given target.
202 * @ranges: array of new monitoring target ranges.
203 * @nr_ranges: length of @ranges.
204 *
 205 * This function adds new regions to, or modifies existing regions of, a
 206 * monitoring target so that they fit in the given address ranges.
 207 *
 208 * Return: 0 on success, or a negative error code otherwise.
209 */
210int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
211 unsigned int nr_ranges)
212{
213 struct damon_region *r, *next;
214 unsigned int i;
215 int err;
216
217 /* Remove regions which are not in the new ranges */
218 damon_for_each_region_safe(r, next, t) {
219 for (i = 0; i < nr_ranges; i++) {
220 if (damon_intersect(r, &ranges[i]))
221 break;
222 }
223 if (i == nr_ranges)
224 damon_destroy_region(r, t);
225 }
226
227 r = damon_first_region(t);
228 /* Add new regions or resize existing regions to fit in the ranges */
229 for (i = 0; i < nr_ranges; i++) {
230 struct damon_region *first = NULL, *last, *newr;
231 struct damon_addr_range *range;
232
233 range = &ranges[i];
234 /* Get the first/last regions intersecting with the range */
235 damon_for_each_region_from(r, t) {
236 if (damon_intersect(r, range)) {
237 if (!first)
238 first = r;
239 last = r;
240 }
241 if (r->ar.start >= range->end)
242 break;
243 }
244 if (!first) {
245 /* no region intersects with this range */
246 newr = damon_new_region(
247 ALIGN_DOWN(range->start,
248 DAMON_MIN_REGION),
249 ALIGN(range->end, DAMON_MIN_REGION));
250 if (!newr)
251 return -ENOMEM;
252 damon_insert_region(newr, damon_prev_region(r), r, t);
253 } else {
254 /* resize intersecting regions to fit in this range */
255 first->ar.start = ALIGN_DOWN(range->start,
256 DAMON_MIN_REGION);
257 last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
258
259 /* fill possible holes in the range */
260 err = damon_fill_regions_holes(first, last, t);
261 if (err)
262 return err;
263 }
264 }
265 return 0;
266}
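
/*
 * Worked example (illustrative numbers only, assuming DAMON_MIN_REGION is
 * 4096): if @t has regions [0, 8192) and [20480, 24576) and @ranges holds the
 * single range [4100, 22000), both regions intersect the range and hence
 * neither is destroyed.  The first region's start is moved to
 * ALIGN_DOWN(4100, 4096) == 4096, the last region's end stays at
 * ALIGN(22000, 4096) == 24576, and the hole [8192, 20480) between the two is
 * filled with a new region by damon_fill_regions_holes().
 */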
267
268struct damos_filter *damos_new_filter(enum damos_filter_type type,
269 bool matching)
270{
271 struct damos_filter *filter;
272
273 filter = kmalloc(sizeof(*filter), GFP_KERNEL);
274 if (!filter)
275 return NULL;
276 filter->type = type;
277 filter->matching = matching;
278 INIT_LIST_HEAD(&filter->list);
279 return filter;
280}
281
282void damos_add_filter(struct damos *s, struct damos_filter *f)
283{
284 list_add_tail(&f->list, &s->filters);
285}
286
287static void damos_del_filter(struct damos_filter *f)
288{
289 list_del(&f->list);
290}
291
292static void damos_free_filter(struct damos_filter *f)
293{
294 kfree(f);
295}
296
297void damos_destroy_filter(struct damos_filter *f)
298{
299 damos_del_filter(f);
300 damos_free_filter(f);
301}
302
303struct damos_quota_goal *damos_new_quota_goal(
304 enum damos_quota_goal_metric metric,
305 unsigned long target_value)
306{
307 struct damos_quota_goal *goal;
308
309 goal = kmalloc(sizeof(*goal), GFP_KERNEL);
310 if (!goal)
311 return NULL;
312 goal->metric = metric;
313 goal->target_value = target_value;
314 INIT_LIST_HEAD(&goal->list);
315 return goal;
316}
317
318void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
319{
320 list_add_tail(&g->list, &q->goals);
321}
322
323static void damos_del_quota_goal(struct damos_quota_goal *g)
324{
325 list_del(&g->list);
326}
327
328static void damos_free_quota_goal(struct damos_quota_goal *g)
329{
330 kfree(g);
331}
332
333void damos_destroy_quota_goal(struct damos_quota_goal *g)
334{
335 damos_del_quota_goal(g);
336 damos_free_quota_goal(g);
337}
338
339/* initialize fields of @quota that normally API users wouldn't set */
340static struct damos_quota *damos_quota_init(struct damos_quota *quota)
341{
342 quota->esz = 0;
343 quota->total_charged_sz = 0;
344 quota->total_charged_ns = 0;
345 quota->charged_sz = 0;
346 quota->charged_from = 0;
347 quota->charge_target_from = NULL;
348 quota->charge_addr_from = 0;
349 return quota;
350}
351
352struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
353 enum damos_action action,
354 unsigned long apply_interval_us,
355 struct damos_quota *quota,
356 struct damos_watermarks *wmarks)
357{
358 struct damos *scheme;
359
360 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
361 if (!scheme)
362 return NULL;
363 scheme->pattern = *pattern;
364 scheme->action = action;
365 scheme->apply_interval_us = apply_interval_us;
366 /*
 367	 * next_apply_sis will be set when kdamond starts. While kdamond is
 368	 * running, it will also be updated when the scheme is added to the
 369	 * DAMON context, or when damon_attrs are updated.
370 */
371 scheme->next_apply_sis = 0;
372 INIT_LIST_HEAD(&scheme->filters);
373 scheme->stat = (struct damos_stat){};
374 INIT_LIST_HEAD(&scheme->list);
375
376 scheme->quota = *(damos_quota_init(quota));
377 /* quota.goals should be separately set by caller */
378 INIT_LIST_HEAD(&scheme->quota.goals);
379
380 scheme->wmarks = *wmarks;
381 scheme->wmarks.activated = true;
382
383 return scheme;
384}
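
/*
 * Illustrative sketch (not built as part of this file): constructing a DAMOS
 * scheme with the helpers above.  The concrete numbers, the DAMOS_PAGEOUT
 * action, and the filter address range are assumptions for illustration.
 * Note that quota goals are added to &scheme->quota rather than to the local
 * quota template, since damon_new_scheme() re-initializes the goals list of
 * the copied quota.
 */
#if 0
static struct damos *example_build_cold_pageout_scheme(void)
{
	struct damos_access_pattern pattern = {
		.min_sz_region = DAMON_MIN_REGION,
		.max_sz_region = ULONG_MAX,
		.min_nr_accesses = 0,
		.max_nr_accesses = 0,		/* not accessed at all */
		.min_age_region = 10,		/* for at least 10 aggregations */
		.max_age_region = UINT_MAX,
	};
	struct damos_quota quota = {
		.ms = 10,			/* use up to 10ms of apply time */
		.sz = 128 * 1024 * 1024,	/* and up to 128 MiB */
		.reset_interval = 1000,		/* per second */
	};
	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_NONE,	/* always active */
	};
	struct damos_quota_goal *goal;
	struct damos_filter *filter;
	struct damos *scheme;

	scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota, &wmarks);
	if (!scheme)
		return NULL;

	/* feedback goal: roughly 1000us of 'some' memory PSI per window */
	goal = damos_new_quota_goal(DAMOS_QUOTA_SOME_MEM_PSI_US, 1000);
	if (goal)
		damos_add_quota_goal(&scheme->quota, goal);

	/* restrict the action to an assumed address range */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, false);
	if (filter) {
		filter->addr_range.start = 0x40000000;
		filter->addr_range.end = 0x80000000;
		damos_add_filter(scheme, filter);
	}
	return scheme;
}
#endif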
385
386static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
387{
388 unsigned long sample_interval = ctx->attrs.sample_interval ?
389 ctx->attrs.sample_interval : 1;
390 unsigned long apply_interval = s->apply_interval_us ?
391 s->apply_interval_us : ctx->attrs.aggr_interval;
392
393 s->next_apply_sis = ctx->passed_sample_intervals +
394 apply_interval / sample_interval;
395}
396
397void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
398{
399 list_add_tail(&s->list, &ctx->schemes);
400 damos_set_next_apply_sis(s, ctx);
401}
402
403static void damon_del_scheme(struct damos *s)
404{
405 list_del(&s->list);
406}
407
408static void damon_free_scheme(struct damos *s)
409{
410 kfree(s);
411}
412
413void damon_destroy_scheme(struct damos *s)
414{
415 struct damos_quota_goal *g, *g_next;
416 struct damos_filter *f, *next;
417
418 damos_for_each_quota_goal_safe(g, g_next, &s->quota)
419 damos_destroy_quota_goal(g);
420
421 damos_for_each_filter_safe(f, next, s)
422 damos_destroy_filter(f);
423 damon_del_scheme(s);
424 damon_free_scheme(s);
425}
426
427/*
428 * Construct a damon_target struct
429 *
 430 * Returns the pointer to the new struct on success, or NULL otherwise
431 */
432struct damon_target *damon_new_target(void)
433{
434 struct damon_target *t;
435
436 t = kmalloc(sizeof(*t), GFP_KERNEL);
437 if (!t)
438 return NULL;
439
440 t->pid = NULL;
441 t->nr_regions = 0;
442 INIT_LIST_HEAD(&t->regions_list);
443 INIT_LIST_HEAD(&t->list);
444
445 return t;
446}
447
448void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
449{
450 list_add_tail(&t->list, &ctx->adaptive_targets);
451}
452
453bool damon_targets_empty(struct damon_ctx *ctx)
454{
455 return list_empty(&ctx->adaptive_targets);
456}
457
458static void damon_del_target(struct damon_target *t)
459{
460 list_del(&t->list);
461}
462
463void damon_free_target(struct damon_target *t)
464{
465 struct damon_region *r, *next;
466
467 damon_for_each_region_safe(r, next, t)
468 damon_free_region(r);
469 kfree(t);
470}
471
472void damon_destroy_target(struct damon_target *t)
473{
474 damon_del_target(t);
475 damon_free_target(t);
476}
477
478unsigned int damon_nr_regions(struct damon_target *t)
479{
480 return t->nr_regions;
481}
482
483struct damon_ctx *damon_new_ctx(void)
484{
485 struct damon_ctx *ctx;
486
487 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
488 if (!ctx)
489 return NULL;
490
491 init_completion(&ctx->kdamond_started);
492
493 ctx->attrs.sample_interval = 5 * 1000;
494 ctx->attrs.aggr_interval = 100 * 1000;
495 ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
496
497 ctx->passed_sample_intervals = 0;
498 /* These will be set from kdamond_init_intervals_sis() */
499 ctx->next_aggregation_sis = 0;
500 ctx->next_ops_update_sis = 0;
501
502 mutex_init(&ctx->kdamond_lock);
503
504 ctx->attrs.min_nr_regions = 10;
505 ctx->attrs.max_nr_regions = 1000;
506
507 INIT_LIST_HEAD(&ctx->adaptive_targets);
508 INIT_LIST_HEAD(&ctx->schemes);
509
510 return ctx;
511}
512
513static void damon_destroy_targets(struct damon_ctx *ctx)
514{
515 struct damon_target *t, *next_t;
516
517 if (ctx->ops.cleanup) {
518 ctx->ops.cleanup(ctx);
519 return;
520 }
521
522 damon_for_each_target_safe(t, next_t, ctx)
523 damon_destroy_target(t);
524}
525
526void damon_destroy_ctx(struct damon_ctx *ctx)
527{
528 struct damos *s, *next_s;
529
530 damon_destroy_targets(ctx);
531
532 damon_for_each_scheme_safe(s, next_s, ctx)
533 damon_destroy_scheme(s);
534
535 kfree(ctx);
536}
537
538static unsigned int damon_age_for_new_attrs(unsigned int age,
539 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
540{
541 return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
542}
543
544/* convert access ratio in bp (per 10,000) to nr_accesses */
545static unsigned int damon_accesses_bp_to_nr_accesses(
546 unsigned int accesses_bp, struct damon_attrs *attrs)
547{
548 return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
549}
550
551/* convert nr_accesses to access ratio in bp (per 10,000) */
552static unsigned int damon_nr_accesses_to_accesses_bp(
553 unsigned int nr_accesses, struct damon_attrs *attrs)
554{
555 return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
556}
557
558static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
559 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
560{
561 return damon_accesses_bp_to_nr_accesses(
562 damon_nr_accesses_to_accesses_bp(
563 nr_accesses, old_attrs),
564 new_attrs);
565}
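
/*
 * Worked example (illustrative numbers, assuming damon_max_nr_accesses() is
 * aggr_interval / sample_interval): with old attrs of 5ms sampling and 100ms
 * aggregation, the maximum nr_accesses is 20, so nr_accesses == 10 converts
 * to 10 * 10000 / 20 == 5000 bp (50%).  If the new attrs use 10ms sampling
 * and the same 100ms aggregation, the maximum becomes 10 and the converted
 * nr_accesses is 5000 * 10 / 10000 == 5, preserving the 50% access ratio.
 */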
566
567static void damon_update_monitoring_result(struct damon_region *r,
568 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
569{
570 r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
571 old_attrs, new_attrs);
572 r->nr_accesses_bp = r->nr_accesses * 10000;
573 r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
574}
575
576/*
 577 * region->nr_accesses is the number of sampling intervals in the last
 578 * aggregation interval in which access to the region was found, and region->age
 579 * is the number of aggregation intervals for which its access pattern has been
 580 * maintained.  For this reason, the real meaning of the two fields depends on
 581 * the current sampling and aggregation intervals.  This function updates
 582 * ->nr_accesses and ->age of the given damon_ctx's regions for new damon_attrs.
583 */
584static void damon_update_monitoring_results(struct damon_ctx *ctx,
585 struct damon_attrs *new_attrs)
586{
587 struct damon_attrs *old_attrs = &ctx->attrs;
588 struct damon_target *t;
589 struct damon_region *r;
590
 591	/* if any interval is zero, simply skip the conversion */
592 if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
593 !new_attrs->sample_interval ||
594 !new_attrs->aggr_interval)
595 return;
596
597 damon_for_each_target(t, ctx)
598 damon_for_each_region(r, t)
599 damon_update_monitoring_result(
600 r, old_attrs, new_attrs);
601}
602
603/**
604 * damon_set_attrs() - Set attributes for the monitoring.
605 * @ctx: monitoring context
606 * @attrs: monitoring attributes
607 *
 608 * This function should be called while the kdamond is not running, or while
 609 * an aggregation of access check results is not ongoing (e.g., from
610 * &struct damon_callback->after_aggregation or
611 * &struct damon_callback->after_wmarks_check callbacks).
612 *
613 * Every time interval is in micro-seconds.
614 *
615 * Return: 0 on success, negative error code otherwise.
616 */
617int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
618{
619 unsigned long sample_interval = attrs->sample_interval ?
620 attrs->sample_interval : 1;
621 struct damos *s;
622
623 if (attrs->min_nr_regions < 3)
624 return -EINVAL;
625 if (attrs->min_nr_regions > attrs->max_nr_regions)
626 return -EINVAL;
627 if (attrs->sample_interval > attrs->aggr_interval)
628 return -EINVAL;
629
630 ctx->next_aggregation_sis = ctx->passed_sample_intervals +
631 attrs->aggr_interval / sample_interval;
632 ctx->next_ops_update_sis = ctx->passed_sample_intervals +
633 attrs->ops_update_interval / sample_interval;
634
635 damon_update_monitoring_results(ctx, attrs);
636 ctx->attrs = *attrs;
637
638 damon_for_each_scheme(s, ctx)
639 damos_set_next_apply_sis(s, ctx);
640
641 return 0;
642}
643
644/**
645 * damon_set_schemes() - Set data access monitoring based operation schemes.
646 * @ctx: monitoring context
647 * @schemes: array of the schemes
648 * @nr_schemes: number of entries in @schemes
649 *
650 * This function should not be called while the kdamond of the context is
651 * running.
652 */
653void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
654 ssize_t nr_schemes)
655{
656 struct damos *s, *next;
657 ssize_t i;
658
659 damon_for_each_scheme_safe(s, next, ctx)
660 damon_destroy_scheme(s);
661 for (i = 0; i < nr_schemes; i++)
662 damon_add_scheme(ctx, schemes[i]);
663}
664
665/**
666 * damon_nr_running_ctxs() - Return number of currently running contexts.
667 */
668int damon_nr_running_ctxs(void)
669{
670 int nr_ctxs;
671
672 mutex_lock(&damon_lock);
673 nr_ctxs = nr_running_ctxs;
674 mutex_unlock(&damon_lock);
675
676 return nr_ctxs;
677}
678
679/* Returns the size upper limit for each monitoring region */
680static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
681{
682 struct damon_target *t;
683 struct damon_region *r;
684 unsigned long sz = 0;
685
686 damon_for_each_target(t, ctx) {
687 damon_for_each_region(r, t)
688 sz += damon_sz_region(r);
689 }
690
691 if (ctx->attrs.min_nr_regions)
692 sz /= ctx->attrs.min_nr_regions;
693 if (sz < DAMON_MIN_REGION)
694 sz = DAMON_MIN_REGION;
695
696 return sz;
697}
698
699static int kdamond_fn(void *data);
700
701/*
702 * __damon_start() - Starts monitoring with given context.
703 * @ctx: monitoring context
704 *
 705 * This function should be called while damon_lock is held.
706 *
707 * Return: 0 on success, negative error code otherwise.
708 */
709static int __damon_start(struct damon_ctx *ctx)
710{
711 int err = -EBUSY;
712
713 mutex_lock(&ctx->kdamond_lock);
714 if (!ctx->kdamond) {
715 err = 0;
716 reinit_completion(&ctx->kdamond_started);
717 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
718 nr_running_ctxs);
719 if (IS_ERR(ctx->kdamond)) {
720 err = PTR_ERR(ctx->kdamond);
721 ctx->kdamond = NULL;
722 } else {
723 wait_for_completion(&ctx->kdamond_started);
724 }
725 }
726 mutex_unlock(&ctx->kdamond_lock);
727
728 return err;
729}
730
731/**
 732 * damon_start() - Starts monitoring for a given group of contexts.
 733 * @ctxs: an array of the pointers for contexts to start monitoring
 734 * @nr_ctxs: size of @ctxs
 735 * @exclusive: exclusiveness of this group of contexts
 736 *
 737 * This function starts a group of monitoring threads for a group of monitoring
 738 * contexts. One thread per context is created and runs in parallel. The
 739 * caller should handle synchronization between the threads by itself. If
 740 * @exclusive is true and a group of threads that was created by another
 741 * 'damon_start()' call is currently running, this function does nothing but
 742 * returns -EBUSY.
743 *
744 * Return: 0 on success, negative error code otherwise.
745 */
746int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
747{
748 int i;
749 int err = 0;
750
751 mutex_lock(&damon_lock);
752 if ((exclusive && nr_running_ctxs) ||
753 (!exclusive && running_exclusive_ctxs)) {
754 mutex_unlock(&damon_lock);
755 return -EBUSY;
756 }
757
758 for (i = 0; i < nr_ctxs; i++) {
759 err = __damon_start(ctxs[i]);
760 if (err)
761 break;
762 nr_running_ctxs++;
763 }
764 if (exclusive && nr_running_ctxs)
765 running_exclusive_ctxs = true;
766 mutex_unlock(&damon_lock);
767
768 return err;
769}
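
/*
 * Illustrative lifecycle sketch (not built as part of this file): how an
 * in-kernel caller could set up and run a context with the APIs above.  The
 * DAMON_OPS_PADDR operations id and the monitored address range are
 * assumptions for illustration; error handling is kept minimal.
 */
#if 0
static struct damon_ctx *example_ctx;

static int example_run_damon(void)
{
	struct damon_target *t;
	struct damon_addr_range range = {
		.start = 0x40000000,	/* assumed range to monitor */
		.end   = 0x80000000,
	};
	int err;

	example_ctx = damon_new_ctx();
	if (!example_ctx)
		return -ENOMEM;

	err = damon_select_ops(example_ctx, DAMON_OPS_PADDR);
	if (err)
		goto out;

	t = damon_new_target();
	if (!t) {
		err = -ENOMEM;
		goto out;
	}
	/* add first so that damon_destroy_ctx() below also frees the target */
	damon_add_target(example_ctx, t);
	err = damon_set_regions(t, &range, 1);
	if (err)
		goto out;

	/* exclusive start; stop later with damon_stop(&example_ctx, 1) */
	err = damon_start(&example_ctx, 1, true);
out:
	if (err)
		damon_destroy_ctx(example_ctx);
	return err;
}
#endif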
770
771/*
772 * __damon_stop() - Stops monitoring of a given context.
773 * @ctx: monitoring context
774 *
775 * Return: 0 on success, negative error code otherwise.
776 */
777static int __damon_stop(struct damon_ctx *ctx)
778{
779 struct task_struct *tsk;
780
781 mutex_lock(&ctx->kdamond_lock);
782 tsk = ctx->kdamond;
783 if (tsk) {
784 get_task_struct(tsk);
785 mutex_unlock(&ctx->kdamond_lock);
786 kthread_stop_put(tsk);
787 return 0;
788 }
789 mutex_unlock(&ctx->kdamond_lock);
790
791 return -EPERM;
792}
793
794/**
 795 * damon_stop() - Stops monitoring for a given group of contexts.
796 * @ctxs: an array of the pointers for contexts to stop monitoring
797 * @nr_ctxs: size of @ctxs
798 *
799 * Return: 0 on success, negative error code otherwise.
800 */
801int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
802{
803 int i, err = 0;
804
805 for (i = 0; i < nr_ctxs; i++) {
806 /* nr_running_ctxs is decremented in kdamond_fn */
807 err = __damon_stop(ctxs[i]);
808 if (err)
809 break;
810 }
811 return err;
812}
813
814/*
815 * Reset the aggregated monitoring results ('nr_accesses' of each region).
816 */
817static void kdamond_reset_aggregated(struct damon_ctx *c)
818{
819 struct damon_target *t;
820 unsigned int ti = 0; /* target's index */
821
822 damon_for_each_target(t, c) {
823 struct damon_region *r;
824
825 damon_for_each_region(r, t) {
826 trace_damon_aggregated(ti, r, damon_nr_regions(t));
827 r->last_nr_accesses = r->nr_accesses;
828 r->nr_accesses = 0;
829 }
830 ti++;
831 }
832}
833
834static void damon_split_region_at(struct damon_target *t,
835 struct damon_region *r, unsigned long sz_r);
836
837static bool __damos_valid_target(struct damon_region *r, struct damos *s)
838{
839 unsigned long sz;
840 unsigned int nr_accesses = r->nr_accesses_bp / 10000;
841
842 sz = damon_sz_region(r);
843 return s->pattern.min_sz_region <= sz &&
844 sz <= s->pattern.max_sz_region &&
845 s->pattern.min_nr_accesses <= nr_accesses &&
846 nr_accesses <= s->pattern.max_nr_accesses &&
847 s->pattern.min_age_region <= r->age &&
848 r->age <= s->pattern.max_age_region;
849}
850
851static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
852 struct damon_region *r, struct damos *s)
853{
854 bool ret = __damos_valid_target(r, s);
855
856 if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
857 return ret;
858
859 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
860}
861
862/*
863 * damos_skip_charged_region() - Check if the given region or starting part of
864 * it is already charged for the DAMOS quota.
865 * @t: The target of the region.
866 * @rp: The pointer to the region.
867 * @s: The scheme to be applied.
868 *
 869 * If the quota of a scheme has been exceeded in a quota charge window, the
 870 * scheme's action would be applied to only a part of the regions that fulfill
 871 * the target access pattern. To avoid applying the scheme action only to the
 872 * already applied regions, DAMON skips applying the scheme action to regions
 873 * that were charged in the previous charge window.
 874 *
 875 * This function checks if a given region should be skipped for this reason.
 876 * If only the starting part of the region has previously been charged, this
 877 * function splits the region into two so that the second one covers the area
 878 * that was not charged in the previous charge window, saves the second
 879 * region in *rp, and returns false, so that the caller can apply the DAMON
 880 * action to the second one.
881 *
882 * Return: true if the region should be entirely skipped, false otherwise.
883 */
884static bool damos_skip_charged_region(struct damon_target *t,
885 struct damon_region **rp, struct damos *s)
886{
887 struct damon_region *r = *rp;
888 struct damos_quota *quota = &s->quota;
889 unsigned long sz_to_skip;
890
891 /* Skip previously charged regions */
892 if (quota->charge_target_from) {
893 if (t != quota->charge_target_from)
894 return true;
895 if (r == damon_last_region(t)) {
896 quota->charge_target_from = NULL;
897 quota->charge_addr_from = 0;
898 return true;
899 }
900 if (quota->charge_addr_from &&
901 r->ar.end <= quota->charge_addr_from)
902 return true;
903
904 if (quota->charge_addr_from && r->ar.start <
905 quota->charge_addr_from) {
906 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
907 r->ar.start, DAMON_MIN_REGION);
908 if (!sz_to_skip) {
909 if (damon_sz_region(r) <= DAMON_MIN_REGION)
910 return true;
911 sz_to_skip = DAMON_MIN_REGION;
912 }
913 damon_split_region_at(t, r, sz_to_skip);
914 r = damon_next_region(r);
915 *rp = r;
916 }
917 quota->charge_target_from = NULL;
918 quota->charge_addr_from = 0;
919 }
920 return false;
921}
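
/*
 * Worked example (illustrative numbers, assuming DAMON_MIN_REGION is 4096):
 * suppose the previous charge window stopped in the middle of region
 * [8192, 16384) of @t, so that quota->charge_target_from == t and
 * quota->charge_addr_from == 12289.  sz_to_skip becomes
 * ALIGN_DOWN(12289 - 8192, 4096) == 4096, the region is split into
 * [8192, 12288) and [12288, 16384), *rp is moved to the second region, and
 * false is returned so that the caller applies the action only to the
 * not-yet-charged part.
 */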
922
923static void damos_update_stat(struct damos *s,
924 unsigned long sz_tried, unsigned long sz_applied)
925{
926 s->stat.nr_tried++;
927 s->stat.sz_tried += sz_tried;
928 if (sz_applied)
929 s->stat.nr_applied++;
930 s->stat.sz_applied += sz_applied;
931}
932
933static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
934 struct damon_region *r, struct damos_filter *filter)
935{
936 bool matched = false;
937 struct damon_target *ti;
938 int target_idx = 0;
939 unsigned long start, end;
940
941 switch (filter->type) {
942 case DAMOS_FILTER_TYPE_TARGET:
943 damon_for_each_target(ti, ctx) {
944 if (ti == t)
945 break;
946 target_idx++;
947 }
948 matched = target_idx == filter->target_idx;
949 break;
950 case DAMOS_FILTER_TYPE_ADDR:
951 start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
952 end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
953
954 /* inside the range */
955 if (start <= r->ar.start && r->ar.end <= end) {
956 matched = true;
957 break;
958 }
959 /* outside of the range */
960 if (r->ar.end <= start || end <= r->ar.start) {
961 matched = false;
962 break;
963 }
964 /* start before the range and overlap */
965 if (r->ar.start < start) {
966 damon_split_region_at(t, r, start - r->ar.start);
967 matched = false;
968 break;
969 }
970 /* start inside the range */
971 damon_split_region_at(t, r, end - r->ar.start);
972 matched = true;
973 break;
974 default:
975 return false;
976 }
977
978 return matched == filter->matching;
979}
980
981static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
982 struct damon_region *r, struct damos *s)
983{
984 struct damos_filter *filter;
985
986 damos_for_each_filter(filter, s) {
987 if (__damos_filter_out(ctx, t, r, filter))
988 return true;
989 }
990 return false;
991}
992
993static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
994 struct damon_region *r, struct damos *s)
995{
996 struct damos_quota *quota = &s->quota;
997 unsigned long sz = damon_sz_region(r);
998 struct timespec64 begin, end;
999 unsigned long sz_applied = 0;
1000 int err = 0;
1001 /*
 1002	 * We plan to support multiple contexts per kdamond, as the DAMON sysfs
 1003	 * interface implies with its 'nr_contexts' file. Nevertheless, only a
 1004	 * single context per kdamond is supported for now. So, we can simply use
 1005	 * the '0' context index here.
1006 */
1007 unsigned int cidx = 0;
1008 struct damos *siter; /* schemes iterator */
1009 unsigned int sidx = 0;
1010 struct damon_target *titer; /* targets iterator */
1011 unsigned int tidx = 0;
1012 bool do_trace = false;
1013
1014 /* get indices for trace_damos_before_apply() */
1015 if (trace_damos_before_apply_enabled()) {
1016 damon_for_each_scheme(siter, c) {
1017 if (siter == s)
1018 break;
1019 sidx++;
1020 }
1021 damon_for_each_target(titer, c) {
1022 if (titer == t)
1023 break;
1024 tidx++;
1025 }
1026 do_trace = true;
1027 }
1028
1029 if (c->ops.apply_scheme) {
1030 if (quota->esz && quota->charged_sz + sz > quota->esz) {
1031 sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1032 DAMON_MIN_REGION);
1033 if (!sz)
1034 goto update_stat;
1035 damon_split_region_at(t, r, sz);
1036 }
1037 if (damos_filter_out(c, t, r, s))
1038 return;
1039 ktime_get_coarse_ts64(&begin);
1040 if (c->callback.before_damos_apply)
1041 err = c->callback.before_damos_apply(c, t, r, s);
1042 if (!err) {
1043 trace_damos_before_apply(cidx, sidx, tidx, r,
1044 damon_nr_regions(t), do_trace);
1045 sz_applied = c->ops.apply_scheme(c, t, r, s);
1046 }
1047 ktime_get_coarse_ts64(&end);
1048 quota->total_charged_ns += timespec64_to_ns(&end) -
1049 timespec64_to_ns(&begin);
1050 quota->charged_sz += sz;
1051 if (quota->esz && quota->charged_sz >= quota->esz) {
1052 quota->charge_target_from = t;
1053 quota->charge_addr_from = r->ar.end + 1;
1054 }
1055 }
1056 if (s->action != DAMOS_STAT)
1057 r->age = 0;
1058
1059update_stat:
1060 damos_update_stat(s, sz, sz_applied);
1061}
1062
1063static void damon_do_apply_schemes(struct damon_ctx *c,
1064 struct damon_target *t,
1065 struct damon_region *r)
1066{
1067 struct damos *s;
1068
1069 damon_for_each_scheme(s, c) {
1070 struct damos_quota *quota = &s->quota;
1071
1072 if (c->passed_sample_intervals != s->next_apply_sis)
1073 continue;
1074
1075 if (!s->wmarks.activated)
1076 continue;
1077
1078 /* Check the quota */
1079 if (quota->esz && quota->charged_sz >= quota->esz)
1080 continue;
1081
1082 if (damos_skip_charged_region(t, &r, s))
1083 continue;
1084
1085 if (!damos_valid_target(c, t, r, s))
1086 continue;
1087
1088 damos_apply_scheme(c, t, r, s);
1089 }
1090}
1091
1092/*
1093 * damon_feed_loop_next_input() - get next input to achieve a target score.
 1094 * @last_input:	The last input.
 1095 * @score:		Current score that was made with @last_input.
 1096 *
 1097 * Calculate next input to achieve the target score, based on the last input
 1098 * and current score. Assuming the input and the score are positively
 1099 * proportional, calculate how much compensation should be added to or
 1100 * subtracted from the last input, as a proportion of the last input. To
 1101 * avoid the next input always becoming zero, keep it non-zero. In short form
 1102 * (assuming support of float and signed calculations), the algorithm is as
 1103 * below.
 1104 *
 1105 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 1106 *
 1107 * For a simple implementation, we assume the target score is always 10,000.
 1108 * The caller should adjust @score for this.
 1109 *
 1110 * Return: Next input that is assumed to achieve the target score.
1111 */
1112static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1113 unsigned long score)
1114{
1115 const unsigned long goal = 10000;
1116 unsigned long score_goal_diff = max(goal, score) - min(goal, score);
1117 unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal;
1118 unsigned long compensation = last_input * score_goal_diff_bp / 10000;
 1119	/* Set minimum input as 10000 to avoid the compensation being zero */
1120 const unsigned long min_input = 10000;
1121
1122 if (goal > score)
1123 return last_input + compensation;
1124 if (last_input > compensation + min_input)
1125 return last_input - compensation;
1126 return min_input;
1127}
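
/*
 * Worked example (illustrative numbers): with last_input == 1,000,000 and
 * score == 5,000 (half of the 10,000 goal), score_goal_diff_bp is 5,000 and
 * compensation is 500,000, so the next input grows to 1,500,000.  With
 * score == 15,000 the same compensation is instead subtracted, giving
 * 500,000.  If the subtraction would not leave more than min_input, 10,000
 * is returned instead.
 */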
1128
1129#ifdef CONFIG_PSI
1130
1131static u64 damos_get_some_mem_psi_total(void)
1132{
1133 if (static_branch_likely(&psi_disabled))
1134 return 0;
1135 return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
1136 NSEC_PER_USEC);
1137}
1138
1139#else /* CONFIG_PSI */
1140
1141static inline u64 damos_get_some_mem_psi_total(void)
1142{
1143 return 0;
 1144}
1145
1146#endif /* CONFIG_PSI */
1147
1148static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
1149{
1150 u64 now_psi_total;
1151
1152 switch (goal->metric) {
1153 case DAMOS_QUOTA_USER_INPUT:
1154 /* User should already set goal->current_value */
1155 break;
1156 case DAMOS_QUOTA_SOME_MEM_PSI_US:
1157 now_psi_total = damos_get_some_mem_psi_total();
1158 goal->current_value = now_psi_total - goal->last_psi_total;
1159 goal->last_psi_total = now_psi_total;
1160 break;
1161 default:
1162 break;
1163 }
1164}
1165
1166/* Return the highest score since it makes schemes least aggressive */
1167static unsigned long damos_quota_score(struct damos_quota *quota)
1168{
1169 struct damos_quota_goal *goal;
1170 unsigned long highest_score = 0;
1171
1172 damos_for_each_quota_goal(goal, quota) {
1173 damos_set_quota_goal_current_value(goal);
1174 highest_score = max(highest_score,
1175 goal->current_value * 10000 /
1176 goal->target_value);
1177 }
1178
1179 return highest_score;
1180}
1181
1182/*
 1183 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
1184 */
1185static void damos_set_effective_quota(struct damos_quota *quota)
1186{
1187 unsigned long throughput;
1188 unsigned long esz;
1189
 1190	if (!quota->ms && list_empty(&quota->goals)) {
1191 quota->esz = quota->sz;
1192 return;
1193 }
1194
 1195	if (!list_empty(&quota->goals)) {
1196 unsigned long score = damos_quota_score(quota);
1197
1198 quota->esz_bp = damon_feed_loop_next_input(
1199 max(quota->esz_bp, 10000UL),
1200 score);
1201 esz = quota->esz_bp / 10000;
1202 }
1203
1204 if (quota->ms) {
1205 if (quota->total_charged_ns)
1206 throughput = quota->total_charged_sz * 1000000 /
1207 quota->total_charged_ns;
1208 else
1209 throughput = PAGE_SIZE * 1024;
 1210		if (!list_empty(&quota->goals))
1211 esz = min(throughput * quota->ms, esz);
1212 else
1213 esz = throughput * quota->ms;
1214 }
1215
1216 if (quota->sz && quota->sz < esz)
1217 esz = quota->sz;
1218
1219 quota->esz = esz;
1220}
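
/*
 * Worked example (illustrative numbers): suppose the scheme has no quota
 * goals, quota->ms == 10 and quota->sz == 4 MiB.  If 1 GiB has been charged
 * over 2 seconds of accumulated apply time so far, the estimated throughput
 * is 1 GiB * 1000000 / 2000000000 ns, i.e. roughly 0.5 MiB per millisecond,
 * so the time quota alone allows roughly 5 MiB per charge window.  Since
 * quota->sz is smaller, quota->esz is capped at 4 MiB.
 */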
1221
1222static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
1223{
1224 struct damos_quota *quota = &s->quota;
1225 struct damon_target *t;
1226 struct damon_region *r;
1227 unsigned long cumulated_sz;
1228 unsigned int score, max_score = 0;
1229
 1230	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
1231 return;
1232
1233 /* New charge window starts */
1234 if (time_after_eq(jiffies, quota->charged_from +
1235 msecs_to_jiffies(quota->reset_interval))) {
1236 if (quota->esz && quota->charged_sz >= quota->esz)
1237 s->stat.qt_exceeds++;
1238 quota->total_charged_sz += quota->charged_sz;
1239 quota->charged_from = jiffies;
1240 quota->charged_sz = 0;
1241 damos_set_effective_quota(quota);
1242 }
1243
1244 if (!c->ops.get_scheme_score)
1245 return;
1246
1247 /* Fill up the score histogram */
1248 memset(quota->histogram, 0, sizeof(quota->histogram));
1249 damon_for_each_target(t, c) {
1250 damon_for_each_region(r, t) {
1251 if (!__damos_valid_target(r, s))
1252 continue;
1253 score = c->ops.get_scheme_score(c, t, r, s);
1254 quota->histogram[score] += damon_sz_region(r);
1255 if (score > max_score)
1256 max_score = score;
1257 }
1258 }
1259
1260 /* Set the min score limit */
1261 for (cumulated_sz = 0, score = max_score; ; score--) {
1262 cumulated_sz += quota->histogram[score];
1263 if (cumulated_sz >= quota->esz || !score)
1264 break;
1265 }
1266 quota->min_score = score;
1267}
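
/*
 * Worked example (illustrative numbers): with quota->esz == 8 MiB and valid
 * target regions whose scheme scores and sizes are (90, 3 MiB), (70, 4 MiB)
 * and (50, 6 MiB), walking the histogram from the highest score accumulates
 * 3 MiB at score 90, 7 MiB at score 70 and 13 MiB at score 50, which first
 * reaches the 8 MiB quota.  quota->min_score therefore becomes 50, and only
 * regions scoring 50 or higher pass damos_valid_target() in this window.
 */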
1268
1269static void kdamond_apply_schemes(struct damon_ctx *c)
1270{
1271 struct damon_target *t;
1272 struct damon_region *r, *next_r;
1273 struct damos *s;
1274 unsigned long sample_interval = c->attrs.sample_interval ?
1275 c->attrs.sample_interval : 1;
1276 bool has_schemes_to_apply = false;
1277
1278 damon_for_each_scheme(s, c) {
1279 if (c->passed_sample_intervals != s->next_apply_sis)
1280 continue;
1281
1282 if (!s->wmarks.activated)
1283 continue;
1284
1285 has_schemes_to_apply = true;
1286
1287 damos_adjust_quota(c, s);
1288 }
1289
1290 if (!has_schemes_to_apply)
1291 return;
1292
1293 damon_for_each_target(t, c) {
1294 damon_for_each_region_safe(r, next_r, t)
1295 damon_do_apply_schemes(c, t, r);
1296 }
1297
1298 damon_for_each_scheme(s, c) {
1299 if (c->passed_sample_intervals != s->next_apply_sis)
1300 continue;
1301 s->next_apply_sis +=
1302 (s->apply_interval_us ? s->apply_interval_us :
1303 c->attrs.aggr_interval) / sample_interval;
1304 }
1305}
1306
1307/*
1308 * Merge two adjacent regions into one region
1309 */
1310static void damon_merge_two_regions(struct damon_target *t,
1311 struct damon_region *l, struct damon_region *r)
1312{
1313 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
1314
1315 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
1316 (sz_l + sz_r);
1317 l->nr_accesses_bp = l->nr_accesses * 10000;
1318 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
1319 l->ar.end = r->ar.end;
1320 damon_destroy_region(r, t);
1321}
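
/*
 * Worked example (illustrative numbers): merging l == [0, 12288) with
 * nr_accesses == 4 and r == [12288, 16384) with nr_accesses == 8 gives a
 * size-weighted nr_accesses of (4 * 12288 + 8 * 4096) / 16384 == 5, an
 * nr_accesses_bp of 50000, and the merged region [0, 16384).
 */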
1322
1323/*
1324 * Merge adjacent regions having similar access frequencies
1325 *
1326 * t target affected by this merge operation
1327 * thres '->nr_accesses' diff threshold for the merge
1328 * sz_limit size upper limit of each region
1329 */
1330static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
1331 unsigned long sz_limit)
1332{
1333 struct damon_region *r, *prev = NULL, *next;
1334
1335 damon_for_each_region_safe(r, next, t) {
1336 if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
1337 r->age = 0;
1338 else
1339 r->age++;
1340
1341 if (prev && prev->ar.end == r->ar.start &&
1342 abs(prev->nr_accesses - r->nr_accesses) <= thres &&
1343 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
1344 damon_merge_two_regions(t, prev, r);
1345 else
1346 prev = r;
1347 }
1348}
1349
1350/*
1351 * Merge adjacent regions having similar access frequencies
1352 *
1353 * threshold '->nr_accesses' diff threshold for the merge
1354 * sz_limit size upper limit of each region
1355 *
 1356 * This function merges monitoring target regions which are adjacent and whose
1357 * access frequencies are similar. This is for minimizing the monitoring
1358 * overhead under the dynamically changeable access pattern. If a merge was
1359 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
1360 */
1361static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
1362 unsigned long sz_limit)
1363{
1364 struct damon_target *t;
1365
1366 damon_for_each_target(t, c)
1367 damon_merge_regions_of(t, threshold, sz_limit);
1368}
1369
1370/*
1371 * Split a region in two
1372 *
1373 * r the region to be split
1374 * sz_r size of the first sub-region that will be made
1375 */
1376static void damon_split_region_at(struct damon_target *t,
1377 struct damon_region *r, unsigned long sz_r)
1378{
1379 struct damon_region *new;
1380
1381 new = damon_new_region(r->ar.start + sz_r, r->ar.end);
1382 if (!new)
1383 return;
1384
1385 r->ar.end = new->ar.start;
1386
1387 new->age = r->age;
1388 new->last_nr_accesses = r->last_nr_accesses;
1389 new->nr_accesses_bp = r->nr_accesses_bp;
1390 new->nr_accesses = r->nr_accesses;
1391
1392 damon_insert_region(new, r, damon_next_region(r), t);
1393}
1394
1395/* Split every region in the given target into 'nr_subs' regions */
1396static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1397{
1398 struct damon_region *r, *next;
1399 unsigned long sz_region, sz_sub = 0;
1400 int i;
1401
1402 damon_for_each_region_safe(r, next, t) {
1403 sz_region = damon_sz_region(r);
1404
1405 for (i = 0; i < nr_subs - 1 &&
1406 sz_region > 2 * DAMON_MIN_REGION; i++) {
1407 /*
 1408			 * Randomly select size of the left sub-region to be
 1409			 * at least 10% and at most 90% of the original region
1410 */
1411 sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1412 sz_region / 10, DAMON_MIN_REGION);
1413 /* Do not allow blank region */
1414 if (sz_sub == 0 || sz_sub >= sz_region)
1415 continue;
1416
1417 damon_split_region_at(t, r, sz_sub);
1418 sz_region = sz_sub;
1419 }
1420 }
1421}
1422
1423/*
1424 * Split every target region into randomly-sized small regions
1425 *
1426 * This function splits every target region into random-sized small regions if
 1427 * the current total number of regions is equal to or smaller than half of the
1428 * user-specified maximum number of regions. This is for maximizing the
1429 * monitoring accuracy under the dynamically changeable access patterns. If a
1430 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1431 * it.
1432 */
1433static void kdamond_split_regions(struct damon_ctx *ctx)
1434{
1435 struct damon_target *t;
1436 unsigned int nr_regions = 0;
1437 static unsigned int last_nr_regions;
1438 int nr_subregions = 2;
1439
1440 damon_for_each_target(t, ctx)
1441 nr_regions += damon_nr_regions(t);
1442
1443 if (nr_regions > ctx->attrs.max_nr_regions / 2)
1444 return;
1445
1446 /* Maybe the middle of the region has different access frequency */
1447 if (last_nr_regions == nr_regions &&
1448 nr_regions < ctx->attrs.max_nr_regions / 3)
1449 nr_subregions = 3;
1450
1451 damon_for_each_target(t, ctx)
1452 damon_split_regions_of(t, nr_subregions);
1453
1454 last_nr_regions = nr_regions;
1455}
1456
1457/*
1458 * Check whether current monitoring should be stopped
1459 *
1460 * The monitoring is stopped when either the user requested to stop, or all
1461 * monitoring targets are invalid.
1462 *
1463 * Returns true if need to stop current monitoring.
1464 */
1465static bool kdamond_need_stop(struct damon_ctx *ctx)
1466{
1467 struct damon_target *t;
1468
1469 if (kthread_should_stop())
1470 return true;
1471
1472 if (!ctx->ops.target_valid)
1473 return false;
1474
1475 damon_for_each_target(t, ctx) {
1476 if (ctx->ops.target_valid(t))
1477 return false;
1478 }
1479
1480 return true;
1481}
1482
1483static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
1484{
1485 switch (metric) {
1486 case DAMOS_WMARK_FREE_MEM_RATE:
1487 return global_zone_page_state(NR_FREE_PAGES) * 1000 /
1488 totalram_pages();
1489 default:
1490 break;
1491 }
1492 return -EINVAL;
1493}
1494
1495/*
1496 * Returns zero if the scheme is active. Else, returns time to wait for next
1497 * watermark check in micro-seconds.
1498 */
1499static unsigned long damos_wmark_wait_us(struct damos *scheme)
1500{
1501 unsigned long metric;
1502
1503 if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
1504 return 0;
1505
1506 metric = damos_wmark_metric_value(scheme->wmarks.metric);
1507 /* higher than high watermark or lower than low watermark */
1508 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
1509 if (scheme->wmarks.activated)
1510 pr_debug("deactivate a scheme (%d) for %s wmark\n",
1511 scheme->action,
1512 metric > scheme->wmarks.high ?
1513 "high" : "low");
1514 scheme->wmarks.activated = false;
1515 return scheme->wmarks.interval;
1516 }
1517
1518 /* inactive and higher than middle watermark */
1519 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
1520 !scheme->wmarks.activated)
1521 return scheme->wmarks.interval;
1522
1523 if (!scheme->wmarks.activated)
1524 pr_debug("activate a scheme (%d)\n", scheme->action);
1525 scheme->wmarks.activated = true;
1526 return 0;
1527}
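
/*
 * Worked example (illustrative numbers): with DAMOS_WMARK_FREE_MEM_RATE and
 * high/mid/low watermarks of 500/400/300 (the metric is in per-thousand of
 * total RAM), the scheme is deactivated whenever free memory is above 50% or
 * below 30%.  Once deactivated, it stays inactive while free memory is still
 * at or above 40%, and is activated again only after the rate falls into the
 * [30%, 40%) range, which gives the activation a simple hysteresis.
 */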
1528
1529static void kdamond_usleep(unsigned long usecs)
1530{
1531 /* See Documentation/timers/timers-howto.rst for the thresholds */
1532 if (usecs > 20 * USEC_PER_MSEC)
1533 schedule_timeout_idle(usecs_to_jiffies(usecs));
1534 else
1535 usleep_idle_range(usecs, usecs + 1);
1536}
1537
1538/* Returns negative error code if it's not activated but should return */
1539static int kdamond_wait_activation(struct damon_ctx *ctx)
1540{
1541 struct damos *s;
1542 unsigned long wait_time;
1543 unsigned long min_wait_time = 0;
1544 bool init_wait_time = false;
1545
1546 while (!kdamond_need_stop(ctx)) {
1547 damon_for_each_scheme(s, ctx) {
1548 wait_time = damos_wmark_wait_us(s);
1549 if (!init_wait_time || wait_time < min_wait_time) {
1550 init_wait_time = true;
1551 min_wait_time = wait_time;
1552 }
1553 }
1554 if (!min_wait_time)
1555 return 0;
1556
1557 kdamond_usleep(min_wait_time);
1558
1559 if (ctx->callback.after_wmarks_check &&
1560 ctx->callback.after_wmarks_check(ctx))
1561 break;
1562 }
1563 return -EBUSY;
1564}
1565
1566static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
1567{
1568 unsigned long sample_interval = ctx->attrs.sample_interval ?
1569 ctx->attrs.sample_interval : 1;
1570 unsigned long apply_interval;
1571 struct damos *scheme;
1572
1573 ctx->passed_sample_intervals = 0;
1574 ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
1575 ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
1576 sample_interval;
1577
1578 damon_for_each_scheme(scheme, ctx) {
1579 apply_interval = scheme->apply_interval_us ?
1580 scheme->apply_interval_us : ctx->attrs.aggr_interval;
1581 scheme->next_apply_sis = apply_interval / sample_interval;
1582 }
1583}
1584
1585/*
1586 * The monitoring daemon that runs as a kernel thread
1587 */
1588static int kdamond_fn(void *data)
1589{
1590 struct damon_ctx *ctx = data;
1591 struct damon_target *t;
1592 struct damon_region *r, *next;
1593 unsigned int max_nr_accesses = 0;
1594 unsigned long sz_limit = 0;
1595
1596 pr_debug("kdamond (%d) starts\n", current->pid);
1597
1598 complete(&ctx->kdamond_started);
1599 kdamond_init_intervals_sis(ctx);
1600
1601 if (ctx->ops.init)
1602 ctx->ops.init(ctx);
1603 if (ctx->callback.before_start && ctx->callback.before_start(ctx))
1604 goto done;
1605
1606 sz_limit = damon_region_sz_limit(ctx);
1607
1608 while (!kdamond_need_stop(ctx)) {
1609 /*
1610 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
1611 * be changed from after_wmarks_check() or after_aggregation()
1612 * callbacks. Read the values here, and use those for this
 1613		 * iteration. That is, new values updated by damon_set_attrs()
 1614		 * are respected from the next iteration onward.
1615 */
1616 unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
1617 unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
1618 unsigned long sample_interval = ctx->attrs.sample_interval;
1619
1620 if (kdamond_wait_activation(ctx))
1621 break;
1622
1623 if (ctx->ops.prepare_access_checks)
1624 ctx->ops.prepare_access_checks(ctx);
1625 if (ctx->callback.after_sampling &&
1626 ctx->callback.after_sampling(ctx))
1627 break;
1628
1629 kdamond_usleep(sample_interval);
1630 ctx->passed_sample_intervals++;
1631
1632 if (ctx->ops.check_accesses)
1633 max_nr_accesses = ctx->ops.check_accesses(ctx);
1634
1635 if (ctx->passed_sample_intervals == next_aggregation_sis) {
1636 kdamond_merge_regions(ctx,
1637 max_nr_accesses / 10,
1638 sz_limit);
1639 if (ctx->callback.after_aggregation &&
1640 ctx->callback.after_aggregation(ctx))
1641 break;
1642 }
1643
1644 /*
1645 * do kdamond_apply_schemes() after kdamond_merge_regions() if
1646 * possible, to reduce overhead
1647 */
1648 if (!list_empty(&ctx->schemes))
1649 kdamond_apply_schemes(ctx);
1650
1651 sample_interval = ctx->attrs.sample_interval ?
1652 ctx->attrs.sample_interval : 1;
1653 if (ctx->passed_sample_intervals == next_aggregation_sis) {
1654 ctx->next_aggregation_sis = next_aggregation_sis +
1655 ctx->attrs.aggr_interval / sample_interval;
1656
1657 kdamond_reset_aggregated(ctx);
1658 kdamond_split_regions(ctx);
1659 if (ctx->ops.reset_aggregated)
1660 ctx->ops.reset_aggregated(ctx);
1661 }
1662
1663 if (ctx->passed_sample_intervals == next_ops_update_sis) {
1664 ctx->next_ops_update_sis = next_ops_update_sis +
1665 ctx->attrs.ops_update_interval /
1666 sample_interval;
1667 if (ctx->ops.update)
1668 ctx->ops.update(ctx);
1669 sz_limit = damon_region_sz_limit(ctx);
1670 }
1671 }
1672done:
1673 damon_for_each_target(t, ctx) {
1674 damon_for_each_region_safe(r, next, t)
1675 damon_destroy_region(r, t);
1676 }
1677
1678 if (ctx->callback.before_terminate)
1679 ctx->callback.before_terminate(ctx);
1680 if (ctx->ops.cleanup)
1681 ctx->ops.cleanup(ctx);
1682
1683 pr_debug("kdamond (%d) finishes\n", current->pid);
1684 mutex_lock(&ctx->kdamond_lock);
1685 ctx->kdamond = NULL;
1686 mutex_unlock(&ctx->kdamond_lock);
1687
1688 mutex_lock(&damon_lock);
1689 nr_running_ctxs--;
1690 if (!nr_running_ctxs && running_exclusive_ctxs)
1691 running_exclusive_ctxs = false;
1692 mutex_unlock(&damon_lock);
1693
1694 return 0;
1695}
1696
1697/*
1698 * struct damon_system_ram_region - System RAM resource address region of
1699 * [@start, @end).
1700 * @start: Start address of the region (inclusive).
1701 * @end: End address of the region (exclusive).
1702 */
1703struct damon_system_ram_region {
1704 unsigned long start;
1705 unsigned long end;
1706};
1707
1708static int walk_system_ram(struct resource *res, void *arg)
1709{
1710 struct damon_system_ram_region *a = arg;
1711
1712 if (a->end - a->start < resource_size(res)) {
1713 a->start = res->start;
1714 a->end = res->end;
1715 }
1716 return 0;
1717}
1718
1719/*
1720 * Find biggest 'System RAM' resource and store its start and end address in
1721 * @start and @end, respectively. If no System RAM is found, returns false.
1722 */
1723static bool damon_find_biggest_system_ram(unsigned long *start,
1724 unsigned long *end)
1726{
1727 struct damon_system_ram_region arg = {};
1728
1729 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
1730 if (arg.end <= arg.start)
1731 return false;
1732
1733 *start = arg.start;
1734 *end = arg.end;
1735 return true;
1736}
1737
1738/**
1739 * damon_set_region_biggest_system_ram_default() - Set the region of the given
1740 * monitoring target as requested, or biggest 'System RAM'.
1741 * @t: The monitoring target to set the region.
1742 * @start: The pointer to the start address of the region.
1743 * @end: The pointer to the end address of the region.
1744 *
1745 * This function sets the region of @t as requested by @start and @end. If the
1746 * values of @start and @end are zero, however, this function finds the biggest
1747 * 'System RAM' resource and sets the region to cover the resource. In the
1748 * latter case, this function saves the start and end addresses of the resource
1749 * in @start and @end, respectively.
1750 *
1751 * Return: 0 on success, negative error code otherwise.
1752 */
1753int damon_set_region_biggest_system_ram_default(struct damon_target *t,
1754 unsigned long *start, unsigned long *end)
1755{
1756 struct damon_addr_range addr_range;
1757
1758 if (*start > *end)
1759 return -EINVAL;
1760
1761 if (!*start && !*end &&
1762 !damon_find_biggest_system_ram(start, end))
1763 return -EINVAL;
1764
1765 addr_range.start = *start;
1766 addr_range.end = *end;
1767 return damon_set_regions(t, &addr_range, 1);
1768}
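
/*
 * Illustrative call pattern (sketch, with hypothetical user_start/user_end
 * parameters): a module that wants to monitor the biggest 'System RAM' block
 * unless the user asked for a specific range can pass its possibly-zero
 * parameters through:
 *
 *	unsigned long start = user_start, end = user_end;
 *	int err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *
 *	if (!err)
 *		pr_debug("monitoring [%lu, %lu)\n", start, end);
 */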
1769
1770/*
1771 * damon_moving_sum() - Calculate an inferred moving sum value.
1772 * @mvsum: Inferred sum of the last @len_window values.
1773 * @nomvsum: Non-moving sum of the last discrete @len_window window values.
1774 * @len_window: The number of last values to take care of.
1775 * @new_value: New value that will be added to the pseudo moving sum.
1776 *
1777 * Moving sum (moving average * window size) is good for handling noise, but
1778 * the cost of keeping past values can be high for arbitrary window size. This
1779 * function implements a lightweight pseudo moving sum function that doesn't
1780 * keep the past window values.
1781 *
 1782 * It simply assumes there was no noise in the past, and gets the no-noise
 1783 * assumed past value to drop from @nomvsum and @len_window. @nomvsum is a
 1784 * non-moving sum of the last window. For example, if @len_window is 10 and we
 1785 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
 1786 * values. Hence, this function simply drops @nomvsum / @len_window from the
 1787 * given @mvsum and adds @new_value.
 1788 *
 1789 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
 1790 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For
 1791 * calculating the next moving sum with a new value, we should drop 0 from 50
 1792 * and add the new value. However, this function assumes it got the value 5
 1793 * for each of the last ten times. Based on that assumption, when the next
 1794 * value is measured, it drops the assumed past value, 5, from the current
 1795 * sum, and adds the new value to get the updated pseudo-moving sum.
 1796 *
 1797 * This means the value could have errors, but the errors disappear for every
 1798 * @len_window-aligned call. For example, if @len_window is 10, the pseudo
 1799 * moving sum with the 11th to 19th values would have an error, but the sum
 1800 * with the 20th value will not.
1801 *
 1802 * Return: Pseudo-moving sum after getting the @new_value.
1803 */
1804static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
1805 unsigned int len_window, unsigned int new_value)
1806{
1807 return mvsum - nomvsum / len_window + new_value;
1808}
1809
1810/**
1811 * damon_update_region_access_rate() - Update the access rate of a region.
1812 * @r: The DAMON region to update for its access check result.
 1813 * @accessed: Whether the region was accessed during the last sampling interval.
1814 * @attrs: The damon_attrs of the DAMON context.
1815 *
1816 * Update the access rate of a region with the region's last sampling interval
1817 * access check result.
1818 *
1819 * Usually this will be called by &damon_operations->check_accesses callback.
1820 */
1821void damon_update_region_access_rate(struct damon_region *r, bool accessed,
1822 struct damon_attrs *attrs)
1823{
1824 unsigned int len_window = 1;
1825
1826 /*
1827 * sample_interval can be zero, but cannot be larger than
1828 * aggr_interval, owing to validation of damon_set_attrs().
1829 */
1830 if (attrs->sample_interval)
1831 len_window = damon_max_nr_accesses(attrs);
1832 r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
1833 r->last_nr_accesses * 10000, len_window,
1834 accessed ? 10000 : 0);
1835
1836 if (accessed)
1837 r->nr_accesses++;
1838}
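
/*
 * Worked example (illustrative numbers, assuming damon_max_nr_accesses() is
 * aggr_interval / sample_interval): with 5ms sampling and 100ms aggregation,
 * len_window is 20.  If the last aggregation window ended with
 * last_nr_accesses == 10 (a nomvsum of 100,000 bp) and nr_accesses_bp is
 * currently 100,000, an "accessed" sample updates nr_accesses_bp to
 * 100000 - 100000 / 20 + 10000 == 105000, while a "not accessed" sample
 * gives 95000.
 */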
1839
1840static int __init damon_init(void)
1841{
1842 damon_region_cache = KMEM_CACHE(damon_region, 0);
1843 if (unlikely(!damon_region_cache)) {
1844 pr_err("creating damon_region_cache fails\n");
1845 return -ENOMEM;
1846 }
1847
1848 return 0;
1849}
1850
1851subsys_initcall(damon_init);
1852
1853#include "core-test.h"