Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Workingset detection
4 *
5 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
6 */
7
8#include <linux/memcontrol.h>
9#include <linux/writeback.h>
10#include <linux/shmem_fs.h>
11#include <linux/pagemap.h>
12#include <linux/atomic.h>
13#include <linux/module.h>
14#include <linux/swap.h>
15#include <linux/dax.h>
16#include <linux/fs.h>
17#include <linux/mm.h>
18
19/*
20 * Double CLOCK lists
21 *
22 * Per node, two clock lists are maintained for file pages: the
23 * inactive and the active list. Freshly faulted pages start out at
24 * the head of the inactive list and page reclaim scans pages from the
25 * tail. Pages that are accessed multiple times on the inactive list
26 * are promoted to the active list, to protect them from reclaim,
27 * whereas active pages are demoted to the inactive list when the
28 * active list grows too big.
29 *
30 * fault ------------------------+
31 * |
32 * +--------------+ | +-------------+
33 * reclaim <- | inactive | <-+-- demotion | active | <--+
34 * +--------------+ +-------------+ |
35 * | |
36 * +-------------- promotion ------------------+
37 *
38 *
39 * Access frequency and refault distance
40 *
41 * A workload is thrashing when its pages are frequently used but they
42 * are evicted from the inactive list every time before another access
43 * would have promoted them to the active list.
44 *
45 * In cases where the average access distance between thrashing pages
46 * is bigger than the size of memory there is nothing that can be
47 * done - the thrashing set could never fit into memory under any
48 * circumstance.
49 *
50 * However, the average access distance could be bigger than the
51 * inactive list, yet smaller than the size of memory. In this case,
52 * the set could fit into memory if it weren't for the currently
53 * active pages - which may be used more, hopefully less frequently:
54 *
55 * +-memory available to cache-+
56 * | |
57 * +-inactive------+-active----+
58 * a b | c d e f g h i | J K L M N |
59 * +---------------+-----------+
60 *
61 * It is prohibitively expensive to accurately track access frequency
62 * of pages. But a reasonable approximation can be made to measure
63 * thrashing on the inactive list, after which refaulting pages can be
64 * activated optimistically to compete with the existing active pages.
65 *
66 * Approximating inactive page access frequency - Observations:
67 *
68 * 1. When a page is accessed for the first time, it is added to the
69 * head of the inactive list, slides every existing inactive page
70 * towards the tail by one slot, and pushes the current tail page
71 * out of memory.
72 *
73 * 2. When a page is accessed for the second time, it is promoted to
74 * the active list, shrinking the inactive list by one slot. This
75 * also slides all inactive pages that were faulted into the cache
76 * more recently than the activated page towards the tail of the
77 * inactive list.
78 *
79 * Thus:
80 *
81 * 1. The sum of evictions and activations between any two points in
82 * time indicate the minimum number of inactive pages accessed in
83 * between.
84 *
85 * 2. Moving one inactive page N page slots towards the tail of the
86 * list requires at least N inactive page accesses.
87 *
88 * Combining these:
89 *
90 * 1. When a page is finally evicted from memory, the number of
91 * inactive pages accessed while the page was in cache is at least
92 * the number of page slots on the inactive list.
93 *
94 * 2. In addition, measuring the sum of evictions and activations (E)
95 * at the time of a page's eviction, and comparing it to another
96 * reading (R) at the time the page faults back into memory tells
97 * the minimum number of accesses while the page was not cached.
98 * This is called the refault distance.
99 *
100 * Because the first access of the page was the fault and the second
101 * access the refault, we combine the in-cache distance with the
102 * out-of-cache distance to get the complete minimum access distance
103 * of this page:
104 *
105 * NR_inactive + (R - E)
106 *
107 * And knowing the minimum access distance of a page, we can easily
108 * tell if the page would be able to stay in cache assuming all page
109 * slots in the cache were available:
110 *
111 * NR_inactive + (R - E) <= NR_inactive + NR_active
112 *
113 * which can be further simplified to
114 *
115 * (R - E) <= NR_active
116 *
117 * Put into words, the refault distance (out-of-cache) can be seen as
118 * a deficit in inactive list space (in-cache). If the inactive list
119 * had (R - E) more page slots, the page would not have been evicted
120 * in between accesses, but activated instead. And on a full system,
121 * the only thing eating into inactive list space is active pages.
122 *
123 *
124 * Refaulting inactive pages
125 *
126 * All that is known about the active list is that the pages have been
127 * accessed more than once in the past. This means that at any given
128 * time there is actually a good chance that pages on the active list
129 * are no longer in active use.
130 *
131 * So when a refault distance of (R - E) is observed and there are at
132 * least (R - E) active pages, the refaulting page is activated
133 * optimistically in the hope that (R - E) active pages are actually
134 * used less frequently than the refaulting page - or even not used at
135 * all anymore.
136 *
137 * That means if inactive cache is refaulting with a suitable refault
138 * distance, we assume the cache workingset is transitioning and put
139 * pressure on the current active list.
140 *
141 * If this is wrong and demotion kicks in, the pages which are truly
142 * used more frequently will be reactivated while the less frequently
143 * used once will be evicted from memory.
144 *
145 * But if this is right, the stale pages will be pushed out of memory
146 * and the used pages get to stay in cache.
147 *
148 * Refaulting active pages
149 *
150 * If on the other hand the refaulting pages have recently been
151 * deactivated, it means that the active list is no longer protecting
152 * actively used cache from reclaim. The cache is NOT transitioning to
153 * a different workingset; the existing workingset is thrashing in the
154 * space allocated to the page cache.
155 *
156 *
157 * Implementation
158 *
159 * For each node's file LRU lists, a counter for inactive evictions
160 * and activations is maintained (node->inactive_age).
161 *
162 * On eviction, a snapshot of this counter (along with some bits to
163 * identify the node) is stored in the now empty page cache
164 * slot of the evicted page. This is called a shadow entry.
165 *
166 * On cache misses for which there are shadow entries, an eligible
167 * refault distance will immediately activate the refaulting page.
168 */
169
170#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \
171 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
172#define EVICTION_MASK (~0UL >> EVICTION_SHIFT)
173
174/*
175 * Eviction timestamps need to be able to cover the full range of
176 * actionable refaults. However, bits are tight in the xarray
177 * entry, and after storing the identifier for the lruvec there might
178 * not be enough left to represent every single actionable refault. In
179 * that case, we have to sacrifice granularity for distance, and group
180 * evictions into coarser buckets by shaving off lower timestamp bits.
181 */
182static unsigned int bucket_order __read_mostly;
183
184static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
185 bool workingset)
186{
187 eviction >>= bucket_order;
188 eviction &= EVICTION_MASK;
189 eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
190 eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
191 eviction = (eviction << 1) | workingset;
192
193 return xa_mk_value(eviction);
194}
195
196static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
197 unsigned long *evictionp, bool *workingsetp)
198{
199 unsigned long entry = xa_to_value(shadow);
200 int memcgid, nid;
201 bool workingset;
202
203 workingset = entry & 1;
204 entry >>= 1;
205 nid = entry & ((1UL << NODES_SHIFT) - 1);
206 entry >>= NODES_SHIFT;
207 memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
208 entry >>= MEM_CGROUP_ID_SHIFT;
209
210 *memcgidp = memcgid;
211 *pgdat = NODE_DATA(nid);
212 *evictionp = entry << bucket_order;
213 *workingsetp = workingset;
214}
215
216/**
217 * workingset_eviction - note the eviction of a page from memory
218 * @page: the page being evicted
219 *
220 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
221 * of the evicted @page so that a later refault can be detected.
222 */
223void *workingset_eviction(struct page *page)
224{
225 struct pglist_data *pgdat = page_pgdat(page);
226 struct mem_cgroup *memcg = page_memcg(page);
227 int memcgid = mem_cgroup_id(memcg);
228 unsigned long eviction;
229 struct lruvec *lruvec;
230
231 /* Page is fully exclusive and pins page->mem_cgroup */
232 VM_BUG_ON_PAGE(PageLRU(page), page);
233 VM_BUG_ON_PAGE(page_count(page), page);
234 VM_BUG_ON_PAGE(!PageLocked(page), page);
235
236 lruvec = mem_cgroup_lruvec(pgdat, memcg);
237 eviction = atomic_long_inc_return(&lruvec->inactive_age);
238 return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
239}
240
241/**
242 * workingset_refault - evaluate the refault of a previously evicted page
243 * @page: the freshly allocated replacement page
244 * @shadow: shadow entry of the evicted page
245 *
246 * Calculates and evaluates the refault distance of the previously
247 * evicted page in the context of the node it was allocated in.
248 */
249void workingset_refault(struct page *page, void *shadow)
250{
251 unsigned long refault_distance;
252 struct pglist_data *pgdat;
253 unsigned long active_file;
254 struct mem_cgroup *memcg;
255 unsigned long eviction;
256 struct lruvec *lruvec;
257 unsigned long refault;
258 bool workingset;
259 int memcgid;
260
261 unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
262
263 rcu_read_lock();
264 /*
265 * Look up the memcg associated with the stored ID. It might
266 * have been deleted since the page's eviction.
267 *
268 * Note that in rare events the ID could have been recycled
269 * for a new cgroup that refaults a shared page. This is
270 * impossible to tell from the available data. However, this
271 * should be a rare and limited disturbance, and activations
272 * are always speculative anyway. Ultimately, it's the aging
273 * algorithm's job to shake out the minimum access frequency
274 * for the active cache.
275 *
276 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
277 * would be better if the root_mem_cgroup existed in all
278 * configurations instead.
279 */
280 memcg = mem_cgroup_from_id(memcgid);
281 if (!mem_cgroup_disabled() && !memcg)
282 goto out;
283 lruvec = mem_cgroup_lruvec(pgdat, memcg);
284 refault = atomic_long_read(&lruvec->inactive_age);
285 active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
286
287 /*
288 * Calculate the refault distance
289 *
290 * The unsigned subtraction here gives an accurate distance
291 * across inactive_age overflows in most cases. There is a
292 * special case: usually, shadow entries have a short lifetime
293 * and are either refaulted or reclaimed along with the inode
294 * before they get too old. But it is not impossible for the
295 * inactive_age to lap a shadow entry in the field, which can
296 * then result in a false small refault distance, leading to a
297 * false activation should this old entry actually refault
298 * again. However, earlier kernels used to deactivate
299 * unconditionally with *every* reclaim invocation for the
300 * longest time, so the occasional inappropriate activation
301 * leading to pressure on the active list is not a problem.
302 */
303 refault_distance = (refault - eviction) & EVICTION_MASK;
304
305 inc_lruvec_state(lruvec, WORKINGSET_REFAULT);
306
307 /*
308 * Compare the distance to the existing workingset size. We
309 * don't act on pages that couldn't stay resident even if all
310 * the memory was available to the page cache.
311 */
312 if (refault_distance > active_file)
313 goto out;
314
315 SetPageActive(page);
316 atomic_long_inc(&lruvec->inactive_age);
317 inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
318
319 /* Page was active prior to eviction */
320 if (workingset) {
321 SetPageWorkingset(page);
322 inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
323 }
324out:
325 rcu_read_unlock();
326}
327
328/**
329 * workingset_activation - note a page activation
330 * @page: page that is being activated
331 */
332void workingset_activation(struct page *page)
333{
334 struct mem_cgroup *memcg;
335 struct lruvec *lruvec;
336
337 rcu_read_lock();
338 /*
339 * Filter non-memcg pages here, e.g. unmap can call
340 * mark_page_accessed() on VDSO pages.
341 *
342 * XXX: See workingset_refault() - this should return
343 * root_mem_cgroup even for !CONFIG_MEMCG.
344 */
345 memcg = page_memcg_rcu(page);
346 if (!mem_cgroup_disabled() && !memcg)
347 goto out;
348 lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
349 atomic_long_inc(&lruvec->inactive_age);
350out:
351 rcu_read_unlock();
352}
353
354/*
355 * Shadow entries reflect the share of the working set that does not
356 * fit into memory, so their number depends on the access pattern of
357 * the workload. In most cases, they will refault or get reclaimed
358 * along with the inode, but a (malicious) workload that streams
359 * through files with a total size several times that of available
360 * memory, while preventing the inodes from being reclaimed, can
361 * create excessive amounts of shadow nodes. To keep a lid on this,
362 * track shadow nodes and reclaim them when they grow way past the
363 * point where they would still be useful.
364 */
365
366static struct list_lru shadow_nodes;
367
368void workingset_update_node(struct xa_node *node)
369{
370 /*
371 * Track non-empty nodes that contain only shadow entries;
372 * unlink those that contain pages or are being freed.
373 *
374 * Avoid acquiring the list_lru lock when the nodes are
375 * already where they should be. The list_empty() test is safe
376 * as node->private_list is protected by the i_pages lock.
377 */
378 VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */
379
380 if (node->count && node->count == node->nr_values) {
381 if (list_empty(&node->private_list)) {
382 list_lru_add(&shadow_nodes, &node->private_list);
383 __inc_lruvec_slab_state(node, WORKINGSET_NODES);
384 }
385 } else {
386 if (!list_empty(&node->private_list)) {
387 list_lru_del(&shadow_nodes, &node->private_list);
388 __dec_lruvec_slab_state(node, WORKINGSET_NODES);
389 }
390 }
391}
392
393static unsigned long count_shadow_nodes(struct shrinker *shrinker,
394 struct shrink_control *sc)
395{
396 unsigned long max_nodes;
397 unsigned long nodes;
398 unsigned long pages;
399
400 nodes = list_lru_shrink_count(&shadow_nodes, sc);
401
402 /*
403 * Approximate a reasonable limit for the nodes
404 * containing shadow entries. We don't need to keep more
405 * shadow entries than possible pages on the active list,
406 * since refault distances bigger than that are dismissed.
407 *
408 * The size of the active list converges toward 100% of
409 * overall page cache as memory grows, with only a tiny
410 * inactive list. Assume the total cache size for that.
411 *
412 * Nodes might be sparsely populated, with only one shadow
413 * entry in the extreme case. Obviously, we cannot keep one
414 * node for every eligible shadow entry, so compromise on a
415 * worst-case density of 1/8th. Below that, not all eligible
416 * refaults can be detected anymore.
417 *
418 * On 64-bit with 7 xa_nodes per page and 64 slots
419 * each, this will reclaim shadow entries when they consume
420 * ~1.8% of available memory:
421 *
422 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
423 */
424#ifdef CONFIG_MEMCG
425 if (sc->memcg) {
426 struct lruvec *lruvec;
427 int i;
428
429 lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
430 for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
431 pages += lruvec_page_state_local(lruvec,
432 NR_LRU_BASE + i);
433 pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE);
434 pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE);
435 } else
436#endif
437 pages = node_present_pages(sc->nid);
438
439 max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
440
441 if (!nodes)
442 return SHRINK_EMPTY;
443
444 if (nodes <= max_nodes)
445 return 0;
446 return nodes - max_nodes;
447}
448
449static enum lru_status shadow_lru_isolate(struct list_head *item,
450 struct list_lru_one *lru,
451 spinlock_t *lru_lock,
452 void *arg) __must_hold(lru_lock)
453{
454 struct xa_node *node = container_of(item, struct xa_node, private_list);
455 XA_STATE(xas, node->array, 0);
456 struct address_space *mapping;
457 int ret;
458
459 /*
460 * Page cache insertions and deletions synchroneously maintain
461 * the shadow node LRU under the i_pages lock and the
462 * lru_lock. Because the page cache tree is emptied before
463 * the inode can be destroyed, holding the lru_lock pins any
464 * address_space that has nodes on the LRU.
465 *
466 * We can then safely transition to the i_pages lock to
467 * pin only the address_space of the particular node we want
468 * to reclaim, take the node off-LRU, and drop the lru_lock.
469 */
470
471 mapping = container_of(node->array, struct address_space, i_pages);
472
473 /* Coming from the list, invert the lock order */
474 if (!xa_trylock(&mapping->i_pages)) {
475 spin_unlock_irq(lru_lock);
476 ret = LRU_RETRY;
477 goto out;
478 }
479
480 list_lru_isolate(lru, item);
481 __dec_lruvec_slab_state(node, WORKINGSET_NODES);
482
483 spin_unlock(lru_lock);
484
485 /*
486 * The nodes should only contain one or more shadow entries,
487 * no pages, so we expect to be able to remove them all and
488 * delete and free the empty node afterwards.
489 */
490 if (WARN_ON_ONCE(!node->nr_values))
491 goto out_invalid;
492 if (WARN_ON_ONCE(node->count != node->nr_values))
493 goto out_invalid;
494 mapping->nrexceptional -= node->nr_values;
495 xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
496 xas.xa_offset = node->offset;
497 xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
498 xas_set_update(&xas, workingset_update_node);
499 /*
500 * We could store a shadow entry here which was the minimum of the
501 * shadow entries we were tracking ...
502 */
503 xas_store(&xas, NULL);
504 __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
505
506out_invalid:
507 xa_unlock_irq(&mapping->i_pages);
508 ret = LRU_REMOVED_RETRY;
509out:
510 cond_resched();
511 spin_lock_irq(lru_lock);
512 return ret;
513}
514
515static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
516 struct shrink_control *sc)
517{
518 /* list_lru lock nests inside the IRQ-safe i_pages lock */
519 return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
520 NULL);
521}
522
523static struct shrinker workingset_shadow_shrinker = {
524 .count_objects = count_shadow_nodes,
525 .scan_objects = scan_shadow_nodes,
526 .seeks = 0, /* ->count reports only fully expendable nodes */
527 .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
528};
529
530/*
531 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
532 * i_pages lock.
533 */
534static struct lock_class_key shadow_nodes_key;
535
536static int __init workingset_init(void)
537{
538 unsigned int timestamp_bits;
539 unsigned int max_order;
540 int ret;
541
542 BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
543 /*
544 * Calculate the eviction bucket size to cover the longest
545 * actionable refault distance, which is currently half of
546 * memory (totalram_pages/2). However, memory hotplug may add
547 * some more pages at runtime, so keep working with up to
548 * double the initial memory by using totalram_pages as-is.
549 */
550 timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
551 max_order = fls_long(totalram_pages() - 1);
552 if (max_order > timestamp_bits)
553 bucket_order = max_order - timestamp_bits;
554 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
555 timestamp_bits, max_order, bucket_order);
556
557 ret = prealloc_shrinker(&workingset_shadow_shrinker);
558 if (ret)
559 goto err;
560 ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
561 &workingset_shadow_shrinker);
562 if (ret)
563 goto err_list_lru;
564 register_shrinker_prepared(&workingset_shadow_shrinker);
565 return 0;
566err_list_lru:
567 free_prealloced_shrinker(&workingset_shadow_shrinker);
568err:
569 return ret;
570}
571module_init(workingset_init);
1/*
2 * Workingset detection
3 *
4 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
5 */
6
7#include <linux/memcontrol.h>
8#include <linux/writeback.h>
9#include <linux/pagemap.h>
10#include <linux/atomic.h>
11#include <linux/module.h>
12#include <linux/swap.h>
13#include <linux/dax.h>
14#include <linux/fs.h>
15#include <linux/mm.h>
16
17/*
18 * Double CLOCK lists
19 *
20 * Per node, two clock lists are maintained for file pages: the
21 * inactive and the active list. Freshly faulted pages start out at
22 * the head of the inactive list and page reclaim scans pages from the
23 * tail. Pages that are accessed multiple times on the inactive list
24 * are promoted to the active list, to protect them from reclaim,
25 * whereas active pages are demoted to the inactive list when the
26 * active list grows too big.
27 *
28 * fault ------------------------+
29 * |
30 * +--------------+ | +-------------+
31 * reclaim <- | inactive | <-+-- demotion | active | <--+
32 * +--------------+ +-------------+ |
33 * | |
34 * +-------------- promotion ------------------+
35 *
36 *
37 * Access frequency and refault distance
38 *
39 * A workload is thrashing when its pages are frequently used but they
40 * are evicted from the inactive list every time before another access
41 * would have promoted them to the active list.
42 *
43 * In cases where the average access distance between thrashing pages
44 * is bigger than the size of memory there is nothing that can be
45 * done - the thrashing set could never fit into memory under any
46 * circumstance.
47 *
48 * However, the average access distance could be bigger than the
49 * inactive list, yet smaller than the size of memory. In this case,
50 * the set could fit into memory if it weren't for the currently
51 * active pages - which may be used more, hopefully less frequently:
52 *
53 * +-memory available to cache-+
54 * | |
55 * +-inactive------+-active----+
56 * a b | c d e f g h i | J K L M N |
57 * +---------------+-----------+
58 *
59 * It is prohibitively expensive to accurately track access frequency
60 * of pages. But a reasonable approximation can be made to measure
61 * thrashing on the inactive list, after which refaulting pages can be
62 * activated optimistically to compete with the existing active pages.
63 *
64 * Approximating inactive page access frequency - Observations:
65 *
66 * 1. When a page is accessed for the first time, it is added to the
67 * head of the inactive list, slides every existing inactive page
68 * towards the tail by one slot, and pushes the current tail page
69 * out of memory.
70 *
71 * 2. When a page is accessed for the second time, it is promoted to
72 * the active list, shrinking the inactive list by one slot. This
73 * also slides all inactive pages that were faulted into the cache
74 * more recently than the activated page towards the tail of the
75 * inactive list.
76 *
77 * Thus:
78 *
79 * 1. The sum of evictions and activations between any two points in
80 * time indicate the minimum number of inactive pages accessed in
81 * between.
82 *
83 * 2. Moving one inactive page N page slots towards the tail of the
84 * list requires at least N inactive page accesses.
85 *
86 * Combining these:
87 *
88 * 1. When a page is finally evicted from memory, the number of
89 * inactive pages accessed while the page was in cache is at least
90 * the number of page slots on the inactive list.
91 *
92 * 2. In addition, measuring the sum of evictions and activations (E)
93 * at the time of a page's eviction, and comparing it to another
94 * reading (R) at the time the page faults back into memory tells
95 * the minimum number of accesses while the page was not cached.
96 * This is called the refault distance.
97 *
98 * Because the first access of the page was the fault and the second
99 * access the refault, we combine the in-cache distance with the
100 * out-of-cache distance to get the complete minimum access distance
101 * of this page:
102 *
103 * NR_inactive + (R - E)
104 *
105 * And knowing the minimum access distance of a page, we can easily
106 * tell if the page would be able to stay in cache assuming all page
107 * slots in the cache were available:
108 *
109 * NR_inactive + (R - E) <= NR_inactive + NR_active
110 *
111 * which can be further simplified to
112 *
113 * (R - E) <= NR_active
114 *
115 * Put into words, the refault distance (out-of-cache) can be seen as
116 * a deficit in inactive list space (in-cache). If the inactive list
117 * had (R - E) more page slots, the page would not have been evicted
118 * in between accesses, but activated instead. And on a full system,
119 * the only thing eating into inactive list space is active pages.
120 *
121 *
122 * Activating refaulting pages
123 *
124 * All that is known about the active list is that the pages have been
125 * accessed more than once in the past. This means that at any given
126 * time there is actually a good chance that pages on the active list
127 * are no longer in active use.
128 *
129 * So when a refault distance of (R - E) is observed and there are at
130 * least (R - E) active pages, the refaulting page is activated
131 * optimistically in the hope that (R - E) active pages are actually
132 * used less frequently than the refaulting page - or even not used at
133 * all anymore.
134 *
135 * If this is wrong and demotion kicks in, the pages which are truly
136 * used more frequently will be reactivated while the less frequently
137 * used once will be evicted from memory.
138 *
139 * But if this is right, the stale pages will be pushed out of memory
140 * and the used pages get to stay in cache.
141 *
142 *
143 * Implementation
144 *
145 * For each node's file LRU lists, a counter for inactive evictions
146 * and activations is maintained (node->inactive_age).
147 *
148 * On eviction, a snapshot of this counter (along with some bits to
149 * identify the node) is stored in the now empty page cache radix tree
150 * slot of the evicted page. This is called a shadow entry.
151 *
152 * On cache misses for which there are shadow entries, an eligible
153 * refault distance will immediately activate the refaulting page.
154 */
155
156#define EVICTION_SHIFT (RADIX_TREE_EXCEPTIONAL_ENTRY + \
157 NODES_SHIFT + \
158 MEM_CGROUP_ID_SHIFT)
159#define EVICTION_MASK (~0UL >> EVICTION_SHIFT)
160
161/*
162 * Eviction timestamps need to be able to cover the full range of
163 * actionable refaults. However, bits are tight in the radix tree
164 * entry, and after storing the identifier for the lruvec there might
165 * not be enough left to represent every single actionable refault. In
166 * that case, we have to sacrifice granularity for distance, and group
167 * evictions into coarser buckets by shaving off lower timestamp bits.
168 */
169static unsigned int bucket_order __read_mostly;
170
171static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
172{
173 eviction >>= bucket_order;
174 eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
175 eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
176 eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
177
178 return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
179}
180
181static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
182 unsigned long *evictionp)
183{
184 unsigned long entry = (unsigned long)shadow;
185 int memcgid, nid;
186
187 entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
188 nid = entry & ((1UL << NODES_SHIFT) - 1);
189 entry >>= NODES_SHIFT;
190 memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
191 entry >>= MEM_CGROUP_ID_SHIFT;
192
193 *memcgidp = memcgid;
194 *pgdat = NODE_DATA(nid);
195 *evictionp = entry << bucket_order;
196}
197
198/**
199 * workingset_eviction - note the eviction of a page from memory
200 * @mapping: address space the page was backing
201 * @page: the page being evicted
202 *
203 * Returns a shadow entry to be stored in @mapping->page_tree in place
204 * of the evicted @page so that a later refault can be detected.
205 */
206void *workingset_eviction(struct address_space *mapping, struct page *page)
207{
208 struct mem_cgroup *memcg = page_memcg(page);
209 struct pglist_data *pgdat = page_pgdat(page);
210 int memcgid = mem_cgroup_id(memcg);
211 unsigned long eviction;
212 struct lruvec *lruvec;
213
214 /* Page is fully exclusive and pins page->mem_cgroup */
215 VM_BUG_ON_PAGE(PageLRU(page), page);
216 VM_BUG_ON_PAGE(page_count(page), page);
217 VM_BUG_ON_PAGE(!PageLocked(page), page);
218
219 lruvec = mem_cgroup_lruvec(pgdat, memcg);
220 eviction = atomic_long_inc_return(&lruvec->inactive_age);
221 return pack_shadow(memcgid, pgdat, eviction);
222}
223
224/**
225 * workingset_refault - evaluate the refault of a previously evicted page
226 * @shadow: shadow entry of the evicted page
227 *
228 * Calculates and evaluates the refault distance of the previously
229 * evicted page in the context of the node it was allocated in.
230 *
231 * Returns %true if the page should be activated, %false otherwise.
232 */
233bool workingset_refault(void *shadow)
234{
235 unsigned long refault_distance;
236 unsigned long active_file;
237 struct mem_cgroup *memcg;
238 unsigned long eviction;
239 struct lruvec *lruvec;
240 unsigned long refault;
241 struct pglist_data *pgdat;
242 int memcgid;
243
244 unpack_shadow(shadow, &memcgid, &pgdat, &eviction);
245
246 rcu_read_lock();
247 /*
248 * Look up the memcg associated with the stored ID. It might
249 * have been deleted since the page's eviction.
250 *
251 * Note that in rare events the ID could have been recycled
252 * for a new cgroup that refaults a shared page. This is
253 * impossible to tell from the available data. However, this
254 * should be a rare and limited disturbance, and activations
255 * are always speculative anyway. Ultimately, it's the aging
256 * algorithm's job to shake out the minimum access frequency
257 * for the active cache.
258 *
259 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
260 * would be better if the root_mem_cgroup existed in all
261 * configurations instead.
262 */
263 memcg = mem_cgroup_from_id(memcgid);
264 if (!mem_cgroup_disabled() && !memcg) {
265 rcu_read_unlock();
266 return false;
267 }
268 lruvec = mem_cgroup_lruvec(pgdat, memcg);
269 refault = atomic_long_read(&lruvec->inactive_age);
270 active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
271 rcu_read_unlock();
272
273 /*
274 * The unsigned subtraction here gives an accurate distance
275 * across inactive_age overflows in most cases.
276 *
277 * There is a special case: usually, shadow entries have a
278 * short lifetime and are either refaulted or reclaimed along
279 * with the inode before they get too old. But it is not
280 * impossible for the inactive_age to lap a shadow entry in
281 * the field, which can then can result in a false small
282 * refault distance, leading to a false activation should this
283 * old entry actually refault again. However, earlier kernels
284 * used to deactivate unconditionally with *every* reclaim
285 * invocation for the longest time, so the occasional
286 * inappropriate activation leading to pressure on the active
287 * list is not a problem.
288 */
289 refault_distance = (refault - eviction) & EVICTION_MASK;
290
291 inc_node_state(pgdat, WORKINGSET_REFAULT);
292
293 if (refault_distance <= active_file) {
294 inc_node_state(pgdat, WORKINGSET_ACTIVATE);
295 return true;
296 }
297 return false;
298}
299
300/**
301 * workingset_activation - note a page activation
302 * @page: page that is being activated
303 */
304void workingset_activation(struct page *page)
305{
306 struct mem_cgroup *memcg;
307 struct lruvec *lruvec;
308
309 rcu_read_lock();
310 /*
311 * Filter non-memcg pages here, e.g. unmap can call
312 * mark_page_accessed() on VDSO pages.
313 *
314 * XXX: See workingset_refault() - this should return
315 * root_mem_cgroup even for !CONFIG_MEMCG.
316 */
317 memcg = page_memcg_rcu(page);
318 if (!mem_cgroup_disabled() && !memcg)
319 goto out;
320 lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
321 atomic_long_inc(&lruvec->inactive_age);
322out:
323 rcu_read_unlock();
324}
325
326/*
327 * Shadow entries reflect the share of the working set that does not
328 * fit into memory, so their number depends on the access pattern of
329 * the workload. In most cases, they will refault or get reclaimed
330 * along with the inode, but a (malicious) workload that streams
331 * through files with a total size several times that of available
332 * memory, while preventing the inodes from being reclaimed, can
333 * create excessive amounts of shadow nodes. To keep a lid on this,
334 * track shadow nodes and reclaim them when they grow way past the
335 * point where they would still be useful.
336 */
337
338static struct list_lru shadow_nodes;
339
340void workingset_update_node(struct radix_tree_node *node, void *private)
341{
342 struct address_space *mapping = private;
343
344 /* Only regular page cache has shadow entries */
345 if (dax_mapping(mapping) || shmem_mapping(mapping))
346 return;
347
348 /*
349 * Track non-empty nodes that contain only shadow entries;
350 * unlink those that contain pages or are being freed.
351 *
352 * Avoid acquiring the list_lru lock when the nodes are
353 * already where they should be. The list_empty() test is safe
354 * as node->private_list is protected by &mapping->tree_lock.
355 */
356 if (node->count && node->count == node->exceptional) {
357 if (list_empty(&node->private_list)) {
358 node->private_data = mapping;
359 list_lru_add(&shadow_nodes, &node->private_list);
360 }
361 } else {
362 if (!list_empty(&node->private_list))
363 list_lru_del(&shadow_nodes, &node->private_list);
364 }
365}
366
367static unsigned long count_shadow_nodes(struct shrinker *shrinker,
368 struct shrink_control *sc)
369{
370 unsigned long max_nodes;
371 unsigned long nodes;
372 unsigned long cache;
373
374 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
375 local_irq_disable();
376 nodes = list_lru_shrink_count(&shadow_nodes, sc);
377 local_irq_enable();
378
379 /*
380 * Approximate a reasonable limit for the radix tree nodes
381 * containing shadow entries. We don't need to keep more
382 * shadow entries than possible pages on the active list,
383 * since refault distances bigger than that are dismissed.
384 *
385 * The size of the active list converges toward 100% of
386 * overall page cache as memory grows, with only a tiny
387 * inactive list. Assume the total cache size for that.
388 *
389 * Nodes might be sparsely populated, with only one shadow
390 * entry in the extreme case. Obviously, we cannot keep one
391 * node for every eligible shadow entry, so compromise on a
392 * worst-case density of 1/8th. Below that, not all eligible
393 * refaults can be detected anymore.
394 *
395 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
396 * each, this will reclaim shadow entries when they consume
397 * ~1.8% of available memory:
398 *
399 * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
400 */
401 if (sc->memcg) {
402 cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
403 LRU_ALL_FILE);
404 } else {
405 cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
406 node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
407 }
408 max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
409
410 if (nodes <= max_nodes)
411 return 0;
412 return nodes - max_nodes;
413}
414
415static enum lru_status shadow_lru_isolate(struct list_head *item,
416 struct list_lru_one *lru,
417 spinlock_t *lru_lock,
418 void *arg)
419{
420 struct address_space *mapping;
421 struct radix_tree_node *node;
422 unsigned int i;
423 int ret;
424
425 /*
426 * Page cache insertions and deletions synchroneously maintain
427 * the shadow node LRU under the mapping->tree_lock and the
428 * lru_lock. Because the page cache tree is emptied before
429 * the inode can be destroyed, holding the lru_lock pins any
430 * address_space that has radix tree nodes on the LRU.
431 *
432 * We can then safely transition to the mapping->tree_lock to
433 * pin only the address_space of the particular node we want
434 * to reclaim, take the node off-LRU, and drop the lru_lock.
435 */
436
437 node = container_of(item, struct radix_tree_node, private_list);
438 mapping = node->private_data;
439
440 /* Coming from the list, invert the lock order */
441 if (!spin_trylock(&mapping->tree_lock)) {
442 spin_unlock(lru_lock);
443 ret = LRU_RETRY;
444 goto out;
445 }
446
447 list_lru_isolate(lru, item);
448 spin_unlock(lru_lock);
449
450 /*
451 * The nodes should only contain one or more shadow entries,
452 * no pages, so we expect to be able to remove them all and
453 * delete and free the empty node afterwards.
454 */
455 if (WARN_ON_ONCE(!node->exceptional))
456 goto out_invalid;
457 if (WARN_ON_ONCE(node->count != node->exceptional))
458 goto out_invalid;
459 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
460 if (node->slots[i]) {
461 if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i])))
462 goto out_invalid;
463 if (WARN_ON_ONCE(!node->exceptional))
464 goto out_invalid;
465 if (WARN_ON_ONCE(!mapping->nrexceptional))
466 goto out_invalid;
467 node->slots[i] = NULL;
468 node->exceptional--;
469 node->count--;
470 mapping->nrexceptional--;
471 }
472 }
473 if (WARN_ON_ONCE(node->exceptional))
474 goto out_invalid;
475 inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
476 __radix_tree_delete_node(&mapping->page_tree, node,
477 workingset_update_node, mapping);
478
479out_invalid:
480 spin_unlock(&mapping->tree_lock);
481 ret = LRU_REMOVED_RETRY;
482out:
483 local_irq_enable();
484 cond_resched();
485 local_irq_disable();
486 spin_lock(lru_lock);
487 return ret;
488}
489
490static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
491 struct shrink_control *sc)
492{
493 unsigned long ret;
494
495 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
496 local_irq_disable();
497 ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
498 local_irq_enable();
499 return ret;
500}
501
502static struct shrinker workingset_shadow_shrinker = {
503 .count_objects = count_shadow_nodes,
504 .scan_objects = scan_shadow_nodes,
505 .seeks = DEFAULT_SEEKS,
506 .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
507};
508
509/*
510 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
511 * mapping->tree_lock.
512 */
513static struct lock_class_key shadow_nodes_key;
514
515static int __init workingset_init(void)
516{
517 unsigned int timestamp_bits;
518 unsigned int max_order;
519 int ret;
520
521 BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
522 /*
523 * Calculate the eviction bucket size to cover the longest
524 * actionable refault distance, which is currently half of
525 * memory (totalram_pages/2). However, memory hotplug may add
526 * some more pages at runtime, so keep working with up to
527 * double the initial memory by using totalram_pages as-is.
528 */
529 timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
530 max_order = fls_long(totalram_pages - 1);
531 if (max_order > timestamp_bits)
532 bucket_order = max_order - timestamp_bits;
533 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
534 timestamp_bits, max_order, bucket_order);
535
536 ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
537 if (ret)
538 goto err;
539 ret = register_shrinker(&workingset_shadow_shrinker);
540 if (ret)
541 goto err_list_lru;
542 return 0;
543err_list_lru:
544 list_lru_destroy(&shadow_nodes);
545err:
546 return ret;
547}
548module_init(workingset_init);