// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "internal.h"

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * With swap available, anonymous pages compete for the same space, so
 * NR_inactive_anon and NR_active_anon have to be taken into account
 * as well. For page cache and anonymous pages respectively:
 *
 *   NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file
 *                               + NR_inactive_anon + NR_active_anon
 *
 *   NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon
 *                               + NR_inactive_file + NR_active_file
 *
 * Which can be further simplified to:
 *
 *   (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon
 *
 *   (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 * A small worked sketch of this test follows this comment block.
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) pages in the userspace workingset, the refaulting page
 * is activated optimistically in the hope that (R - E) pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current workingset.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

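/*
 * A worked sketch of the activation test (illustrative numbers only,
 * not upstream code): a page evicted at a counter reading of E = 1000
 * that refaults at R = 1040 has a refault distance of 40. With 64
 * pages in the competing workingset it is activated on refault; with
 * only 32 it starts over at the head of the inactive list.
 */
static inline bool refault_activation_example(unsigned long evict_age,
                                              unsigned long refault_age,
                                              unsigned long nr_workingset)
{
        /* unsigned subtraction stays accurate across counter wraparound */
        unsigned long distance = refault_age - evict_age;

        return distance <= nr_workingset;       /* true: activate the refault */
}
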
#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

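/*
 * A sketch of how the bucket size falls out, with hypothetical
 * numbers (not upstream code): on a 32-bit machine with 16-bit memcg
 * IDs, roughly 8 timestamp bits may remain after EVICTION_SHIFT; with
 * 2G of RAM (2^19 pages, so max_order 19), bucket_order would come
 * out as 11, grouping evictions into buckets of 2048 counter ticks.
 */
static inline unsigned int bucket_order_example(unsigned int timestamp_bits,
                                                unsigned int max_order)
{
        /* the same rule workingset_init() applies at boot */
        return max_order > timestamp_bits ? max_order - timestamp_bits : 0;
}
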
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry;
	*workingsetp = workingset;
}

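/*
 * Round-trip sketch (illustrative, not upstream code): pack_shadow()
 * and unpack_shadow() are exact inverses for the bits that survive
 * EVICTION_MASK, so a refault recovers the memcg ID, node and
 * workingset bit intact, and the eviction timestamp modulo the mask.
 */
static inline bool shadow_roundtrip_example(int memcgid, pg_data_t *pgdat,
                                            unsigned long eviction,
                                            bool workingset)
{
        void *shadow = pack_shadow(memcgid, pgdat, eviction, workingset);
        unsigned long ev;
        pg_data_t *pg;
        bool ws;
        int id;

        unpack_shadow(shadow, &id, &pg, &ev, &ws);
        return id == memcgid && pg == pgdat && ws == workingset &&
               ev == (eviction & EVICTION_MASK);
}
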
#ifdef CONFIG_LRU_GEN

static void *lru_gen_eviction(struct folio *folio)
{
	int hist;
	unsigned long token;
	unsigned long min_seq;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);
	int refs = folio_lru_refs(folio);
	int tier = lru_tier_from_refs(refs);
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = folio_pgdat(folio);

	BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	lrugen = &lruvec->lrugen;
	min_seq = READ_ONCE(lrugen->min_seq[type]);
	token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);

	hist = lru_hist_from_seq(min_seq);
	atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);

	return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
}

/*
 * Tests if the shadow entry is for a folio that was recently evicted.
 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
 */
static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	int memcg_id;
	unsigned long min_seq;
	struct mem_cgroup *memcg;
	struct pglist_data *pgdat;

	unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);

	memcg = mem_cgroup_from_id(memcg_id);
	*lruvec = mem_cgroup_lruvec(memcg, pgdat);

	min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);
	return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH));
}

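/*
 * Token sketch (illustrative, not upstream code): the low
 * LRU_REFS_WIDTH bits of the token carry the clamped reference
 * count, the bits above it the generation sequence at eviction time,
 * e.g. min_seq = 5 and refs = 3 yield (5 << LRU_REFS_WIDTH) | 2. The
 * shadow entry counts as recent while the lruvec's oldest generation
 * still matches the sequence stored in the token, as tested above.
 */
static inline unsigned long lru_gen_token_example(unsigned long min_seq,
						  int refs)
{
	return (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);
}
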
static void lru_gen_refault(struct folio *folio, void *shadow)
{
	bool recent;
	int hist, tier, refs;
	bool workingset;
	unsigned long token;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);

	rcu_read_lock();

	recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);
	if (lruvec != folio_lruvec(folio))
		goto unlock;

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);

	if (!recent)
		goto unlock;

	lrugen = &lruvec->lrugen;

	hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
	/* see the comment in folio_lru_refs() */
	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
	tier = lru_tier_from_refs(refs);

	atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);

	/*
	 * Count the following two cases as stalls:
	 * 1. For pages accessed through page tables, hotter pages pushed out
	 *    hot pages which refaulted immediately.
	 * 2. For pages accessed multiple times through file descriptors,
	 *    they would have been protected by sort_folio().
	 */
	if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
		set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
	}
unlock:
	rcu_read_unlock();
}

#else /* !CONFIG_LRU_GEN */

static void *lru_gen_eviction(struct folio *folio)
{
	return NULL;
}

static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	return false;
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
}

#endif /* CONFIG_LRU_GEN */

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

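/*
 * Hierarchy sketch (illustrative, not upstream code): one aging event
 * on a leaf lruvec bumps the nonresident_age at every level of the
 * cgroup hierarchy, so for root <- parent <- leaf a single call ages
 * three counters. This mirrors the loop above.
 */
static inline unsigned int nonresident_aging_levels_example(struct lruvec *lruvec)
{
	unsigned int levels = 0;

	do {
		levels++;	/* this level's nonresident_age is aged */
	} while ((lruvec = parent_lruvec(lruvec)));

	return levels;
}
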
/**
 * workingset_eviction - note the eviction of a folio from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @folio: the folio being evicted
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled())
		return lru_gen_eviction(folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	eviction >>= bucket_order;
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
				folio_test_workingset(folio));
}

/**
 * workingset_test_recent - tests if the shadow entry is for a folio that was
 * recently evicted. Also fills in @workingset with the value unpacked from
 * shadow.
 * @shadow: the shadow entry to be tested.
 * @file: whether the corresponding folio is from the file lru.
 * @workingset: where the workingset value unpacked from shadow should
 * be stored.
 * @flush: whether to flush cgroup rstat.
 *
 * Return: true if the shadow is for a recently evicted folio; false otherwise.
 */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	unsigned long refault;
	int memcgid;
	struct pglist_data *pgdat;
	unsigned long eviction;

	rcu_read_lock();

	if (lru_gen_enabled()) {
		bool recent = lru_gen_test_recent(shadow, file,
				&eviction_lruvec, &eviction, workingset);

		rcu_read_unlock();
		return recent;
	}

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
	eviction <<= bucket_order;

	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() &&
	    (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) {
		rcu_read_unlock();
		return false;
	}

	rcu_read_unlock();

	/*
	 * Flush stats (and potentially sleep) outside the RCU read section.
	 *
	 * Note that workingset_test_recent() itself might be called in an RCU
	 * read section (e.g. in cachestat) - these callers need to skip
	 * flushing stats (via the flush argument).
	 *
	 * XXX: With per-memcg flushing and thresholding, is ratelimiting
	 * still needed here?
	 */
	if (flush)
		mem_cgroup_flush_stats_ratelimited(eviction_memcg);

	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having free swap space.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}

	mem_cgroup_put(eviction_memcg);
	return refault_distance <= workingset_size;
}

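/*
 * The sizing rules above in miniature (illustrative, not upstream
 * code): a file refault always competes with active file pages; an
 * anon refault also competes with inactive file pages; and only when
 * swap is available do the anon lists join the comparison.
 */
static inline unsigned long workingset_size_example(bool file, bool have_swap,
						    unsigned long active_file,
						    unsigned long inactive_file,
						    unsigned long active_anon,
						    unsigned long inactive_anon)
{
	unsigned long size = active_file;

	if (!file)
		size += inactive_file;
	if (have_swap) {
		size += active_anon;
		if (file)
			size += inactive_anon;
	}
	return size;
}
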
/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	bool workingset;
	long nr;

	if (lru_gen_enabled()) {
		lru_gen_refault(folio, shadow);
		return;
	}

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event. Make sure the folio is
	 * locked to guarantee folio_memcg() stability throughout.
	 */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	pgdat = folio_pgdat(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	if (!workingset_test_recent(shadow, file, &workingset, true))
		return;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/*
		 * XXX: Move to folio_add_lru() when it supports new vs
		 * putback
		 */
		lru_note_cost_refault(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
}

/**
 * workingset_activation - note a folio activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 */
	if (mem_cgroup_disabled() || folio_memcg_charged(folio))
		workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	struct address_space *mapping;
	struct page *page = virt_to_page(node);

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	mapping = container_of(node->array, struct address_space, i_pages);
	lockdep_assert_held(&mapping->i_pages.xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add_obj(&shadow_nodes, &node->private_list);
			__inc_node_page_state(page, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del_obj(&shadow_nodes, &node->private_list);
			__dec_node_page_state(page, WORKINGSET_NODES);
		}
	}
}

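/*
 * The tracking condition above, spelled out (illustrative, not
 * upstream code): a node belongs on the shadow LRU iff it is
 * non-empty and every present slot holds a value (shadow) entry.
 * count 4 / nr_values 4 is tracked; count 4 / nr_values 3 still
 * references a page and is not.
 */
static inline bool node_only_shadows_example(unsigned int count,
					     unsigned int nr_values)
{
	return count && count == nr_values;
}
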
static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		mem_cgroup_flush_stats_ratelimited(sc->memcg);
		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

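/*
 * The ~1.8% above, worked through (illustrative, assuming 64-bit and
 * XA_CHUNK_SHIFT == 6): max_nodes is pages / 8, i.e. one node per
 * eight pages at the assumed worst-case density of 8 shadow entries
 * per 64-slot node. With about 7 xa_nodes fitting in a page, that is
 * pages / 56 pages' worth of nodes, and 1/56 is roughly 1.8%.
 */
static inline unsigned long max_shadow_nodes_example(unsigned long pages)
{
	return pages >> (XA_CHUNK_SHIFT - 3);	/* pages / 8 */
}
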
static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  void *arg) __must_hold(lru->lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * &lru->lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the &lru->lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the &lru->lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(&lru->lock);
		ret = LRU_RETRY;
		goto out;
	}

	/* For page cache we need to hold i_lock */
	if (mapping->host != NULL) {
		if (!spin_trylock(&mapping->host->i_lock)) {
			xa_unlock(&mapping->i_pages);
			spin_unlock_irq(&lru->lock);
			ret = LRU_RETRY;
			goto out;
		}
	}

	list_lru_isolate(lru, item);
	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);

	spin_unlock(&lru->lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping->host != NULL) {
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	return ret;
}

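/*
 * The trylock dance above in miniature (illustrative, not upstream
 * code): arriving from the list side inverts the usual lock order
 * (i_pages lock before the LRU lock), so the inner lock may only be
 * tried, and on failure everything is dropped and the caller retries.
 */
static inline bool inverted_trylock_example(spinlock_t *inner,
					    spinlock_t *outer)
{
	/* @outer is held; @inner normally nests outside it */
	if (!spin_trylock(inner)) {
		spin_unlock(outer);	/* back out; caller will retry */
		return false;
	}
	return true;
}
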
static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	struct shrinker *workingset_shadow_shrinker;
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret = -ENOMEM;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	workingset_shadow_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						    SHRINKER_MEMCG_AWARE,
						    "mm-shadow");
	if (!workingset_shadow_shrinker)
		goto err;

	ret = list_lru_init_memcg_key(&shadow_nodes, workingset_shadow_shrinker,
				      &shadow_nodes_key);
	if (ret)
		goto err_list_lru;

	workingset_shadow_shrinker->count_objects = count_shadow_nodes;
	workingset_shadow_shrinker->scan_objects = scan_shadow_nodes;
	/* ->count reports only fully expendable nodes */
	workingset_shadow_shrinker->seeks = 0;

	shrinker_register(workingset_shadow_shrinker);
	return 0;
err_list_lru:
	shrinker_free(workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);