mm/workingset.c (v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Workingset detection
  4 *
  5 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
  6 */
  7
  8#include <linux/memcontrol.h>
  9#include <linux/writeback.h>
 10#include <linux/shmem_fs.h>
 11#include <linux/pagemap.h>
 12#include <linux/atomic.h>
 13#include <linux/module.h>
 14#include <linux/swap.h>
 15#include <linux/dax.h>
 16#include <linux/fs.h>
 17#include <linux/mm.h>
 18
 19/*
 20 *		Double CLOCK lists
 21 *
 22 * Per node, two clock lists are maintained for file pages: the
 23 * inactive and the active list.  Freshly faulted pages start out at
 24 * the head of the inactive list and page reclaim scans pages from the
 25 * tail.  Pages that are accessed multiple times on the inactive list
 26 * are promoted to the active list, to protect them from reclaim,
 27 * whereas active pages are demoted to the inactive list when the
 28 * active list grows too big.
 29 *
 30 *   fault ------------------------+
 31 *                                 |
 32 *              +--------------+   |            +-------------+
 33 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 34 *              +--------------+                +-------------+    |
 35 *                     |                                           |
 36 *                     +-------------- promotion ------------------+
 37 *
 38 *
 39 *		Access frequency and refault distance
 40 *
 41 * A workload is thrashing when its pages are frequently used but they
 42 * are evicted from the inactive list every time before another access
 43 * would have promoted them to the active list.
 44 *
 45 * In cases where the average access distance between thrashing pages
 46 * is bigger than the size of memory there is nothing that can be
 47 * done - the thrashing set could never fit into memory under any
 48 * circumstance.
 49 *
 50 * However, the average access distance could be bigger than the
 51 * inactive list, yet smaller than the size of memory.  In this case,
 52 * the set could fit into memory if it weren't for the currently
 53 * active pages - which may be used more, hopefully less frequently:
 54 *
 55 *      +-memory available to cache-+
 56 *      |                           |
 57 *      +-inactive------+-active----+
 58 *  a b | c d e f g h i | J K L M N |
 59 *      +---------------+-----------+
 60 *
 61 * It is prohibitively expensive to accurately track access frequency
 62 * of pages.  But a reasonable approximation can be made to measure
 63 * thrashing on the inactive list, after which refaulting pages can be
 64 * activated optimistically to compete with the existing active pages.
 65 *
 66 * Approximating inactive page access frequency - Observations:
 67 *
 68 * 1. When a page is accessed for the first time, it is added to the
 69 *    head of the inactive list, slides every existing inactive page
 70 *    towards the tail by one slot, and pushes the current tail page
 71 *    out of memory.
 72 *
 73 * 2. When a page is accessed for the second time, it is promoted to
 74 *    the active list, shrinking the inactive list by one slot.  This
 75 *    also slides all inactive pages that were faulted into the cache
 76 *    more recently than the activated page towards the tail of the
 77 *    inactive list.
 78 *
 79 * Thus:
 80 *
 81 * 1. The sum of evictions and activations between any two points in
 82 *    time indicates the minimum number of inactive pages accessed in
 83 *    between.
 84 *
 85 * 2. Moving one inactive page N page slots towards the tail of the
 86 *    list requires at least N inactive page accesses.
 87 *
 88 * Combining these:
 89 *
 90 * 1. When a page is finally evicted from memory, the number of
 91 *    inactive pages accessed while the page was in cache is at least
 92 *    the number of page slots on the inactive list.
 93 *
 94 * 2. In addition, measuring the sum of evictions and activations (E)
 95 *    at the time of a page's eviction, and comparing it to another
 96 *    reading (R) at the time the page faults back into memory tells
 97 *    the minimum number of accesses while the page was not cached.
 98 *    This is called the refault distance.
 99 *
100 * Because the first access of the page was the fault and the second
101 * access the refault, we combine the in-cache distance with the
102 * out-of-cache distance to get the complete minimum access distance
103 * of this page:
104 *
105 *      NR_inactive + (R - E)
106 *
107 * And knowing the minimum access distance of a page, we can easily
108 * tell if the page would be able to stay in cache assuming all page
109 * slots in the cache were available:
110 *
111 *   NR_inactive + (R - E) <= NR_inactive + NR_active
112 *
113 * which can be further simplified to
114 *
115 *   (R - E) <= NR_active
116 *
117 * Put into words, the refault distance (out-of-cache) can be seen as
118 * a deficit in inactive list space (in-cache).  If the inactive list
119 * had (R - E) more page slots, the page would not have been evicted
120 * in between accesses, but activated instead.  And on a full system,
121 * the only thing eating into inactive list space is active pages.
122 *
123 *
124 *		Activating refaulting pages
125 *
126 * All that is known about the active list is that the pages have been
127 * accessed more than once in the past.  This means that at any given
128 * time there is actually a good chance that pages on the active list
129 * are no longer in active use.
130 *
131 * So when a refault distance of (R - E) is observed and there are at
132 * least (R - E) active pages, the refaulting page is activated
133 * optimistically in the hope that (R - E) active pages are actually
134 * used less frequently than the refaulting page - or even not used at
135 * all anymore.
136 *
137 * If this is wrong and demotion kicks in, the pages which are truly
138 * used more frequently will be reactivated while the less frequently
 139 * used ones will be evicted from memory.
140 *
141 * But if this is right, the stale pages will be pushed out of memory
142 * and the used pages get to stay in cache.
143 *
144 *
145 *		Implementation
146 *
147 * For each node's file LRU lists, a counter for inactive evictions
148 * and activations is maintained (node->inactive_age).
149 *
150 * On eviction, a snapshot of this counter (along with some bits to
151 * identify the node) is stored in the now empty page cache radix tree
152 * slot of the evicted page.  This is called a shadow entry.
153 *
154 * On cache misses for which there are shadow entries, an eligible
155 * refault distance will immediately activate the refaulting page.
156 */
157
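/*
 * Illustrative sketch, not part of the kernel source: a minimal
 * userspace model of the bookkeeping described above.  The names are
 * assumptions made for the example; only the arithmetic follows the
 * comment - eviction snapshots E of the counter, refault reads R, and
 * the page is activated when (R - E) <= NR_active.
 */
#include <stdbool.h>

static unsigned long model_inactive_age;	/* evictions + activations */

/* page falls off the inactive list: remember E as the "shadow entry" */
static unsigned long model_evict(void)
{
	return ++model_inactive_age;			/* E */
}

/* page gets promoted to the active list: age the counter as well */
static void model_activate(void)
{
	++model_inactive_age;
}

/* page faults back in: compare the refault distance to the active list */
static bool model_refault(unsigned long eviction, unsigned long nr_active)
{
	unsigned long refault = model_inactive_age;	/* R */

	return refault - eviction <= nr_active;		/* (R - E) <= NR_active */
}

/*
 * Worked example: a page is evicted when the counter reads E = 1000.
 * By the time it refaults, further evictions and activations have
 * driven the counter to R = 1600, so the refault distance is 600.
 * With 800 pages on the active list, 600 <= 800 holds and the page is
 * activated ahead of the existing active pages.
 */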
158#define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
159			 NODES_SHIFT +	\
160			 MEM_CGROUP_ID_SHIFT)
161#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
162
163/*
164 * Eviction timestamps need to be able to cover the full range of
165 * actionable refaults. However, bits are tight in the radix tree
166 * entry, and after storing the identifier for the lruvec there might
167 * not be enough left to represent every single actionable refault. In
168 * that case, we have to sacrifice granularity for distance, and group
169 * evictions into coarser buckets by shaving off lower timestamp bits.
170 */
171static unsigned int bucket_order __read_mostly;
172
173static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
174{
175	eviction >>= bucket_order;
176	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
177	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
178	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
179
180	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
181}
182
183static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
184			  unsigned long *evictionp)
185{
186	unsigned long entry = (unsigned long)shadow;
187	int memcgid, nid;
188
189	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
190	nid = entry & ((1UL << NODES_SHIFT) - 1);
191	entry >>= NODES_SHIFT;
192	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
193	entry >>= MEM_CGROUP_ID_SHIFT;
194
195	*memcgidp = memcgid;
196	*pgdat = NODE_DATA(nid);
197	*evictionp = entry << bucket_order;
198}
199
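/*
 * Illustrative sketch, not part of the kernel source: the same bit
 * layout as pack_shadow()/unpack_shadow() above, but with the kernel
 * constants replaced by assumed example widths (EX_*) so it compiles
 * stand-alone.  The bucket_order granularity shift is left out.
 */
#include <assert.h>

#define EX_EXCEPTIONAL_SHIFT	2	/* assumed: tag bits of the entry */
#define EX_NODES_SHIFT		6	/* assumed: up to 64 nodes */
#define EX_MEMCG_SHIFT		16	/* assumed: 64K memcg IDs */

static unsigned long ex_pack(int memcgid, int nid, unsigned long eviction)
{
	eviction = (eviction << EX_MEMCG_SHIFT) | memcgid;
	eviction = (eviction << EX_NODES_SHIFT) | nid;
	return (eviction << EX_EXCEPTIONAL_SHIFT) | 1;	/* tag as shadow */
}

static void ex_unpack(unsigned long entry, int *memcgid, int *nid,
		      unsigned long *eviction)
{
	entry >>= EX_EXCEPTIONAL_SHIFT;
	*nid = entry & ((1UL << EX_NODES_SHIFT) - 1);
	entry >>= EX_NODES_SHIFT;
	*memcgid = entry & ((1UL << EX_MEMCG_SHIFT) - 1);
	*eviction = entry >> EX_MEMCG_SHIFT;
}

int main(void)
{
	int memcgid, nid;
	unsigned long eviction;

	/* 200 keeps the packed value within 32 bits as well */
	ex_unpack(ex_pack(42, 3, 200), &memcgid, &nid, &eviction);
	assert(memcgid == 42 && nid == 3 && eviction == 200);
	return 0;
}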
200/**
201 * workingset_eviction - note the eviction of a page from memory
202 * @mapping: address space the page was backing
203 * @page: the page being evicted
204 *
205 * Returns a shadow entry to be stored in @mapping->i_pages in place
206 * of the evicted @page so that a later refault can be detected.
207 */
208void *workingset_eviction(struct address_space *mapping, struct page *page)
209{
210	struct mem_cgroup *memcg = page_memcg(page);
211	struct pglist_data *pgdat = page_pgdat(page);
212	int memcgid = mem_cgroup_id(memcg);
213	unsigned long eviction;
214	struct lruvec *lruvec;
215
216	/* Page is fully exclusive and pins page->mem_cgroup */
217	VM_BUG_ON_PAGE(PageLRU(page), page);
218	VM_BUG_ON_PAGE(page_count(page), page);
219	VM_BUG_ON_PAGE(!PageLocked(page), page);
220
221	lruvec = mem_cgroup_lruvec(pgdat, memcg);
222	eviction = atomic_long_inc_return(&lruvec->inactive_age);
223	return pack_shadow(memcgid, pgdat, eviction);
224}
225
226/**
227 * workingset_refault - evaluate the refault of a previously evicted page
228 * @shadow: shadow entry of the evicted page
229 *
230 * Calculates and evaluates the refault distance of the previously
231 * evicted page in the context of the node it was allocated in.
232 *
233 * Returns %true if the page should be activated, %false otherwise.
234 */
235bool workingset_refault(void *shadow)
236{
237	unsigned long refault_distance;
238	unsigned long active_file;
239	struct mem_cgroup *memcg;
240	unsigned long eviction;
241	struct lruvec *lruvec;
242	unsigned long refault;
243	struct pglist_data *pgdat;
244	int memcgid;
245
246	unpack_shadow(shadow, &memcgid, &pgdat, &eviction);
247
248	rcu_read_lock();
249	/*
250	 * Look up the memcg associated with the stored ID. It might
251	 * have been deleted since the page's eviction.
252	 *
253	 * Note that in rare events the ID could have been recycled
254	 * for a new cgroup that refaults a shared page. This is
255	 * impossible to tell from the available data. However, this
256	 * should be a rare and limited disturbance, and activations
257	 * are always speculative anyway. Ultimately, it's the aging
258	 * algorithm's job to shake out the minimum access frequency
259	 * for the active cache.
260	 *
261	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
262	 * would be better if the root_mem_cgroup existed in all
263	 * configurations instead.
264	 */
265	memcg = mem_cgroup_from_id(memcgid);
266	if (!mem_cgroup_disabled() && !memcg) {
267		rcu_read_unlock();
268		return false;
269	}
270	lruvec = mem_cgroup_lruvec(pgdat, memcg);
271	refault = atomic_long_read(&lruvec->inactive_age);
272	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
273
274	/*
275	 * The unsigned subtraction here gives an accurate distance
276	 * across inactive_age overflows in most cases.
277	 *
278	 * There is a special case: usually, shadow entries have a
279	 * short lifetime and are either refaulted or reclaimed along
280	 * with the inode before they get too old.  But it is not
281	 * impossible for the inactive_age to lap a shadow entry in
282	 * the field, which can then result in a false small
283	 * refault distance, leading to a false activation should this
284	 * old entry actually refault again.  However, earlier kernels
285	 * used to deactivate unconditionally with *every* reclaim
286	 * invocation for the longest time, so the occasional
287	 * inappropriate activation leading to pressure on the active
288	 * list is not a problem.
289	 */
290	refault_distance = (refault - eviction) & EVICTION_MASK;
291
292	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);
293
294	if (refault_distance <= active_file) {
295		inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
296		rcu_read_unlock();
297		return true;
298	}
299	rcu_read_unlock();
300	return false;
301}
302
303/**
304 * workingset_activation - note a page activation
305 * @page: page that is being activated
306 */
307void workingset_activation(struct page *page)
308{
309	struct mem_cgroup *memcg;
310	struct lruvec *lruvec;
311
312	rcu_read_lock();
313	/*
314	 * Filter non-memcg pages here, e.g. unmap can call
315	 * mark_page_accessed() on VDSO pages.
316	 *
317	 * XXX: See workingset_refault() - this should return
318	 * root_mem_cgroup even for !CONFIG_MEMCG.
319	 */
320	memcg = page_memcg_rcu(page);
321	if (!mem_cgroup_disabled() && !memcg)
322		goto out;
323	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
324	atomic_long_inc(&lruvec->inactive_age);
325out:
326	rcu_read_unlock();
327}
328
329/*
330 * Shadow entries reflect the share of the working set that does not
331 * fit into memory, so their number depends on the access pattern of
332 * the workload.  In most cases, they will refault or get reclaimed
333 * along with the inode, but a (malicious) workload that streams
334 * through files with a total size several times that of available
335 * memory, while preventing the inodes from being reclaimed, can
336 * create excessive amounts of shadow nodes.  To keep a lid on this,
337 * track shadow nodes and reclaim them when they grow way past the
338 * point where they would still be useful.
339 */
340
341static struct list_lru shadow_nodes;
342
343void workingset_update_node(struct radix_tree_node *node)
344{
345	/*
346	 * Track non-empty nodes that contain only shadow entries;
347	 * unlink those that contain pages or are being freed.
348	 *
349	 * Avoid acquiring the list_lru lock when the nodes are
350	 * already where they should be. The list_empty() test is safe
351	 * as node->private_list is protected by the i_pages lock.
352	 */
353	if (node->count && node->count == node->exceptional) {
354		if (list_empty(&node->private_list))
355			list_lru_add(&shadow_nodes, &node->private_list);
356	} else {
357		if (!list_empty(&node->private_list))
358			list_lru_del(&shadow_nodes, &node->private_list);
359	}
360}
361
362static unsigned long count_shadow_nodes(struct shrinker *shrinker,
363					struct shrink_control *sc)
364{
365	unsigned long max_nodes;
366	unsigned long nodes;
367	unsigned long cache;
368
369	/* list_lru lock nests inside the IRQ-safe i_pages lock */
370	local_irq_disable();
371	nodes = list_lru_shrink_count(&shadow_nodes, sc);
372	local_irq_enable();
373
374	/*
375	 * Approximate a reasonable limit for the radix tree nodes
376	 * containing shadow entries. We don't need to keep more
377	 * shadow entries than possible pages on the active list,
378	 * since refault distances bigger than that are dismissed.
379	 *
380	 * The size of the active list converges toward 100% of
381	 * overall page cache as memory grows, with only a tiny
382	 * inactive list. Assume the total cache size for that.
383	 *
384	 * Nodes might be sparsely populated, with only one shadow
385	 * entry in the extreme case. Obviously, we cannot keep one
386	 * node for every eligible shadow entry, so compromise on a
387	 * worst-case density of 1/8th. Below that, not all eligible
388	 * refaults can be detected anymore.
389	 *
390	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
391	 * each, this will reclaim shadow entries when they consume
392	 * ~1.8% of available memory:
393	 *
394	 * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
395	 */
396	if (sc->memcg) {
397		cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
398						     LRU_ALL_FILE);
399	} else {
400		cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
401			node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
402	}
403	max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
404
405	if (nodes <= max_nodes)
406		return 0;
407	return nodes - max_nodes;
408}
409
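/*
 * Worked check of the "~1.8%" figure above, assuming 64-bit, 4K pages,
 * RADIX_TREE_MAP_SHIFT == 6 and ~7 radix_tree_nodes per page:
 *
 *   max_nodes            = cache >> (6 - 3)   = cache / 8
 *   node memory in pages = max_nodes / 7      = cache / 56
 *   share of the cache   = 1 / 56             ~= 1.8%
 */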
410static enum lru_status shadow_lru_isolate(struct list_head *item,
411					  struct list_lru_one *lru,
412					  spinlock_t *lru_lock,
413					  void *arg)
414{
415	struct address_space *mapping;
416	struct radix_tree_node *node;
417	unsigned int i;
418	int ret;
419
420	/*
421	 * Page cache insertions and deletions synchronously maintain
422	 * the shadow node LRU under the i_pages lock and the
423	 * lru_lock.  Because the page cache tree is emptied before
424	 * the inode can be destroyed, holding the lru_lock pins any
425	 * address_space that has radix tree nodes on the LRU.
426	 *
427	 * We can then safely transition to the i_pages lock to
428	 * pin only the address_space of the particular node we want
429	 * to reclaim, take the node off-LRU, and drop the lru_lock.
430	 */
431
432	node = container_of(item, struct radix_tree_node, private_list);
433	mapping = container_of(node->root, struct address_space, i_pages);
434
435	/* Coming from the list, invert the lock order */
436	if (!xa_trylock(&mapping->i_pages)) {
437		spin_unlock(lru_lock);
438		ret = LRU_RETRY;
439		goto out;
440	}
441
442	list_lru_isolate(lru, item);
443	spin_unlock(lru_lock);
444
445	/*
446	 * The nodes should only contain one or more shadow entries,
447	 * no pages, so we expect to be able to remove them all and
448	 * delete and free the empty node afterwards.
449	 */
450	if (WARN_ON_ONCE(!node->exceptional))
451		goto out_invalid;
452	if (WARN_ON_ONCE(node->count != node->exceptional))
453		goto out_invalid;
454	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
455		if (node->slots[i]) {
456			if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i])))
457				goto out_invalid;
458			if (WARN_ON_ONCE(!node->exceptional))
459				goto out_invalid;
460			if (WARN_ON_ONCE(!mapping->nrexceptional))
461				goto out_invalid;
462			node->slots[i] = NULL;
463			node->exceptional--;
464			node->count--;
465			mapping->nrexceptional--;
466		}
467	}
468	if (WARN_ON_ONCE(node->exceptional))
469		goto out_invalid;
470	inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
471	__radix_tree_delete_node(&mapping->i_pages, node,
472				 workingset_lookup_update(mapping));
473
474out_invalid:
475	xa_unlock(&mapping->i_pages);
476	ret = LRU_REMOVED_RETRY;
477out:
478	local_irq_enable();
479	cond_resched();
480	local_irq_disable();
481	spin_lock(lru_lock);
482	return ret;
483}
484
485static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
486				       struct shrink_control *sc)
487{
488	unsigned long ret;
489
490	/* list_lru lock nests inside the IRQ-safe i_pages lock */
491	local_irq_disable();
492	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
493	local_irq_enable();
494	return ret;
495}
496
497static struct shrinker workingset_shadow_shrinker = {
498	.count_objects = count_shadow_nodes,
499	.scan_objects = scan_shadow_nodes,
500	.seeks = DEFAULT_SEEKS,
501	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
502};
503
504/*
505 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
506 * i_pages lock.
507 */
508static struct lock_class_key shadow_nodes_key;
509
510static int __init workingset_init(void)
511{
512	unsigned int timestamp_bits;
513	unsigned int max_order;
514	int ret;
515
516	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
517	/*
518	 * Calculate the eviction bucket size to cover the longest
519	 * actionable refault distance, which is currently half of
520	 * memory (totalram_pages/2). However, memory hotplug may add
521	 * some more pages at runtime, so keep working with up to
522	 * double the initial memory by using totalram_pages as-is.
523	 */
524	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
525	max_order = fls_long(totalram_pages - 1);
526	if (max_order > timestamp_bits)
527		bucket_order = max_order - timestamp_bits;
528	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
529	       timestamp_bits, max_order, bucket_order);
530
531	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
532	if (ret)
533		goto err;
534	ret = register_shrinker(&workingset_shadow_shrinker);
535	if (ret)
536		goto err_list_lru;
537	return 0;
538err_list_lru:
539	list_lru_destroy(&shadow_nodes);
540err:
541	return ret;
542}
543module_init(workingset_init);
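/*
 * Worked example for the bucket_order calculation above, using
 * assumed configuration values (2 radix-tree tag bits, NODES_SHIFT of
 * 0, MEM_CGROUP_ID_SHIFT of 16, so EVICTION_SHIFT = 18).  On 64-bit,
 * timestamp_bits = 64 - 18 = 46, which exceeds max_order for any
 * realistic amount of RAM, so bucket_order stays 0 and timestamps
 * keep full granularity.  On 32-bit with 4 GiB of RAM (totalram_pages
 * = 2^20), timestamp_bits = 32 - 18 = 14 and max_order = 20, so
 * bucket_order becomes 6: evictions are grouped into buckets of
 * 2^6 = 64 counter ticks to make the distances fit.
 */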
mm/workingset.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Workingset detection
  4 *
  5 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
  6 */
  7
  8#include <linux/memcontrol.h>
  9#include <linux/mm_inline.h>
 10#include <linux/writeback.h>
 11#include <linux/shmem_fs.h>
 12#include <linux/pagemap.h>
 13#include <linux/atomic.h>
 14#include <linux/module.h>
 15#include <linux/swap.h>
 16#include <linux/dax.h>
 17#include <linux/fs.h>
 18#include <linux/mm.h>
 19#include "internal.h"
 20
 21/*
 22 *		Double CLOCK lists
 23 *
 24 * Per node, two clock lists are maintained for file pages: the
 25 * inactive and the active list.  Freshly faulted pages start out at
 26 * the head of the inactive list and page reclaim scans pages from the
 27 * tail.  Pages that are accessed multiple times on the inactive list
 28 * are promoted to the active list, to protect them from reclaim,
 29 * whereas active pages are demoted to the inactive list when the
 30 * active list grows too big.
 31 *
 32 *   fault ------------------------+
 33 *                                 |
 34 *              +--------------+   |            +-------------+
 35 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 36 *              +--------------+                +-------------+    |
 37 *                     |                                           |
 38 *                     +-------------- promotion ------------------+
 39 *
 40 *
 41 *		Access frequency and refault distance
 42 *
 43 * A workload is thrashing when its pages are frequently used but they
 44 * are evicted from the inactive list every time before another access
 45 * would have promoted them to the active list.
 46 *
 47 * In cases where the average access distance between thrashing pages
 48 * is bigger than the size of memory there is nothing that can be
 49 * done - the thrashing set could never fit into memory under any
 50 * circumstance.
 51 *
 52 * However, the average access distance could be bigger than the
 53 * inactive list, yet smaller than the size of memory.  In this case,
 54 * the set could fit into memory if it weren't for the currently
 55 * active pages - which may be used more, hopefully less frequently:
 56 *
 57 *      +-memory available to cache-+
 58 *      |                           |
 59 *      +-inactive------+-active----+
 60 *  a b | c d e f g h i | J K L M N |
 61 *      +---------------+-----------+
 62 *
 63 * It is prohibitively expensive to accurately track access frequency
 64 * of pages.  But a reasonable approximation can be made to measure
 65 * thrashing on the inactive list, after which refaulting pages can be
 66 * activated optimistically to compete with the existing active pages.
 67 *
 68 * Approximating inactive page access frequency - Observations:
 69 *
 70 * 1. When a page is accessed for the first time, it is added to the
 71 *    head of the inactive list, slides every existing inactive page
 72 *    towards the tail by one slot, and pushes the current tail page
 73 *    out of memory.
 74 *
 75 * 2. When a page is accessed for the second time, it is promoted to
 76 *    the active list, shrinking the inactive list by one slot.  This
 77 *    also slides all inactive pages that were faulted into the cache
 78 *    more recently than the activated page towards the tail of the
 79 *    inactive list.
 80 *
 81 * Thus:
 82 *
 83 * 1. The sum of evictions and activations between any two points in
 84 *    time indicates the minimum number of inactive pages accessed in
 85 *    between.
 86 *
 87 * 2. Moving one inactive page N page slots towards the tail of the
 88 *    list requires at least N inactive page accesses.
 89 *
 90 * Combining these:
 91 *
 92 * 1. When a page is finally evicted from memory, the number of
 93 *    inactive pages accessed while the page was in cache is at least
 94 *    the number of page slots on the inactive list.
 95 *
 96 * 2. In addition, measuring the sum of evictions and activations (E)
 97 *    at the time of a page's eviction, and comparing it to another
 98 *    reading (R) at the time the page faults back into memory tells
 99 *    the minimum number of accesses while the page was not cached.
100 *    This is called the refault distance.
101 *
102 * Because the first access of the page was the fault and the second
103 * access the refault, we combine the in-cache distance with the
104 * out-of-cache distance to get the complete minimum access distance
105 * of this page:
106 *
107 *      NR_inactive + (R - E)
108 *
109 * And knowing the minimum access distance of a page, we can easily
110 * tell if the page would be able to stay in cache assuming all page
111 * slots in the cache were available:
112 *
113 *   NR_inactive + (R - E) <= NR_inactive + NR_active
114 *
115 * If we have swap we should consider about NR_inactive_anon and
116 * NR_active_anon, so for page cache and anonymous respectively:
117 *
118 *   NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file
119 *   + NR_inactive_anon + NR_active_anon
120 *
121 *   NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon
122 *   + NR_inactive_file + NR_active_file
123 *
124 * Which can be further simplified to:
125 *
126 *   (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon
127 *
128 *   (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file
129 *
130 * Put into words, the refault distance (out-of-cache) can be seen as
131 * a deficit in inactive list space (in-cache).  If the inactive list
132 * had (R - E) more page slots, the page would not have been evicted
133 * in between accesses, but activated instead.  And on a full system,
134 * the only thing eating into inactive list space is active pages.
135 *
136 *
137 *		Refaulting inactive pages
138 *
139 * All that is known about the active list is that the pages have been
140 * accessed more than once in the past.  This means that at any given
141 * time there is actually a good chance that pages on the active list
142 * are no longer in active use.
143 *
144 * So when a refault distance of (R - E) is observed and there are at
145 * least (R - E) pages in the userspace workingset, the refaulting page
146 * is activated optimistically in the hope that (R - E) pages are actually
147 * used less frequently than the refaulting page - or even not used at
148 * all anymore.
149 *
150 * That means if inactive cache is refaulting with a suitable refault
151 * distance, we assume the cache workingset is transitioning and put
152 * pressure on the current workingset.
153 *
154 * If this is wrong and demotion kicks in, the pages which are truly
155 * used more frequently will be reactivated while the less frequently
 156 * used ones will be evicted from memory.
157 *
158 * But if this is right, the stale pages will be pushed out of memory
159 * and the used pages get to stay in cache.
160 *
161 *		Refaulting active pages
162 *
163 * If on the other hand the refaulting pages have recently been
164 * deactivated, it means that the active list is no longer protecting
165 * actively used cache from reclaim. The cache is NOT transitioning to
166 * a different workingset; the existing workingset is thrashing in the
167 * space allocated to the page cache.
168 *
169 *
170 *		Implementation
171 *
172 * For each node's LRU lists, a counter for inactive evictions and
173 * activations is maintained (node->nonresident_age).
174 *
175 * On eviction, a snapshot of this counter (along with some bits to
176 * identify the node) is stored in the now empty page cache
177 * slot of the evicted page.  This is called a shadow entry.
178 *
179 * On cache misses for which there are shadow entries, an eligible
180 * refault distance will immediately activate the refaulting page.
181 */
182
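/*
 * Illustrative sketch, not part of the kernel source: the eligibility
 * test spelled out in the two inequalities above, for both LRU types.
 * The counts are plain parameters here; in the kernel they come from
 * lruvec_page_state() and the anon side only participates when swap
 * space is available.
 */
#include <stdbool.h>

struct lru_counts {
	unsigned long active_file, inactive_file;
	unsigned long active_anon, inactive_anon;
};

static bool refault_eligible(unsigned long refault_distance, bool file,
			     bool have_swap, const struct lru_counts *c)
{
	unsigned long workingset = c->active_file;

	if (!file)		/* anon refaults also compete with inactive file cache */
		workingset += c->inactive_file;
	if (have_swap) {
		workingset += c->active_anon;
		if (file)	/* file refaults may displace anon only if swap exists */
			workingset += c->inactive_anon;
	}
	return refault_distance <= workingset;
}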
183#define WORKINGSET_SHIFT 1
184#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
185			 WORKINGSET_SHIFT + NODES_SHIFT + \
186			 MEM_CGROUP_ID_SHIFT)
187#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
188
189/*
190 * Eviction timestamps need to be able to cover the full range of
191 * actionable refaults. However, bits are tight in the xarray
192 * entry, and after storing the identifier for the lruvec there might
193 * not be enough left to represent every single actionable refault. In
194 * that case, we have to sacrifice granularity for distance, and group
195 * evictions into coarser buckets by shaving off lower timestamp bits.
196 */
197static unsigned int bucket_order __read_mostly;
198
199static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
200			 bool workingset)
201{
202	eviction &= EVICTION_MASK;
203	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
204	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
205	eviction = (eviction << WORKINGSET_SHIFT) | workingset;
206
207	return xa_mk_value(eviction);
208}
209
210static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
211			  unsigned long *evictionp, bool *workingsetp)
212{
213	unsigned long entry = xa_to_value(shadow);
214	int memcgid, nid;
215	bool workingset;
216
217	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
218	entry >>= WORKINGSET_SHIFT;
219	nid = entry & ((1UL << NODES_SHIFT) - 1);
220	entry >>= NODES_SHIFT;
221	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
222	entry >>= MEM_CGROUP_ID_SHIFT;
223
224	*memcgidp = memcgid;
225	*pgdat = NODE_DATA(nid);
226	*evictionp = entry;
227	*workingsetp = workingset;
228}
229
230#ifdef CONFIG_LRU_GEN
231
232static void *lru_gen_eviction(struct folio *folio)
233{
234	int hist;
235	unsigned long token;
236	unsigned long min_seq;
237	struct lruvec *lruvec;
238	struct lru_gen_folio *lrugen;
239	int type = folio_is_file_lru(folio);
240	int delta = folio_nr_pages(folio);
241	int refs = folio_lru_refs(folio);
242	int tier = lru_tier_from_refs(refs);
243	struct mem_cgroup *memcg = folio_memcg(folio);
244	struct pglist_data *pgdat = folio_pgdat(folio);
245
246	BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);
247
248	lruvec = mem_cgroup_lruvec(memcg, pgdat);
249	lrugen = &lruvec->lrugen;
250	min_seq = READ_ONCE(lrugen->min_seq[type]);
251	token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);
252
253	hist = lru_hist_from_seq(min_seq);
254	atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);
255
256	return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
257}
258
259/*
260 * Tests if the shadow entry is for a folio that was recently evicted.
261 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
262 */
263static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
264				unsigned long *token, bool *workingset)
265{
266	int memcg_id;
267	unsigned long min_seq;
268	struct mem_cgroup *memcg;
269	struct pglist_data *pgdat;
270
271	unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);
272
273	memcg = mem_cgroup_from_id(memcg_id);
274	*lruvec = mem_cgroup_lruvec(memcg, pgdat);
275
276	min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);
277	return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH));
278}
279
280static void lru_gen_refault(struct folio *folio, void *shadow)
281{
282	bool recent;
283	int hist, tier, refs;
284	bool workingset;
285	unsigned long token;
286	struct lruvec *lruvec;
287	struct lru_gen_folio *lrugen;
288	int type = folio_is_file_lru(folio);
289	int delta = folio_nr_pages(folio);
290
291	rcu_read_lock();
292
293	recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);
294	if (lruvec != folio_lruvec(folio))
295		goto unlock;
296
297	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);
298
299	if (!recent)
300		goto unlock;
301
302	lrugen = &lruvec->lrugen;
303
304	hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
305	/* see the comment in folio_lru_refs() */
306	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
307	tier = lru_tier_from_refs(refs);
308
309	atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);
310	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);
311
312	/*
313	 * Count the following two cases as stalls:
314	 * 1. For pages accessed through page tables, hotter pages pushed out
315	 *    hot pages which refaulted immediately.
316	 * 2. For pages accessed multiple times through file descriptors,
317	 *    they would have been protected by sort_folio().
318	 */
319	if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
320		set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
321		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
322	}
323unlock:
324	rcu_read_unlock();
325}
326
327#else /* !CONFIG_LRU_GEN */
328
329static void *lru_gen_eviction(struct folio *folio)
330{
331	return NULL;
332}
333
334static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
335				unsigned long *token, bool *workingset)
336{
337	return false;
338}
339
340static void lru_gen_refault(struct folio *folio, void *shadow)
341{
342}
343
344#endif /* CONFIG_LRU_GEN */
345
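/*
 * Illustrative sketch, not part of the kernel source: the MGLRU
 * variant above keys shadow entries on the oldest generation sequence
 * number rather than a nonresident-age counter.  EX_REFS_WIDTH is an
 * assumed stand-in for LRU_REFS_WIDTH, and the EVICTION_MASK clamping
 * of the stored sequence is left out.
 */
#include <stdbool.h>

#define EX_REFS_WIDTH	2	/* assumed reference-count bits in the token */

/* eviction: remember the oldest generation plus the folio's ref count */
static unsigned long ex_make_token(unsigned long min_seq, int refs)
{
	return (min_seq << EX_REFS_WIDTH) | (refs > 0 ? refs - 1 : 0);
}

/*
 * refault: the entry is only "recent" if the generation it was evicted
 * from is still the oldest one; otherwise too much aging has happened
 * for the refault to say anything about the current working set.
 */
static bool ex_test_recent(unsigned long token, unsigned long current_min_seq)
{
	return (token >> EX_REFS_WIDTH) == current_min_seq;
}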
346/**
347 * workingset_age_nonresident - age non-resident entries as LRU ages
348 * @lruvec: the lruvec that was aged
349 * @nr_pages: the number of pages to count
350 *
351 * As in-memory pages are aged, non-resident pages need to be aged as
352 * well, in order for the refault distances later on to be comparable
353 * to the in-memory dimensions. This function allows reclaim and LRU
354 * operations to drive the non-resident aging along in parallel.
355 */
356void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
357{
358	/*
359	 * Reclaiming a cgroup means reclaiming all its children in a
360	 * round-robin fashion. That means that each cgroup has an LRU
361	 * order that is composed of the LRU orders of its child
362	 * cgroups; and every page has an LRU position not just in the
363	 * cgroup that owns it, but in all of that group's ancestors.
364	 *
365	 * So when the physical inactive list of a leaf cgroup ages,
366	 * the virtual inactive lists of all its parents, including
367	 * the root cgroup's, age as well.
368	 */
369	do {
370		atomic_long_add(nr_pages, &lruvec->nonresident_age);
371	} while ((lruvec = parent_lruvec(lruvec)));
372}
373
374/**
375 * workingset_eviction - note the eviction of a folio from memory
376 * @target_memcg: the cgroup that is causing the reclaim
377 * @folio: the folio being evicted
378 *
379 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
380 * of the evicted @folio so that a later refault can be detected.
381 */
382void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
383{
384	struct pglist_data *pgdat = folio_pgdat(folio);
385	unsigned long eviction;
386	struct lruvec *lruvec;
387	int memcgid;
388
389	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
390	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
391	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
392	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
393
394	if (lru_gen_enabled())
395		return lru_gen_eviction(folio);
396
397	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
398	/* XXX: target_memcg can be NULL, go through lruvec */
399	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
400	eviction = atomic_long_read(&lruvec->nonresident_age);
401	eviction >>= bucket_order;
402	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
403	return pack_shadow(memcgid, pgdat, eviction,
404				folio_test_workingset(folio));
405}
406
407/**
408 * workingset_test_recent - tests if the shadow entry is for a folio that was
409 * recently evicted. Also fills in @workingset with the value unpacked from
410 * shadow.
411 * @shadow: the shadow entry to be tested.
412 * @file: whether the corresponding folio is from the file lru.
413 * @workingset: where the workingset value unpacked from shadow should
414 * be stored.
415 * @flush: whether to flush cgroup rstat.
416 *
417 * Return: true if the shadow is for a recently evicted folio; false otherwise.
418 */
419bool workingset_test_recent(void *shadow, bool file, bool *workingset,
420				bool flush)
421{
422	struct mem_cgroup *eviction_memcg;
423	struct lruvec *eviction_lruvec;
424	unsigned long refault_distance;
425	unsigned long workingset_size;
426	unsigned long refault;
427	int memcgid;
428	struct pglist_data *pgdat;
429	unsigned long eviction;
430
431	rcu_read_lock();
432
433	if (lru_gen_enabled()) {
434		bool recent = lru_gen_test_recent(shadow, file,
435				&eviction_lruvec, &eviction, workingset);
436
437		rcu_read_unlock();
438		return recent;
439	}
440
441
442	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
443	eviction <<= bucket_order;
444
445	/*
446	 * Look up the memcg associated with the stored ID. It might
447	 * have been deleted since the folio's eviction.
448	 *
449	 * Note that in rare events the ID could have been recycled
450	 * for a new cgroup that refaults a shared folio. This is
451	 * impossible to tell from the available data. However, this
452	 * should be a rare and limited disturbance, and activations
453	 * are always speculative anyway. Ultimately, it's the aging
454	 * algorithm's job to shake out the minimum access frequency
455	 * for the active cache.
456	 *
457	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
458	 * would be better if the root_mem_cgroup existed in all
459	 * configurations instead.
460	 */
461	eviction_memcg = mem_cgroup_from_id(memcgid);
462	if (!mem_cgroup_disabled() &&
463	    (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) {
464		rcu_read_unlock();
465		return false;
466	}
467
468	rcu_read_unlock();
469
470	/*
471	 * Flush stats (and potentially sleep) outside the RCU read section.
472	 *
473	 * Note that workingset_test_recent() itself might be called in RCU read
474	 * section (for e.g, in cachestat) - these callers need to skip flushing
475	 * stats (via the flush argument).
476	 *
477	 * XXX: With per-memcg flushing and thresholding, is ratelimiting
478	 * still needed here?
479	 */
480	if (flush)
481		mem_cgroup_flush_stats_ratelimited(eviction_memcg);
482
483	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
484	refault = atomic_long_read(&eviction_lruvec->nonresident_age);
485
486	/*
487	 * Calculate the refault distance
488	 *
489	 * The unsigned subtraction here gives an accurate distance
490	 * across nonresident_age overflows in most cases. There is a
491	 * special case: usually, shadow entries have a short lifetime
492	 * and are either refaulted or reclaimed along with the inode
493	 * before they get too old.  But it is not impossible for the
494	 * nonresident_age to lap a shadow entry in the field, which
495	 * can then result in a false small refault distance, leading
496	 * to a false activation should this old entry actually
497	 * refault again.  However, earlier kernels used to deactivate
498	 * unconditionally with *every* reclaim invocation for the
499	 * longest time, so the occasional inappropriate activation
500	 * leading to pressure on the active list is not a problem.
501	 */
502	refault_distance = (refault - eviction) & EVICTION_MASK;
503
504	/*
505	 * Compare the distance to the existing workingset size. We
506	 * don't activate pages that couldn't stay resident even if
507	 * all the memory was available to the workingset. Whether
508	 * workingset competition needs to consider anon or not depends
509	 * on having free swap space.
510	 */
511	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
512	if (!file) {
513		workingset_size += lruvec_page_state(eviction_lruvec,
514						     NR_INACTIVE_FILE);
515	}
516	if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) {
517		workingset_size += lruvec_page_state(eviction_lruvec,
518						     NR_ACTIVE_ANON);
519		if (file) {
520			workingset_size += lruvec_page_state(eviction_lruvec,
521						     NR_INACTIVE_ANON);
522		}
523	}
524
525	mem_cgroup_put(eviction_memcg);
526	return refault_distance <= workingset_size;
527}
528
529/**
530 * workingset_refault - Evaluate the refault of a previously evicted folio.
531 * @folio: The freshly allocated replacement folio.
532 * @shadow: Shadow entry of the evicted folio.
533 *
534 * Calculates and evaluates the refault distance of the previously
535 * evicted folio in the context of the node and the memcg whose memory
536 * pressure caused the eviction.
537 */
538void workingset_refault(struct folio *folio, void *shadow)
539{
540	bool file = folio_is_file_lru(folio);
541	struct pglist_data *pgdat;
542	struct mem_cgroup *memcg;
543	struct lruvec *lruvec;
544	bool workingset;
545	long nr;
546
547	if (lru_gen_enabled()) {
548		lru_gen_refault(folio, shadow);
549		return;
550	}
551
552	/*
553	 * The activation decision for this folio is made at the level
554	 * where the eviction occurred, as that is where the LRU order
555	 * during folio reclaim is being determined.
556	 *
557	 * However, the cgroup that will own the folio is the one that
558	 * is actually experiencing the refault event. Make sure the folio is
559	 * locked to guarantee folio_memcg() stability throughout.
560	 */
561	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
562	nr = folio_nr_pages(folio);
563	memcg = folio_memcg(folio);
564	pgdat = folio_pgdat(folio);
565	lruvec = mem_cgroup_lruvec(memcg, pgdat);
566
567	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
568
569	if (!workingset_test_recent(shadow, file, &workingset, true))
570		return;
571
572	folio_set_active(folio);
573	workingset_age_nonresident(lruvec, nr);
574	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);
575
576	/* Folio was active prior to eviction */
577	if (workingset) {
578		folio_set_workingset(folio);
579		/*
580		 * XXX: Move to folio_add_lru() when it supports new vs
581		 * putback
582		 */
583		lru_note_cost_refault(folio);
584		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
585	}
586}
587
588/**
589 * workingset_activation - note a page activation
590 * @folio: Folio that is being activated.
591 */
592void workingset_activation(struct folio *folio)
593{
594	/*
595	 * Filter non-memcg pages here, e.g. unmap can call
596	 * mark_page_accessed() on VDSO pages.
597	 */
598	if (mem_cgroup_disabled() || folio_memcg_charged(folio))
599		workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
600}
601
602/*
603 * Shadow entries reflect the share of the working set that does not
604 * fit into memory, so their number depends on the access pattern of
605 * the workload.  In most cases, they will refault or get reclaimed
606 * along with the inode, but a (malicious) workload that streams
607 * through files with a total size several times that of available
608 * memory, while preventing the inodes from being reclaimed, can
609 * create excessive amounts of shadow nodes.  To keep a lid on this,
610 * track shadow nodes and reclaim them when they grow way past the
611 * point where they would still be useful.
612 */
613
614struct list_lru shadow_nodes;
615
616void workingset_update_node(struct xa_node *node)
617{
618	struct address_space *mapping;
619	struct page *page = virt_to_page(node);
620
621	/*
622	 * Track non-empty nodes that contain only shadow entries;
623	 * unlink those that contain pages or are being freed.
624	 *
625	 * Avoid acquiring the list_lru lock when the nodes are
626	 * already where they should be. The list_empty() test is safe
627	 * as node->private_list is protected by the i_pages lock.
628	 */
629	mapping = container_of(node->array, struct address_space, i_pages);
630	lockdep_assert_held(&mapping->i_pages.xa_lock);
631
632	if (node->count && node->count == node->nr_values) {
633		if (list_empty(&node->private_list)) {
634			list_lru_add_obj(&shadow_nodes, &node->private_list);
635			__inc_node_page_state(page, WORKINGSET_NODES);
636		}
637	} else {
638		if (!list_empty(&node->private_list)) {
639			list_lru_del_obj(&shadow_nodes, &node->private_list);
640			__dec_node_page_state(page, WORKINGSET_NODES);
641		}
642	}
643}
644
645static unsigned long count_shadow_nodes(struct shrinker *shrinker,
646					struct shrink_control *sc)
647{
648	unsigned long max_nodes;
649	unsigned long nodes;
650	unsigned long pages;
651
652	nodes = list_lru_shrink_count(&shadow_nodes, sc);
653	if (!nodes)
654		return SHRINK_EMPTY;
655
656	/*
657	 * Approximate a reasonable limit for the nodes
658	 * containing shadow entries. We don't need to keep more
659	 * shadow entries than possible pages on the active list,
660	 * since refault distances bigger than that are dismissed.
661	 *
662	 * The size of the active list converges toward 100% of
663	 * overall page cache as memory grows, with only a tiny
664	 * inactive list. Assume the total cache size for that.
665	 *
666	 * Nodes might be sparsely populated, with only one shadow
667	 * entry in the extreme case. Obviously, we cannot keep one
668	 * node for every eligible shadow entry, so compromise on a
669	 * worst-case density of 1/8th. Below that, not all eligible
670	 * refaults can be detected anymore.
671	 *
672	 * On 64-bit with 7 xa_nodes per page and 64 slots
673	 * each, this will reclaim shadow entries when they consume
674	 * ~1.8% of available memory:
675	 *
676	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
677	 */
678#ifdef CONFIG_MEMCG
679	if (sc->memcg) {
680		struct lruvec *lruvec;
681		int i;
682
683		mem_cgroup_flush_stats_ratelimited(sc->memcg);
684		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
685		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
686			pages += lruvec_page_state_local(lruvec,
687							 NR_LRU_BASE + i);
688		pages += lruvec_page_state_local(
689			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
690		pages += lruvec_page_state_local(
691			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
692	} else
693#endif
694		pages = node_present_pages(sc->nid);
695
696	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
697
698	if (nodes <= max_nodes)
699		return 0;
700	return nodes - max_nodes;
701}
702
703static enum lru_status shadow_lru_isolate(struct list_head *item,
704					  struct list_lru_one *lru,
705					  void *arg) __must_hold(lru->lock)
706{
707	struct xa_node *node = container_of(item, struct xa_node, private_list);
708	struct address_space *mapping;
709	int ret;
710
711	/*
712	 * Page cache insertions and deletions synchronously maintain
713	 * the shadow node LRU under the i_pages lock and the
714	 * &lru->lock. Because the page cache tree is emptied before
715	 * the inode can be destroyed, holding the &lru->lock pins any
716	 * address_space that has nodes on the LRU.
717	 *
718	 * We can then safely transition to the i_pages lock to
719	 * pin only the address_space of the particular node we want
720	 * to reclaim, take the node off-LRU, and drop the &lru->lock.
721	 */
722
723	mapping = container_of(node->array, struct address_space, i_pages);
724
725	/* Coming from the list, invert the lock order */
726	if (!xa_trylock(&mapping->i_pages)) {
727		spin_unlock_irq(&lru->lock);
728		ret = LRU_RETRY;
729		goto out;
730	}
731
732	/* For page cache we need to hold i_lock */
733	if (mapping->host != NULL) {
734		if (!spin_trylock(&mapping->host->i_lock)) {
735			xa_unlock(&mapping->i_pages);
736			spin_unlock_irq(&lru->lock);
737			ret = LRU_RETRY;
738			goto out;
739		}
740	}
741
742	list_lru_isolate(lru, item);
743	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);
744
745	spin_unlock(&lru->lock);
746
747	/*
748	 * The nodes should only contain one or more shadow entries,
749	 * no pages, so we expect to be able to remove them all and
750	 * delete and free the empty node afterwards.
751	 */
752	if (WARN_ON_ONCE(!node->nr_values))
753		goto out_invalid;
754	if (WARN_ON_ONCE(node->count != node->nr_values))
755		goto out_invalid;
756	xa_delete_node(node, workingset_update_node);
757	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);
758
759out_invalid:
760	xa_unlock_irq(&mapping->i_pages);
761	if (mapping->host != NULL) {
762		if (mapping_shrinkable(mapping))
763			inode_add_lru(mapping->host);
764		spin_unlock(&mapping->host->i_lock);
765	}
766	ret = LRU_REMOVED_RETRY;
767out:
768	cond_resched();
769	return ret;
770}
771
772static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
773				       struct shrink_control *sc)
774{
775	/* list_lru lock nests inside the IRQ-safe i_pages lock */
776	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
777					NULL);
778}
779
780/*
781 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
782 * i_pages lock.
783 */
784static struct lock_class_key shadow_nodes_key;
785
786static int __init workingset_init(void)
787{
788	struct shrinker *workingset_shadow_shrinker;
789	unsigned int timestamp_bits;
790	unsigned int max_order;
791	int ret = -ENOMEM;
792
793	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
794	/*
795	 * Calculate the eviction bucket size to cover the longest
796	 * actionable refault distance, which is currently half of
797	 * memory (totalram_pages/2). However, memory hotplug may add
798	 * some more pages at runtime, so keep working with up to
799	 * double the initial memory by using totalram_pages as-is.
800	 */
801	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
802	max_order = fls_long(totalram_pages() - 1);
803	if (max_order > timestamp_bits)
804		bucket_order = max_order - timestamp_bits;
805	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
806	       timestamp_bits, max_order, bucket_order);
807
808	workingset_shadow_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
809						    SHRINKER_MEMCG_AWARE,
810						    "mm-shadow");
811	if (!workingset_shadow_shrinker)
812		goto err;
813
814	ret = list_lru_init_memcg_key(&shadow_nodes, workingset_shadow_shrinker,
815				      &shadow_nodes_key);
816	if (ret)
817		goto err_list_lru;
818
819	workingset_shadow_shrinker->count_objects = count_shadow_nodes;
820	workingset_shadow_shrinker->scan_objects = scan_shadow_nodes;
821	/* ->count reports only fully expendable nodes */
822	workingset_shadow_shrinker->seeks = 0;
823
824	shrinker_register(workingset_shadow_shrinker);
825	return 0;
826err_list_lru:
827	shrinker_free(workingset_shadow_shrinker);
828err:
829	return ret;
830}
831module_init(workingset_init);