/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per zone, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
 *
 *		Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 *    used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Implementation
 *
 * For each zone's file LRU lists, a counter for inactive evictions
 * and activations is maintained (lruvec->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the zone) is stored in the now empty page cache radix tree
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */
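
/*
 * A minimal sketch of the decision described above (illustrative
 * only, not part of the kernel's control flow; the helper name is
 * made up): with counter reading e taken at eviction, reading r
 * taken at refault, and nr_active pages currently on the active
 * list, the refaulting page deserves activation when its
 * out-of-cache distance fits into the space held by active pages.
 */
static inline bool refault_would_fit(unsigned long e, unsigned long r,
				     unsigned long nr_active)
{
	/* unsigned subtraction keeps the distance sane across wraparound */
	return (r - e) <= nr_active;
}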

#define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
			 ZONES_SHIFT + NODES_SHIFT +	\
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
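/*
 * Illustrative numbers (not from any particular configuration): if
 * the lruvec identifier plus the exceptional-entry bits consume 26 of
 * the 64 bits of a shadow entry, 38 timestamp bits remain, and
 * bucket_order only becomes non-zero when totalram_pages needs more
 * than 38 bits to represent.  With bucket_order == 2, for instance,
 * evictions are timestamped in buckets of 4 inactive_age ticks, so
 * refault distances are only accurate to within 4 pages.
 */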
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, struct zone *zone, unsigned long eviction)
{
	eviction >>= bucket_order;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
	eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
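
/*
 * Resulting shadow entry layout, from most to least significant bits
 * (field widths depend on the kernel configuration):
 *
 *	| eviction >> bucket_order | memcg ID | node | zone | tag |
 *
 * where "tag" is the RADIX_TREE_EXCEPTIONAL_SHIFT-wide field that
 * marks the value as an exceptional (non-page) radix tree entry.
 * unpack_shadow() below peels the fields back off in reverse order.
 */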

static void unpack_shadow(void *shadow, int *memcgidp, struct zone **zonep,
			  unsigned long *evictionp)
{
	unsigned long entry = (unsigned long)shadow;
	int memcgid, nid, zid;

	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	zid = entry & ((1UL << ZONES_SHIFT) - 1);
	entry >>= ZONES_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*zonep = NODE_DATA(nid)->node_zones + zid;
	*evictionp = entry << bucket_order;
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(page);
	struct zone *zone = page_zone(page);
	int memcgid = mem_cgroup_id(memcg);
	unsigned long eviction;
	struct lruvec *lruvec;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
	eviction = atomic_long_inc_return(&lruvec->inactive_age);
	return pack_shadow(memcgid, zone, eviction);
}
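
/*
 * Sketch of the producing side (simplified and hypothetical; the real
 * call site lives in the reclaim path and differs in detail across
 * kernel versions): when reclaim removes a file page from the page
 * cache, the returned shadow entry is planted in the radix tree slot
 * the page used to occupy, e.g.:
 *
 *	void *shadow = NULL;
 *
 *	if (reclaimed && page_is_file_cache(page))
 *		shadow = workingset_eviction(mapping, page);
 *	__delete_from_page_cache(page, shadow);
 */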

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the zone it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
	unsigned long refault_distance;
	unsigned long active_file;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	struct zone *zone;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &zone, &eviction);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !memcg) {
		rcu_read_unlock();
		return false;
	}
	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
	refault = atomic_long_read(&lruvec->inactive_age);
	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
	rcu_read_unlock();

	/*
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases.
	 *
	 * There is a special case: usually, shadow entries have a
	 * short lifetime and are either refaulted or reclaimed along
	 * with the inode before they get too old.  But it is not
	 * impossible for the inactive_age to lap a shadow entry in
	 * the field, which can then result in a false small
	 * refault distance, leading to a false activation should this
	 * old entry actually refault again.  However, earlier kernels
	 * used to deactivate unconditionally with *every* reclaim
	 * invocation for the longest time, so the occasional
	 * inappropriate activation leading to pressure on the active
	 * list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	inc_zone_state(zone, WORKINGSET_REFAULT);

	if (refault_distance <= active_file) {
		inc_zone_state(zone, WORKINGSET_ACTIVATE);
		return true;
	}
	return false;
}
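
/*
 * Sketch of the consuming side (simplified and hypothetical; the real
 * call site is in the page cache insertion path and differs in
 * detail): when a newly added page replaces a shadow entry, the
 * shadow decides whether the page starts life on the active list,
 * e.g.:
 *
 *	if (shadow && workingset_refault(shadow)) {
 *		SetPageActive(page);
 *		workingset_activation(page);
 *	}
 *	lru_cache_add(page);
 */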

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct lruvec *lruvec;

	lock_page_memcg(page);
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	if (!mem_cgroup_disabled() && !page_memcg(page))
		goto out;
	lruvec = mem_cgroup_zone_lruvec(page_zone(page), page_memcg(page));
	atomic_long_inc(&lruvec->inactive_age);
out:
	unlock_page_memcg(page);
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru workingset_shadow_nodes;

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long shadow_nodes;
	unsigned long max_nodes;
	unsigned long pages;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
	local_irq_enable();

	if (memcg_kmem_enabled())
		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
						     LRU_ALL_FILE);
	else
		pages = node_page_state(sc->nid, NR_ACTIVE_FILE) +
			node_page_state(sc->nid, NR_INACTIVE_FILE);

	/*
	 * Active cache pages are limited to 50% of memory, and shadow
	 * entries that represent a refault distance bigger than that
	 * do not have any effect.  Limit the number of shadow nodes
	 * such that shadow entries do not exceed the number of active
	 * cache pages, assuming a worst-case node population density
	 * of 1/8th on average.
	 *
	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~2% of available memory:
	 *
	 * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
	 */
	max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);

	if (shadow_nodes <= max_nodes)
		return 0;

	return shadow_nodes - max_nodes;
}
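
/*
 * Worked example for the limit above, assuming the common 64-bit
 * configuration with RADIX_TREE_MAP_SHIFT == 6 (64 slots per node):
 * max_nodes = pages >> 4, i.e. one node per 16 pages.  At the assumed
 * worst-case density of 1/8th, each node then holds about 64/8 == 8
 * shadow entries, so the cap works out to roughly pages/2 shadow
 * entries - matching the 50% ceiling on active cache pages.
 */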

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = node->private_data;

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */

	BUG_ON(!node->count);
	BUG_ON(node->count & RADIX_TREE_COUNT_MASK);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
			node->slots[i] = NULL;
			BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
			node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
			BUG_ON(!mapping->nrexceptional);
			mapping->nrexceptional--;
		}
	}
	BUG_ON(node->count);
	inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
	if (!__radix_tree_delete_node(&mapping->page_tree, node))
		BUG();

	spin_unlock(&mapping->tree_lock);
	ret = LRU_REMOVED_RETRY;
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	unsigned long ret;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
				   shadow_lru_isolate, NULL);
	local_irq_enable();
	return ret;
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
	if (ret)
		goto err;
	ret = register_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	return 0;
err_list_lru:
	list_lru_destroy(&workingset_shadow_nodes);
err:
	return ret;
}
module_init(workingset_init);