/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per zone, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
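 * A hypothetical worked example of the check above: with
 * NR_inactive = 100 and NR_active = 100, a page that is evicted when
 * the counter reads E = 1000 and refaults when it reads R = 1080 has
 * a refault distance of 80.  Since 80 <= NR_active, the page would
 * have stayed in cache had the inactive list been 80 slots bigger,
 * so it is activated on refault.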
 *
 *		Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Implementation
 *
 * For each zone's file LRU lists, a counter for inactive evictions
 * and activations is maintained (zone->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the zone) is stored in the now empty page cache radix tree
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

static void *pack_shadow(unsigned long eviction, struct zone *zone)
{
	eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
	eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static void unpack_shadow(void *shadow,
			  struct zone **zone,
			  unsigned long *distance)
{
	unsigned long entry = (unsigned long)shadow;
	unsigned long eviction;
	unsigned long refault;
	unsigned long mask;
	int zid, nid;

	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	zid = entry & ((1UL << ZONES_SHIFT) - 1);
	entry >>= ZONES_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	eviction = entry;

	*zone = NODE_DATA(nid)->node_zones + zid;

	refault = atomic_long_read(&(*zone)->inactive_age);
	mask = ~0UL >> (NODES_SHIFT + ZONES_SHIFT +
			RADIX_TREE_EXCEPTIONAL_SHIFT);
	/*
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases.
	 *
	 * There is a special case: usually, shadow entries have a
	 * short lifetime and are either refaulted or reclaimed along
	 * with the inode before they get too old.  But it is not
	 * impossible for the inactive_age to lap a shadow entry in
	 * the field, which can then result in a false small
	 * refault distance, leading to a false activation should this
	 * old entry actually refault again.  However, earlier kernels
	 * used to deactivate unconditionally with *every* reclaim
	 * invocation for the longest time, so the occasional
	 * inappropriate activation leading to pressure on the active
	 * list is not a problem.
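	 *
	 * As an illustrative (hypothetical) example with a 4-bit
	 * counter: an entry packed at eviction = 14 that refaults at
	 * refault = 2 yields (2 - 14) & 0xf = 4, the correct distance
	 * across the counter wrap.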
	 */
	*distance = (refault - eviction) & mask;
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long eviction;

	eviction = atomic_long_inc_return(&zone->inactive_age);
	return pack_shadow(eviction, zone);
}
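
/*
 * Illustrative sketch, not part of this file: the reclaim path in
 * mm/vmscan.c is expected to use workingset_eviction() roughly like
 * this when it removes a file page from the page cache (the variable
 * names and surrounding condition are simplified assumptions, not
 * the exact upstream code):
 *
 *	void *shadow = NULL;
 *
 *	if (reclaimed && page_is_file_cache(page))
 *		shadow = workingset_eviction(mapping, page);
 *	__delete_from_page_cache(page, shadow);
 *
 * The shadow entry then sits in the radix tree slot the page used to
 * occupy until a refault or shadow node reclaim removes it.
 */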

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the zone it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
	unsigned long refault_distance;
	struct zone *zone;

	unpack_shadow(shadow, &zone, &refault_distance);
	inc_zone_state(zone, WORKINGSET_REFAULT);

	if (refault_distance <= zone_page_state(zone, NR_ACTIVE_FILE)) {
		inc_zone_state(zone, WORKINGSET_ACTIVATE);
		return true;
	}
	return false;
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	atomic_long_inc(&page_zone(page)->inactive_age);
}
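
/*
 * Illustrative sketch, not part of this file: the page cache fault
 * path in mm/filemap.c is expected to consume the shadow entry it
 * finds in the tree slot roughly as follows (a simplified sketch
 * under that assumption, not the exact upstream code):
 *
 *	if (shadow && workingset_refault(shadow)) {
 *		SetPageActive(page);
 *		workingset_activation(page);
 *	}
 *	lru_cache_add(page);
 *
 * i.e. an eligible refault distance makes the page start life on the
 * active list, and the activation is fed back into inactive_age.
 */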

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru workingset_shadow_nodes;
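
/*
 * Sketch of how this list is presumably populated (the actual code
 * lives on the page cache side, not in this file, and details may
 * differ): when a page cache deletion leaves a radix tree node
 * holding only shadow entries, the node is tagged with its mapping
 * and put on the LRU so the shrinker below can find it later:
 *
 *	node->private_data = mapping;
 *	list_lru_add(&workingset_shadow_nodes, &node->private_list);
 *
 * and it is taken off the list again (list_lru_del()) once it gains
 * a real page or loses its last shadow entry.
 */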

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long shadow_nodes;
	unsigned long max_nodes;
	unsigned long pages;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid);
	local_irq_enable();

	pages = node_present_pages(sc->nid);
	/*
	 * Active cache pages are limited to 50% of memory, and shadow
	 * entries that represent a refault distance bigger than that
	 * do not have any effect.  Limit the number of shadow nodes
	 * such that shadow entries do not exceed the number of active
	 * cache pages, assuming a worst-case node population density
	 * of 1/8th on average.
	 *
	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~2% of available memory:
	 *
	 * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
	 */
	max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);

	if (shadow_nodes <= max_nodes)
		return 0;

	return shadow_nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  spinlock_t *lru_lock,
					  void *arg)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = node->private_data;

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_del_init(item);
	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */

	BUG_ON(!node->count);
	BUG_ON(node->count & RADIX_TREE_COUNT_MASK);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
			node->slots[i] = NULL;
			BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
			node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
			BUG_ON(!mapping->nrshadows);
			mapping->nrshadows--;
		}
	}
	BUG_ON(node->count);
	inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
	if (!__radix_tree_delete_node(&mapping->page_tree, node))
		BUG();

	spin_unlock(&mapping->tree_lock);
	ret = LRU_REMOVED_RETRY;
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	unsigned long ret;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,
				 shadow_lru_isolate, NULL, &sc->nr_to_scan);
	local_irq_enable();
	return ret;
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	int ret;

	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
	if (ret)
		goto err;
	ret = register_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	return 0;
err_list_lru:
	list_lru_destroy(&workingset_shadow_nodes);
err:
	return ret;
}
module_init(workingset_init);