v5.9 (mm/list_lru.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  4 * Authors: David Chinner and Glauber Costa
  5 *
  6 * Generic LRU infrastructure
  7 */
  8#include <linux/kernel.h>
  9#include <linux/module.h>
 10#include <linux/mm.h>
 11#include <linux/list_lru.h>
 12#include <linux/slab.h>
 13#include <linux/mutex.h>
 14#include <linux/memcontrol.h>
 15#include "slab.h"
 16
 17#ifdef CONFIG_MEMCG_KMEM
 18static LIST_HEAD(list_lrus);
 19static DEFINE_MUTEX(list_lrus_mutex);
 20
 21static void list_lru_register(struct list_lru *lru)
 22{
 23	mutex_lock(&list_lrus_mutex);
 24	list_add(&lru->list, &list_lrus);
 25	mutex_unlock(&list_lrus_mutex);
 26}
 27
 28static void list_lru_unregister(struct list_lru *lru)
 29{
 30	mutex_lock(&list_lrus_mutex);
 31	list_del(&lru->list);
 32	mutex_unlock(&list_lrus_mutex);
 33}
 34
 35static int lru_shrinker_id(struct list_lru *lru)
 36{
 37	return lru->shrinker_id;
 38}
 39
 40static inline bool list_lru_memcg_aware(struct list_lru *lru)
 41{
 42	return lru->memcg_aware;
 43}
 44
 45static inline struct list_lru_one *
 46list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 47{
 48	struct list_lru_memcg *memcg_lrus;
 49	/*
 50	 * Either lock or RCU protects the array of per cgroup lists
 51	 * from relocation (see memcg_update_list_lru_node).
 52	 */
 53	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
 54					   lockdep_is_held(&nlru->lock));
 55	if (memcg_lrus && idx >= 0)
 56		return memcg_lrus->lru[idx];
 57	return &nlru->lru;
 58}
 59
 60static inline struct list_lru_one *
 61list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
 62		   struct mem_cgroup **memcg_ptr)
 63{
 64	struct list_lru_one *l = &nlru->lru;
 65	struct mem_cgroup *memcg = NULL;
 66
 67	if (!nlru->memcg_lrus)
 68		goto out;
 69
 70	memcg = mem_cgroup_from_obj(ptr);
 71	if (!memcg)
 72		goto out;
 73
 74	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
 75out:
 76	if (memcg_ptr)
 77		*memcg_ptr = memcg;
 78	return l;
 79}
 80#else
 81static void list_lru_register(struct list_lru *lru)
 82{
 83}
 84
 85static void list_lru_unregister(struct list_lru *lru)
 86{
 87}
 88
 89static int lru_shrinker_id(struct list_lru *lru)
 90{
 91	return -1;
 92}
 93
 94static inline bool list_lru_memcg_aware(struct list_lru *lru)
 95{
 96	return false;
 97}
 98
 99static inline struct list_lru_one *
100list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
101{
102	return &nlru->lru;
103}
104
105static inline struct list_lru_one *
106list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
107		   struct mem_cgroup **memcg_ptr)
108{
109	if (memcg_ptr)
110		*memcg_ptr = NULL;
111	return &nlru->lru;
112}
113#endif /* CONFIG_MEMCG_KMEM */
114
115bool list_lru_add(struct list_lru *lru, struct list_head *item)
116{
117	int nid = page_to_nid(virt_to_page(item));
118	struct list_lru_node *nlru = &lru->node[nid];
119	struct mem_cgroup *memcg;
120	struct list_lru_one *l;
121
122	spin_lock(&nlru->lock);
123	if (list_empty(item)) {
124		l = list_lru_from_kmem(nlru, item, &memcg);
125		list_add_tail(item, &l->list);
126		/* Set shrinker bit if the first element was added */
127		if (!l->nr_items++)
128			memcg_set_shrinker_bit(memcg, nid,
129					       lru_shrinker_id(lru));
130		nlru->nr_items++;
131		spin_unlock(&nlru->lock);
132		return true;
133	}
134	spin_unlock(&nlru->lock);
135	return false;
136}
137EXPORT_SYMBOL_GPL(list_lru_add);
138
139bool list_lru_del(struct list_lru *lru, struct list_head *item)
140{
141	int nid = page_to_nid(virt_to_page(item));
142	struct list_lru_node *nlru = &lru->node[nid];
143	struct list_lru_one *l;
144
145	spin_lock(&nlru->lock);
146	if (!list_empty(item)) {
147		l = list_lru_from_kmem(nlru, item, NULL);
148		list_del_init(item);
149		l->nr_items--;
150		nlru->nr_items--;
151		spin_unlock(&nlru->lock);
152		return true;
153	}
154	spin_unlock(&nlru->lock);
155	return false;
156}
157EXPORT_SYMBOL_GPL(list_lru_del);
158
159void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
160{
161	list_del_init(item);
162	list->nr_items--;
163}
164EXPORT_SYMBOL_GPL(list_lru_isolate);
165
166void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
167			   struct list_head *head)
168{
169	list_move(item, head);
170	list->nr_items--;
171}
172EXPORT_SYMBOL_GPL(list_lru_isolate_move);
173
174unsigned long list_lru_count_one(struct list_lru *lru,
175				 int nid, struct mem_cgroup *memcg)
176{
177	struct list_lru_node *nlru = &lru->node[nid];
178	struct list_lru_one *l;
179	unsigned long count;
180
181	rcu_read_lock();
182	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
183	count = READ_ONCE(l->nr_items);
184	rcu_read_unlock();
185
186	return count;
187}
188EXPORT_SYMBOL_GPL(list_lru_count_one);
189
190unsigned long list_lru_count_node(struct list_lru *lru, int nid)
191{
192	struct list_lru_node *nlru;
193
194	nlru = &lru->node[nid];
195	return nlru->nr_items;
196}
197EXPORT_SYMBOL_GPL(list_lru_count_node);
198
199static unsigned long
200__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
201		    list_lru_walk_cb isolate, void *cb_arg,
202		    unsigned long *nr_to_walk)
203{
204
205	struct list_lru_one *l;
206	struct list_head *item, *n;
207	unsigned long isolated = 0;
208
209	l = list_lru_from_memcg_idx(nlru, memcg_idx);
210restart:
211	list_for_each_safe(item, n, &l->list) {
212		enum lru_status ret;
213
214		/*
215		 * decrement nr_to_walk first so that we don't livelock if we
216		 * get stuck on large numbers of LRU_RETRY items
217		 */
218		if (!*nr_to_walk)
219			break;
220		--*nr_to_walk;
221
222		ret = isolate(item, l, &nlru->lock, cb_arg);
223		switch (ret) {
224		case LRU_REMOVED_RETRY:
225			assert_spin_locked(&nlru->lock);
226			fallthrough;
227		case LRU_REMOVED:
228			isolated++;
229			nlru->nr_items--;
230			/*
231			 * If the lru lock has been dropped, our list
232			 * traversal is now invalid and so we have to
233			 * restart from scratch.
234			 */
235			if (ret == LRU_REMOVED_RETRY)
236				goto restart;
237			break;
238		case LRU_ROTATE:
239			list_move_tail(item, &l->list);
240			break;
241		case LRU_SKIP:
242			break;
243		case LRU_RETRY:
244			/*
245			 * The lru lock has been dropped, our list traversal is
246			 * now invalid and so we have to restart from scratch.
247			 */
248			assert_spin_locked(&nlru->lock);
249			goto restart;
250		default:
251			BUG();
252		}
253	}
254	return isolated;
255}
256
257unsigned long
258list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
259		  list_lru_walk_cb isolate, void *cb_arg,
260		  unsigned long *nr_to_walk)
261{
262	struct list_lru_node *nlru = &lru->node[nid];
263	unsigned long ret;
264
265	spin_lock(&nlru->lock);
266	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
267				  nr_to_walk);
268	spin_unlock(&nlru->lock);
269	return ret;
270}
271EXPORT_SYMBOL_GPL(list_lru_walk_one);
272
273unsigned long
274list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
275		      list_lru_walk_cb isolate, void *cb_arg,
276		      unsigned long *nr_to_walk)
277{
278	struct list_lru_node *nlru = &lru->node[nid];
279	unsigned long ret;
280
281	spin_lock_irq(&nlru->lock);
282	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
283				  nr_to_walk);
284	spin_unlock_irq(&nlru->lock);
285	return ret;
286}
287
288unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
289				 list_lru_walk_cb isolate, void *cb_arg,
290				 unsigned long *nr_to_walk)
291{
292	long isolated = 0;
293	int memcg_idx;
294
295	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
296				      nr_to_walk);
297	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
298		for_each_memcg_cache_index(memcg_idx) {
299			struct list_lru_node *nlru = &lru->node[nid];
300
301			spin_lock(&nlru->lock);
302			isolated += __list_lru_walk_one(nlru, memcg_idx,
303							isolate, cb_arg,
304							nr_to_walk);
305			spin_unlock(&nlru->lock);
306
307			if (*nr_to_walk <= 0)
308				break;
309		}
310	}
311	return isolated;
312}
313EXPORT_SYMBOL_GPL(list_lru_walk_node);
314
315static void init_one_lru(struct list_lru_one *l)
316{
317	INIT_LIST_HEAD(&l->list);
318	l->nr_items = 0;
319}
320
321#ifdef CONFIG_MEMCG_KMEM
322static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
323					  int begin, int end)
324{
325	int i;
326
327	for (i = begin; i < end; i++)
328		kfree(memcg_lrus->lru[i]);
329}
330
331static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
332				      int begin, int end)
333{
334	int i;
335
336	for (i = begin; i < end; i++) {
337		struct list_lru_one *l;
338
339		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
340		if (!l)
341			goto fail;
342
343		init_one_lru(l);
344		memcg_lrus->lru[i] = l;
345	}
346	return 0;
347fail:
348	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
349	return -ENOMEM;
350}
351
352static int memcg_init_list_lru_node(struct list_lru_node *nlru)
353{
354	struct list_lru_memcg *memcg_lrus;
355	int size = memcg_nr_cache_ids;
356
357	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
358			      size * sizeof(void *), GFP_KERNEL);
359	if (!memcg_lrus)
360		return -ENOMEM;
361
362	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
363		kvfree(memcg_lrus);
364		return -ENOMEM;
365	}
366	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
367
368	return 0;
369}
370
371static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
372{
373	struct list_lru_memcg *memcg_lrus;
374	/*
375	 * This is called when the shrinker has already been unregistered,
376	 * and nobody can use it. So, there is no need to use kvfree_rcu_local().
377	 */
378	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
379	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
380	kvfree(memcg_lrus);
381}
382
383static void kvfree_rcu_local(struct rcu_head *head)
384{
385	struct list_lru_memcg *mlru;
386
387	mlru = container_of(head, struct list_lru_memcg, rcu);
388	kvfree(mlru);
389}
390
391static int memcg_update_list_lru_node(struct list_lru_node *nlru,
392				      int old_size, int new_size)
393{
394	struct list_lru_memcg *old, *new;
395
396	BUG_ON(old_size > new_size);
397
398	old = rcu_dereference_protected(nlru->memcg_lrus,
399					lockdep_is_held(&list_lrus_mutex));
400	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
401	if (!new)
402		return -ENOMEM;
403
404	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
405		kvfree(new);
406		return -ENOMEM;
407	}
408
409	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
410
411	/*
412	 * The locking below allows readers that hold nlru->lock to avoid taking
413	 * rcu_read_lock (see list_lru_from_memcg_idx).
414	 *
415	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
416	 * we have to use IRQ-safe primitives here to avoid deadlock.
417	 */
418	spin_lock_irq(&nlru->lock);
419	rcu_assign_pointer(nlru->memcg_lrus, new);
420	spin_unlock_irq(&nlru->lock);
421
422	call_rcu(&old->rcu, kvfree_rcu_local);
423	return 0;
424}
425
426static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
427					      int old_size, int new_size)
428{
429	struct list_lru_memcg *memcg_lrus;
430
431	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
432					       lockdep_is_held(&list_lrus_mutex));
433	/* do not bother shrinking the array back to the old size, because we
434	 * cannot handle allocation failures here */
435	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
436}
437
438static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
439{
440	int i;
441
442	lru->memcg_aware = memcg_aware;
443
444	if (!memcg_aware)
445		return 0;
446
447	for_each_node(i) {
448		if (memcg_init_list_lru_node(&lru->node[i]))
449			goto fail;
450	}
451	return 0;
452fail:
453	for (i = i - 1; i >= 0; i--) {
454		if (!lru->node[i].memcg_lrus)
455			continue;
456		memcg_destroy_list_lru_node(&lru->node[i]);
457	}
458	return -ENOMEM;
459}
460
461static void memcg_destroy_list_lru(struct list_lru *lru)
462{
463	int i;
464
465	if (!list_lru_memcg_aware(lru))
466		return;
467
468	for_each_node(i)
469		memcg_destroy_list_lru_node(&lru->node[i]);
470}
471
472static int memcg_update_list_lru(struct list_lru *lru,
473				 int old_size, int new_size)
474{
475	int i;
476
477	if (!list_lru_memcg_aware(lru))
478		return 0;
479
480	for_each_node(i) {
481		if (memcg_update_list_lru_node(&lru->node[i],
482					       old_size, new_size))
483			goto fail;
484	}
485	return 0;
486fail:
487	for (i = i - 1; i >= 0; i--) {
488		if (!lru->node[i].memcg_lrus)
489			continue;
490
491		memcg_cancel_update_list_lru_node(&lru->node[i],
492						  old_size, new_size);
493	}
494	return -ENOMEM;
495}
496
497static void memcg_cancel_update_list_lru(struct list_lru *lru,
498					 int old_size, int new_size)
499{
500	int i;
501
502	if (!list_lru_memcg_aware(lru))
503		return;
504
505	for_each_node(i)
506		memcg_cancel_update_list_lru_node(&lru->node[i],
507						  old_size, new_size);
508}
509
510int memcg_update_all_list_lrus(int new_size)
511{
512	int ret = 0;
513	struct list_lru *lru;
514	int old_size = memcg_nr_cache_ids;
515
516	mutex_lock(&list_lrus_mutex);
517	list_for_each_entry(lru, &list_lrus, list) {
518		ret = memcg_update_list_lru(lru, old_size, new_size);
519		if (ret)
520			goto fail;
521	}
522out:
523	mutex_unlock(&list_lrus_mutex);
524	return ret;
525fail:
526	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
527		memcg_cancel_update_list_lru(lru, old_size, new_size);
528	goto out;
529}
530
531static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
532				      int src_idx, struct mem_cgroup *dst_memcg)
533{
534	struct list_lru_node *nlru = &lru->node[nid];
535	int dst_idx = dst_memcg->kmemcg_id;
536	struct list_lru_one *src, *dst;
537	bool set;
538
539	/*
540	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
541	 * we have to use IRQ-safe primitives here to avoid deadlock.
542	 */
543	spin_lock_irq(&nlru->lock);
544
545	src = list_lru_from_memcg_idx(nlru, src_idx);
546	dst = list_lru_from_memcg_idx(nlru, dst_idx);
547
548	list_splice_init(&src->list, &dst->list);
549	set = (!dst->nr_items && src->nr_items);
550	dst->nr_items += src->nr_items;
551	if (set)
552		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
553	src->nr_items = 0;
554
555	spin_unlock_irq(&nlru->lock);
556}
557
558static void memcg_drain_list_lru(struct list_lru *lru,
559				 int src_idx, struct mem_cgroup *dst_memcg)
560{
561	int i;
562
563	if (!list_lru_memcg_aware(lru))
564		return;
565
566	for_each_node(i)
567		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
568}
569
570void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
571{
572	struct list_lru *lru;
573
574	mutex_lock(&list_lrus_mutex);
575	list_for_each_entry(lru, &list_lrus, list)
576		memcg_drain_list_lru(lru, src_idx, dst_memcg);
577	mutex_unlock(&list_lrus_mutex);
578}
579#else
580static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
581{
582	return 0;
583}
584
585static void memcg_destroy_list_lru(struct list_lru *lru)
586{
587}
588#endif /* CONFIG_MEMCG_KMEM */
589
590int __list_lru_init(struct list_lru *lru, bool memcg_aware,
591		    struct lock_class_key *key, struct shrinker *shrinker)
592{
593	int i;
594	int err = -ENOMEM;
595
596#ifdef CONFIG_MEMCG_KMEM
597	if (shrinker)
598		lru->shrinker_id = shrinker->id;
599	else
600		lru->shrinker_id = -1;
601#endif
602	memcg_get_cache_ids();
603
604	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
605	if (!lru->node)
606		goto out;
607
608	for_each_node(i) {
609		spin_lock_init(&lru->node[i].lock);
610		if (key)
611			lockdep_set_class(&lru->node[i].lock, key);
612		init_one_lru(&lru->node[i].lru);
613	}
614
615	err = memcg_init_list_lru(lru, memcg_aware);
616	if (err) {
617		kfree(lru->node);
618		/* Do this so a list_lru_destroy() doesn't crash: */
619		lru->node = NULL;
620		goto out;
621	}
622
623	list_lru_register(lru);
624out:
625	memcg_put_cache_ids();
626	return err;
627}
628EXPORT_SYMBOL_GPL(__list_lru_init);
629
630void list_lru_destroy(struct list_lru *lru)
631{
632	/* Already destroyed or not yet initialized? */
633	if (!lru->node)
634		return;
635
636	memcg_get_cache_ids();
637
638	list_lru_unregister(lru);
639
640	memcg_destroy_list_lru(lru);
641	kfree(lru->node);
642	lru->node = NULL;
643
644#ifdef CONFIG_MEMCG_KMEM
645	lru->shrinker_id = -1;
646#endif
647	memcg_put_cache_ids();
648}
649EXPORT_SYMBOL_GPL(list_lru_destroy);
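
For orientation, here is a minimal, hypothetical sketch of how a cache might drive the v5.9 API implemented above. The names my_obj, my_lru, my_isolate and my_shrink are invented for illustration; only the list_lru calls themselves (list_lru_isolate_move, list_lru_walk_one, list_lru_add, list_lru_del, list_lru_count_one) come from this file.

/* Illustrative sketch only; the my_* names are hypothetical. */
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

struct my_obj {
	struct list_head lru;		/* linked into my_lru while idle */
	/* ... payload ... */
};

static struct list_lru my_lru;		/* set up with list_lru_init(&my_lru) */

/* Called by __list_lru_walk_one() for each item, with nlru->lock held. */
static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *list,
				  spinlock_t *lru_lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	/* Detach the item onto a private dispose list; counters stay correct. */
	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}

/* Reclaim up to nr_to_walk idle objects for one node/memcg pair. */
static unsigned long my_shrink(int nid, struct mem_cgroup *memcg,
			       unsigned long nr_to_walk)
{
	LIST_HEAD(dispose);
	struct my_obj *obj, *next;
	unsigned long freed;

	freed = list_lru_walk_one(&my_lru, nid, memcg, my_isolate,
				  &dispose, &nr_to_walk);

	/* Free outside the lru lock. */
	list_for_each_entry_safe(obj, next, &dispose, lru)
		kfree(obj);
	return freed;
}

Objects would be put on the LRU with list_lru_add(&my_lru, &obj->lru) when they go idle and taken off with list_lru_del() when reused; list_lru_count_one() supplies the per-node, per-memcg count a shrinker would report.
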
v4.6 (mm/list_lru.c)
 
  1/*
  2 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  3 * Authors: David Chinner and Glauber Costa
  4 *
  5 * Generic LRU infrastructure
  6 */
  7#include <linux/kernel.h>
  8#include <linux/module.h>
  9#include <linux/mm.h>
 10#include <linux/list_lru.h>
 11#include <linux/slab.h>
 12#include <linux/mutex.h>
 13#include <linux/memcontrol.h>
 14
 15#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 16static LIST_HEAD(list_lrus);
 17static DEFINE_MUTEX(list_lrus_mutex);
 18
 19static void list_lru_register(struct list_lru *lru)
 20{
 21	mutex_lock(&list_lrus_mutex);
 22	list_add(&lru->list, &list_lrus);
 23	mutex_unlock(&list_lrus_mutex);
 24}
 25
 26static void list_lru_unregister(struct list_lru *lru)
 27{
 28	mutex_lock(&list_lrus_mutex);
 29	list_del(&lru->list);
 30	mutex_unlock(&list_lrus_mutex);
 31}
 32#else
 33static void list_lru_register(struct list_lru *lru)
 34{
 35}
 36
 37static void list_lru_unregister(struct list_lru *lru)
 38{
 39}
 40#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 41
 42#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 43static inline bool list_lru_memcg_aware(struct list_lru *lru)
 44{
 45	/*
 46	 * This needs node 0 to be always present, even
 47	 * in the systems supporting sparse numa ids.
 48	 */
 49	return !!lru->node[0].memcg_lrus;
 50}
 51
 52static inline struct list_lru_one *
 53list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 54{
 55	/*
 56	 * The lock protects the array of per cgroup lists from relocation
 57	 * (see memcg_update_list_lru_node).
 58	 */
 59	lockdep_assert_held(&nlru->lock);
 60	if (nlru->memcg_lrus && idx >= 0)
 61		return nlru->memcg_lrus->lru[idx];
 62
 63	return &nlru->lru;
 64}
 65
 66static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
 67{
 68	struct page *page;
 69
 70	if (!memcg_kmem_enabled())
 71		return NULL;
 72	page = virt_to_head_page(ptr);
 73	return page->mem_cgroup;
 74}
 75
 76static inline struct list_lru_one *
 77list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
 78{
 79	struct mem_cgroup *memcg;
 80
 81	if (!nlru->memcg_lrus)
 82		return &nlru->lru;
 83
 84	memcg = mem_cgroup_from_kmem(ptr);
 85	if (!memcg)
 86		return &nlru->lru;
 87
 88	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
 89}
 90#else
 91static inline bool list_lru_memcg_aware(struct list_lru *lru)
 92{
 93	return false;
 94}
 95
 96static inline struct list_lru_one *
 97list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 98{
 99	return &nlru->lru;
100}
101
102static inline struct list_lru_one *
103list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
104{
105	return &nlru->lru;
106}
107#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
108
109bool list_lru_add(struct list_lru *lru, struct list_head *item)
110{
111	int nid = page_to_nid(virt_to_page(item));
112	struct list_lru_node *nlru = &lru->node[nid];
113	struct list_lru_one *l;
114
115	spin_lock(&nlru->lock);
116	if (list_empty(item)) {
117		l = list_lru_from_kmem(nlru, item);
118		list_add_tail(item, &l->list);
119		l->nr_items++;
120		spin_unlock(&nlru->lock);
121		return true;
122	}
123	spin_unlock(&nlru->lock);
124	return false;
125}
126EXPORT_SYMBOL_GPL(list_lru_add);
127
128bool list_lru_del(struct list_lru *lru, struct list_head *item)
129{
130	int nid = page_to_nid(virt_to_page(item));
131	struct list_lru_node *nlru = &lru->node[nid];
132	struct list_lru_one *l;
133
134	spin_lock(&nlru->lock);
135	if (!list_empty(item)) {
136		l = list_lru_from_kmem(nlru, item);
137		list_del_init(item);
138		l->nr_items--;
139		spin_unlock(&nlru->lock);
140		return true;
141	}
142	spin_unlock(&nlru->lock);
143	return false;
144}
145EXPORT_SYMBOL_GPL(list_lru_del);
146
147void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
148{
149	list_del_init(item);
150	list->nr_items--;
151}
152EXPORT_SYMBOL_GPL(list_lru_isolate);
153
154void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
155			   struct list_head *head)
156{
157	list_move(item, head);
158	list->nr_items--;
159}
160EXPORT_SYMBOL_GPL(list_lru_isolate_move);
161
162static unsigned long __list_lru_count_one(struct list_lru *lru,
163					  int nid, int memcg_idx)
164{
165	struct list_lru_node *nlru = &lru->node[nid];
166	struct list_lru_one *l;
167	unsigned long count;
168
169	spin_lock(&nlru->lock);
170	l = list_lru_from_memcg_idx(nlru, memcg_idx);
171	count = l->nr_items;
172	spin_unlock(&nlru->lock);
173
174	return count;
175}
176
177unsigned long list_lru_count_one(struct list_lru *lru,
178				 int nid, struct mem_cgroup *memcg)
179{
180	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
181}
182EXPORT_SYMBOL_GPL(list_lru_count_one);
183
184unsigned long list_lru_count_node(struct list_lru *lru, int nid)
185{
186	long count = 0;
187	int memcg_idx;
188
189	count += __list_lru_count_one(lru, nid, -1);
190	if (list_lru_memcg_aware(lru)) {
191		for_each_memcg_cache_index(memcg_idx)
192			count += __list_lru_count_one(lru, nid, memcg_idx);
193	}
194	return count;
195}
196EXPORT_SYMBOL_GPL(list_lru_count_node);
197
198static unsigned long
199__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
200		    list_lru_walk_cb isolate, void *cb_arg,
201		    unsigned long *nr_to_walk)
202{
203
204	struct list_lru_node *nlru = &lru->node[nid];
205	struct list_lru_one *l;
206	struct list_head *item, *n;
207	unsigned long isolated = 0;
208
209	spin_lock(&nlru->lock);
210	l = list_lru_from_memcg_idx(nlru, memcg_idx);
211restart:
212	list_for_each_safe(item, n, &l->list) {
213		enum lru_status ret;
214
215		/*
216		 * decrement nr_to_walk first so that we don't livelock if we
217		 * get stuck on large numbers of LRU_RETRY items
218		 */
219		if (!*nr_to_walk)
220			break;
221		--*nr_to_walk;
222
223		ret = isolate(item, l, &nlru->lock, cb_arg);
224		switch (ret) {
225		case LRU_REMOVED_RETRY:
226			assert_spin_locked(&nlru->lock);
227		case LRU_REMOVED:
228			isolated++;
229			/*
230			 * If the lru lock has been dropped, our list
231			 * traversal is now invalid and so we have to
232			 * restart from scratch.
233			 */
234			if (ret == LRU_REMOVED_RETRY)
235				goto restart;
236			break;
237		case LRU_ROTATE:
238			list_move_tail(item, &l->list);
239			break;
240		case LRU_SKIP:
241			break;
242		case LRU_RETRY:
243			/*
244			 * The lru lock has been dropped, our list traversal is
245			 * now invalid and so we have to restart from scratch.
246			 */
247			assert_spin_locked(&nlru->lock);
248			goto restart;
249		default:
250			BUG();
251		}
252	}
253
254	spin_unlock(&nlru->lock);
255	return isolated;
256}
257
258unsigned long
259list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
260		  list_lru_walk_cb isolate, void *cb_arg,
261		  unsigned long *nr_to_walk)
262{
263	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
264				   isolate, cb_arg, nr_to_walk);
265}
266EXPORT_SYMBOL_GPL(list_lru_walk_one);
267
268unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
269				 list_lru_walk_cb isolate, void *cb_arg,
270				 unsigned long *nr_to_walk)
271{
272	long isolated = 0;
273	int memcg_idx;
274
275	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
276					nr_to_walk);
277	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
278		for_each_memcg_cache_index(memcg_idx) {
279			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
280						isolate, cb_arg, nr_to_walk);
281			if (*nr_to_walk <= 0)
282				break;
283		}
284	}
285	return isolated;
286}
287EXPORT_SYMBOL_GPL(list_lru_walk_node);
288
289static void init_one_lru(struct list_lru_one *l)
290{
291	INIT_LIST_HEAD(&l->list);
292	l->nr_items = 0;
293}
294
295#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
296static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
297					  int begin, int end)
298{
299	int i;
300
301	for (i = begin; i < end; i++)
302		kfree(memcg_lrus->lru[i]);
303}
304
305static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
306				      int begin, int end)
307{
308	int i;
309
310	for (i = begin; i < end; i++) {
311		struct list_lru_one *l;
312
313		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
314		if (!l)
315			goto fail;
316
317		init_one_lru(l);
318		memcg_lrus->lru[i] = l;
319	}
320	return 0;
321fail:
322	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
323	return -ENOMEM;
324}
325
326static int memcg_init_list_lru_node(struct list_lru_node *nlru)
327{
328	int size = memcg_nr_cache_ids;
329
330	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
331	if (!nlru->memcg_lrus)
332		return -ENOMEM;
333
334	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
335		kfree(nlru->memcg_lrus);
336		return -ENOMEM;
337	}
338
339	return 0;
340}
341
342static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
343{
344	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
345	kfree(nlru->memcg_lrus);
346}
347
348static int memcg_update_list_lru_node(struct list_lru_node *nlru,
349				      int old_size, int new_size)
350{
351	struct list_lru_memcg *old, *new;
352
353	BUG_ON(old_size > new_size);
354
355	old = nlru->memcg_lrus;
356	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
357	if (!new)
358		return -ENOMEM;
359
360	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
361		kfree(new);
362		return -ENOMEM;
363	}
364
365	memcpy(new, old, old_size * sizeof(void *));
366
367	/*
368	 * The lock guarantees that we won't race with a reader
369	 * (see list_lru_from_memcg_idx).
370	 *
371	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
372	 * we have to use IRQ-safe primitives here to avoid deadlock.
373	 */
374	spin_lock_irq(&nlru->lock);
375	nlru->memcg_lrus = new;
376	spin_unlock_irq(&nlru->lock);
377
378	kfree(old);
379	return 0;
380}
381
382static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
383					      int old_size, int new_size)
384{
385	/* do not bother shrinking the array back to the old size, because we
386	 * cannot handle allocation failures here */
387	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
388}
389
390static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
391{
392	int i;
393
394	if (!memcg_aware)
395		return 0;
396
397	for_each_node(i) {
398		if (memcg_init_list_lru_node(&lru->node[i]))
399			goto fail;
400	}
401	return 0;
402fail:
403	for (i = i - 1; i >= 0; i--) {
404		if (!lru->node[i].memcg_lrus)
405			continue;
406		memcg_destroy_list_lru_node(&lru->node[i]);
407	}
408	return -ENOMEM;
409}
410
411static void memcg_destroy_list_lru(struct list_lru *lru)
412{
413	int i;
414
415	if (!list_lru_memcg_aware(lru))
416		return;
417
418	for_each_node(i)
419		memcg_destroy_list_lru_node(&lru->node[i]);
420}
421
422static int memcg_update_list_lru(struct list_lru *lru,
423				 int old_size, int new_size)
424{
425	int i;
426
427	if (!list_lru_memcg_aware(lru))
428		return 0;
429
430	for_each_node(i) {
431		if (memcg_update_list_lru_node(&lru->node[i],
432					       old_size, new_size))
433			goto fail;
434	}
435	return 0;
436fail:
437	for (i = i - 1; i >= 0; i--) {
438		if (!lru->node[i].memcg_lrus)
439			continue;
440
441		memcg_cancel_update_list_lru_node(&lru->node[i],
442						  old_size, new_size);
443	}
444	return -ENOMEM;
445}
446
447static void memcg_cancel_update_list_lru(struct list_lru *lru,
448					 int old_size, int new_size)
449{
450	int i;
451
452	if (!list_lru_memcg_aware(lru))
453		return;
454
455	for_each_node(i)
456		memcg_cancel_update_list_lru_node(&lru->node[i],
457						  old_size, new_size);
458}
459
460int memcg_update_all_list_lrus(int new_size)
461{
462	int ret = 0;
463	struct list_lru *lru;
464	int old_size = memcg_nr_cache_ids;
465
466	mutex_lock(&list_lrus_mutex);
467	list_for_each_entry(lru, &list_lrus, list) {
468		ret = memcg_update_list_lru(lru, old_size, new_size);
469		if (ret)
470			goto fail;
471	}
472out:
473	mutex_unlock(&list_lrus_mutex);
474	return ret;
475fail:
476	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
477		memcg_cancel_update_list_lru(lru, old_size, new_size);
478	goto out;
479}
480
481static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
482				      int src_idx, int dst_idx)
483{
484	struct list_lru_one *src, *dst;
485
486	/*
487	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
488	 * we have to use IRQ-safe primitives here to avoid deadlock.
489	 */
490	spin_lock_irq(&nlru->lock);
491
492	src = list_lru_from_memcg_idx(nlru, src_idx);
493	dst = list_lru_from_memcg_idx(nlru, dst_idx);
494
495	list_splice_init(&src->list, &dst->list);
496	dst->nr_items += src->nr_items;
497	src->nr_items = 0;
498
499	spin_unlock_irq(&nlru->lock);
500}
501
502static void memcg_drain_list_lru(struct list_lru *lru,
503				 int src_idx, int dst_idx)
504{
505	int i;
506
507	if (!list_lru_memcg_aware(lru))
508		return;
509
510	for_each_node(i)
511		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
512}
513
514void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
515{
516	struct list_lru *lru;
517
518	mutex_lock(&list_lrus_mutex);
519	list_for_each_entry(lru, &list_lrus, list)
520		memcg_drain_list_lru(lru, src_idx, dst_idx);
521	mutex_unlock(&list_lrus_mutex);
522}
523#else
524static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
525{
526	return 0;
527}
528
529static void memcg_destroy_list_lru(struct list_lru *lru)
530{
531}
532#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
533
534int __list_lru_init(struct list_lru *lru, bool memcg_aware,
535		    struct lock_class_key *key)
536{
537	int i;
538	size_t size = sizeof(*lru->node) * nr_node_ids;
539	int err = -ENOMEM;
540
541	memcg_get_cache_ids();
542
543	lru->node = kzalloc(size, GFP_KERNEL);
544	if (!lru->node)
545		goto out;
546
547	for_each_node(i) {
548		spin_lock_init(&lru->node[i].lock);
549		if (key)
550			lockdep_set_class(&lru->node[i].lock, key);
551		init_one_lru(&lru->node[i].lru);
552	}
553
554	err = memcg_init_list_lru(lru, memcg_aware);
555	if (err) {
556		kfree(lru->node);
557		goto out;
558	}
559
560	list_lru_register(lru);
561out:
562	memcg_put_cache_ids();
563	return err;
564}
565EXPORT_SYMBOL_GPL(__list_lru_init);
566
567void list_lru_destroy(struct list_lru *lru)
568{
569	/* Already destroyed or not yet initialized? */
570	if (!lru->node)
571		return;
572
573	memcg_get_cache_ids();
574
575	list_lru_unregister(lru);
576
577	memcg_destroy_list_lru(lru);
578	kfree(lru->node);
579	lru->node = NULL;
580
581	memcg_put_cache_ids();
582}
583EXPORT_SYMBOL_GPL(list_lru_destroy);
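
As a contrast with v5.9, note that the v4.6 __list_lru_init() shown above takes no shrinker argument and list_lru_destroy() does not touch a shrinker id. A hypothetical setup/teardown against the v4.6 signatures might look like this sketch (my_lru and the my_cache_* names are invented):

#include <linux/list_lru.h>

static struct list_lru my_lru;

static int my_cache_init(void)
{
	/* memcg-aware LRU, no lockdep class key (v4.6 three-argument form). */
	return __list_lru_init(&my_lru, true, NULL);
}

static void my_cache_exit(void)
{
	/* Unregisters the LRU and frees the per-node and per-memcg structures. */
	list_lru_destroy(&my_lru);
}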