v4.17: mm/list_lru.c
 
  1/*
  2 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  3 * Authors: David Chinner and Glauber Costa
  4 *
  5 * Generic LRU infrastructure
  6 */
  7#include <linux/kernel.h>
  8#include <linux/module.h>
  9#include <linux/mm.h>
 10#include <linux/list_lru.h>
 11#include <linux/slab.h>
 12#include <linux/mutex.h>
 13#include <linux/memcontrol.h>
 14
 15#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 16static LIST_HEAD(list_lrus);
 17static DEFINE_MUTEX(list_lrus_mutex);
 18
 19static void list_lru_register(struct list_lru *lru)
 20{
 21	mutex_lock(&list_lrus_mutex);
 22	list_add(&lru->list, &list_lrus);
 23	mutex_unlock(&list_lrus_mutex);
 24}
 25
 26static void list_lru_unregister(struct list_lru *lru)
 27{
 28	mutex_lock(&list_lrus_mutex);
 29	list_del(&lru->list);
 30	mutex_unlock(&list_lrus_mutex);
 31}
 32#else
 33static void list_lru_register(struct list_lru *lru)
 34{
 35}
 36
 37static void list_lru_unregister(struct list_lru *lru)
 38{
 39}
 40#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 41
 42#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 43static inline bool list_lru_memcg_aware(struct list_lru *lru)
 44{
 45	/*
  46	 * This needs node 0 to always be present, even
  47	 * on systems supporting sparse NUMA ids.
 48	 */
 49	return !!lru->node[0].memcg_lrus;
 50}
 51
 52static inline struct list_lru_one *
 53list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 54{
 55	struct list_lru_memcg *memcg_lrus;
 56	/*
 57	 * Either lock or RCU protects the array of per cgroup lists
 58	 * from relocation (see memcg_update_list_lru_node).
 59	 */
 60	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
 61					   lockdep_is_held(&nlru->lock));
 62	if (memcg_lrus && idx >= 0)
 63		return memcg_lrus->lru[idx];
 64	return &nlru->lru;
 65}
 66
 67static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
 68{
 69	struct page *page;
 70
 71	if (!memcg_kmem_enabled())
 72		return NULL;
 73	page = virt_to_head_page(ptr);
 74	return page->mem_cgroup;
 75}
 76
 77static inline struct list_lru_one *
 78list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
 79{
 80	struct mem_cgroup *memcg;
 81
 82	if (!nlru->memcg_lrus)
 83		return &nlru->lru;
 84
 85	memcg = mem_cgroup_from_kmem(ptr);
 86	if (!memcg)
 87		return &nlru->lru;
 88
 89	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
 90}
 91#else
 92static inline bool list_lru_memcg_aware(struct list_lru *lru)
 93{
 94	return false;
 95}
 96
 97static inline struct list_lru_one *
 98list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 99{
100	return &nlru->lru;
101}
102
103static inline struct list_lru_one *
104list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
105{
106	return &nlru->lru;
107}
108#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
109
110bool list_lru_add(struct list_lru *lru, struct list_head *item)
111{
112	int nid = page_to_nid(virt_to_page(item));
113	struct list_lru_node *nlru = &lru->node[nid];
114	struct list_lru_one *l;
115
116	spin_lock(&nlru->lock);
117	if (list_empty(item)) {
118		l = list_lru_from_kmem(nlru, item);
119		list_add_tail(item, &l->list);
120		l->nr_items++;
121		nlru->nr_items++;
122		spin_unlock(&nlru->lock);
123		return true;
124	}
125	spin_unlock(&nlru->lock);
126	return false;
127}
128EXPORT_SYMBOL_GPL(list_lru_add);
129
130bool list_lru_del(struct list_lru *lru, struct list_head *item)
131{
132	int nid = page_to_nid(virt_to_page(item));
133	struct list_lru_node *nlru = &lru->node[nid];
134	struct list_lru_one *l;
135
136	spin_lock(&nlru->lock);
137	if (!list_empty(item)) {
138		l = list_lru_from_kmem(nlru, item);
139		list_del_init(item);
140		l->nr_items--;
141		nlru->nr_items--;
142		spin_unlock(&nlru->lock);
143		return true;
144	}
145	spin_unlock(&nlru->lock);
146	return false;
147}
148EXPORT_SYMBOL_GPL(list_lru_del);
149
150void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
151{
152	list_del_init(item);
153	list->nr_items--;
154}
155EXPORT_SYMBOL_GPL(list_lru_isolate);
156
157void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
158			   struct list_head *head)
159{
160	list_move(item, head);
161	list->nr_items--;
162}
163EXPORT_SYMBOL_GPL(list_lru_isolate_move);
164
165static unsigned long __list_lru_count_one(struct list_lru *lru,
166					  int nid, int memcg_idx)
167{
168	struct list_lru_node *nlru = &lru->node[nid];
169	struct list_lru_one *l;
170	unsigned long count;
171
172	rcu_read_lock();
173	l = list_lru_from_memcg_idx(nlru, memcg_idx);
174	count = l->nr_items;
175	rcu_read_unlock();
176
177	return count;
178}
179
180unsigned long list_lru_count_one(struct list_lru *lru,
181				 int nid, struct mem_cgroup *memcg)
182{
183	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
184}
185EXPORT_SYMBOL_GPL(list_lru_count_one);
186
187unsigned long list_lru_count_node(struct list_lru *lru, int nid)
188{
189	struct list_lru_node *nlru;
190
191	nlru = &lru->node[nid];
192	return nlru->nr_items;
193}
194EXPORT_SYMBOL_GPL(list_lru_count_node);
195
196static unsigned long
197__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
198		    list_lru_walk_cb isolate, void *cb_arg,
199		    unsigned long *nr_to_walk)
200{
201
202	struct list_lru_node *nlru = &lru->node[nid];
203	struct list_lru_one *l;
204	struct list_head *item, *n;
205	unsigned long isolated = 0;
206
207	spin_lock(&nlru->lock);
208	l = list_lru_from_memcg_idx(nlru, memcg_idx);
209restart:
210	list_for_each_safe(item, n, &l->list) {
211		enum lru_status ret;
212
213		/*
214		 * decrement nr_to_walk first so that we don't livelock if we
 215		 * get stuck on large numbers of LRU_RETRY items
216		 */
217		if (!*nr_to_walk)
218			break;
219		--*nr_to_walk;
220
221		ret = isolate(item, l, &nlru->lock, cb_arg);
222		switch (ret) {
223		case LRU_REMOVED_RETRY:
224			assert_spin_locked(&nlru->lock);
225			/* fall through */
226		case LRU_REMOVED:
227			isolated++;
228			nlru->nr_items--;
229			/*
230			 * If the lru lock has been dropped, our list
231			 * traversal is now invalid and so we have to
232			 * restart from scratch.
233			 */
234			if (ret == LRU_REMOVED_RETRY)
235				goto restart;
236			break;
237		case LRU_ROTATE:
238			list_move_tail(item, &l->list);
239			break;
240		case LRU_SKIP:
241			break;
242		case LRU_RETRY:
243			/*
244			 * The lru lock has been dropped, our list traversal is
245			 * now invalid and so we have to restart from scratch.
246			 */
247			assert_spin_locked(&nlru->lock);
248			goto restart;
249		default:
250			BUG();
251		}
252	}
253
254	spin_unlock(&nlru->lock);
255	return isolated;
256}
257
258unsigned long
259list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
260		  list_lru_walk_cb isolate, void *cb_arg,
261		  unsigned long *nr_to_walk)
262{
263	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
264				   isolate, cb_arg, nr_to_walk);
265}
266EXPORT_SYMBOL_GPL(list_lru_walk_one);
267
268unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
269				 list_lru_walk_cb isolate, void *cb_arg,
270				 unsigned long *nr_to_walk)
271{
272	long isolated = 0;
273	int memcg_idx;
274
275	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
276					nr_to_walk);
277	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
278		for_each_memcg_cache_index(memcg_idx) {
279			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
280						isolate, cb_arg, nr_to_walk);
281			if (*nr_to_walk <= 0)
282				break;
283		}
284	}
285	return isolated;
286}
287EXPORT_SYMBOL_GPL(list_lru_walk_node);
288
289static void init_one_lru(struct list_lru_one *l)
290{
291	INIT_LIST_HEAD(&l->list);
292	l->nr_items = 0;
293}
294
295#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
296static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
297					  int begin, int end)
298{
299	int i;
300
301	for (i = begin; i < end; i++)
302		kfree(memcg_lrus->lru[i]);
303}
304
305static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
306				      int begin, int end)
307{
308	int i;
309
310	for (i = begin; i < end; i++) {
311		struct list_lru_one *l;
312
313		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
314		if (!l)
315			goto fail;
316
317		init_one_lru(l);
318		memcg_lrus->lru[i] = l;
319	}
320	return 0;
321fail:
322	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
323	return -ENOMEM;
324}
325
326static int memcg_init_list_lru_node(struct list_lru_node *nlru)
327{
328	struct list_lru_memcg *memcg_lrus;
329	int size = memcg_nr_cache_ids;
330
331	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
332			      size * sizeof(void *), GFP_KERNEL);
333	if (!memcg_lrus)
334		return -ENOMEM;
335
336	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
337		kvfree(memcg_lrus);
338		return -ENOMEM;
339	}
340	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
341
342	return 0;
343}
344
345static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
346{
347	struct list_lru_memcg *memcg_lrus;
348	/*
349	 * This is called when shrinker has already been unregistered,
350	 * and nobody can use it. So, there is no need to use kvfree_rcu().
351	 */
352	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
353	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
354	kvfree(memcg_lrus);
355}
356
357static void kvfree_rcu(struct rcu_head *head)
358{
359	struct list_lru_memcg *mlru;
360
361	mlru = container_of(head, struct list_lru_memcg, rcu);
362	kvfree(mlru);
363}
364
365static int memcg_update_list_lru_node(struct list_lru_node *nlru,
366				      int old_size, int new_size)
367{
368	struct list_lru_memcg *old, *new;
369
370	BUG_ON(old_size > new_size);
371
372	old = rcu_dereference_protected(nlru->memcg_lrus,
373					lockdep_is_held(&list_lrus_mutex));
374	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
375	if (!new)
376		return -ENOMEM;
377
378	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
379		kvfree(new);
380		return -ENOMEM;
381	}
382
383	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
384
385	/*
 386	 * The locking below allows readers that hold nlru->lock to avoid taking
387	 * rcu_read_lock (see list_lru_from_memcg_idx).
388	 *
389	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
390	 * we have to use IRQ-safe primitives here to avoid deadlock.
391	 */
392	spin_lock_irq(&nlru->lock);
393	rcu_assign_pointer(nlru->memcg_lrus, new);
394	spin_unlock_irq(&nlru->lock);
395
396	call_rcu(&old->rcu, kvfree_rcu);
397	return 0;
398}
399
400static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
401					      int old_size, int new_size)
402{
403	struct list_lru_memcg *memcg_lrus;
404
405	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
406					       lockdep_is_held(&list_lrus_mutex));
407	/* do not bother shrinking the array back to the old size, because we
408	 * cannot handle allocation failures here */
409	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
410}
411
412static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
413{
414	int i;
415
416	if (!memcg_aware)
417		return 0;
418
419	for_each_node(i) {
420		if (memcg_init_list_lru_node(&lru->node[i]))
421			goto fail;
422	}
423	return 0;
424fail:
425	for (i = i - 1; i >= 0; i--) {
426		if (!lru->node[i].memcg_lrus)
427			continue;
428		memcg_destroy_list_lru_node(&lru->node[i]);
429	}
430	return -ENOMEM;
431}
432
433static void memcg_destroy_list_lru(struct list_lru *lru)
434{
435	int i;
436
437	if (!list_lru_memcg_aware(lru))
438		return;
439
440	for_each_node(i)
441		memcg_destroy_list_lru_node(&lru->node[i]);
442}
443
444static int memcg_update_list_lru(struct list_lru *lru,
445				 int old_size, int new_size)
446{
447	int i;
448
449	if (!list_lru_memcg_aware(lru))
450		return 0;
451
452	for_each_node(i) {
453		if (memcg_update_list_lru_node(&lru->node[i],
454					       old_size, new_size))
455			goto fail;
456	}
457	return 0;
458fail:
459	for (i = i - 1; i >= 0; i--) {
460		if (!lru->node[i].memcg_lrus)
461			continue;
462
463		memcg_cancel_update_list_lru_node(&lru->node[i],
464						  old_size, new_size);
465	}
466	return -ENOMEM;
467}
468
469static void memcg_cancel_update_list_lru(struct list_lru *lru,
470					 int old_size, int new_size)
471{
472	int i;
473
474	if (!list_lru_memcg_aware(lru))
475		return;
476
477	for_each_node(i)
478		memcg_cancel_update_list_lru_node(&lru->node[i],
479						  old_size, new_size);
480}
481
482int memcg_update_all_list_lrus(int new_size)
483{
484	int ret = 0;
485	struct list_lru *lru;
486	int old_size = memcg_nr_cache_ids;
487
488	mutex_lock(&list_lrus_mutex);
489	list_for_each_entry(lru, &list_lrus, list) {
490		ret = memcg_update_list_lru(lru, old_size, new_size);
491		if (ret)
492			goto fail;
493	}
494out:
495	mutex_unlock(&list_lrus_mutex);
496	return ret;
497fail:
498	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
499		memcg_cancel_update_list_lru(lru, old_size, new_size);
500	goto out;
501}
502
503static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
504				      int src_idx, int dst_idx)
505{
506	struct list_lru_one *src, *dst;
507
508	/*
509	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
510	 * we have to use IRQ-safe primitives here to avoid deadlock.
511	 */
512	spin_lock_irq(&nlru->lock);
513
514	src = list_lru_from_memcg_idx(nlru, src_idx);
515	dst = list_lru_from_memcg_idx(nlru, dst_idx);
516
517	list_splice_init(&src->list, &dst->list);
518	dst->nr_items += src->nr_items;
519	src->nr_items = 0;
520
521	spin_unlock_irq(&nlru->lock);
522}
523
524static void memcg_drain_list_lru(struct list_lru *lru,
525				 int src_idx, int dst_idx)
526{
527	int i;
528
529	if (!list_lru_memcg_aware(lru))
530		return;
531
532	for_each_node(i)
533		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
534}
535
536void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
537{
538	struct list_lru *lru;
539
540	mutex_lock(&list_lrus_mutex);
541	list_for_each_entry(lru, &list_lrus, list)
542		memcg_drain_list_lru(lru, src_idx, dst_idx);
543	mutex_unlock(&list_lrus_mutex);
544}
545#else
546static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
547{
548	return 0;
549}
550
551static void memcg_destroy_list_lru(struct list_lru *lru)
552{
553}
554#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
555
556int __list_lru_init(struct list_lru *lru, bool memcg_aware,
557		    struct lock_class_key *key)
558{
559	int i;
560	size_t size = sizeof(*lru->node) * nr_node_ids;
561	int err = -ENOMEM;
562
563	memcg_get_cache_ids();
564
565	lru->node = kzalloc(size, GFP_KERNEL);
566	if (!lru->node)
567		goto out;
568
569	for_each_node(i) {
570		spin_lock_init(&lru->node[i].lock);
571		if (key)
572			lockdep_set_class(&lru->node[i].lock, key);
573		init_one_lru(&lru->node[i].lru);
574	}
575
576	err = memcg_init_list_lru(lru, memcg_aware);
577	if (err) {
578		kfree(lru->node);
579		/* Do this so a list_lru_destroy() doesn't crash: */
580		lru->node = NULL;
581		goto out;
582	}
583
584	list_lru_register(lru);
585out:
586	memcg_put_cache_ids();
587	return err;
588}
589EXPORT_SYMBOL_GPL(__list_lru_init);
590
591void list_lru_destroy(struct list_lru *lru)
592{
593	/* Already destroyed or not yet initialized? */
594	if (!lru->node)
595		return;
596
597	memcg_get_cache_ids();
598
599	list_lru_unregister(lru);
600
601	memcg_destroy_list_lru(lru);
602	kfree(lru->node);
603	lru->node = NULL;
604
605	memcg_put_cache_ids();
606}
607EXPORT_SYMBOL_GPL(list_lru_destroy);
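The listing above is the complete v4.17 version of the file. As a quick orientation, and not part of the kernel source, here is a minimal sketch of how a cache might drive this API from a shrinker: objects embed a struct list_head, list_lru_add()/list_lru_del() take that list_head and locate the per-node list internally, and the walk callback in this version is invoked with the per-node spinlock held and receives a pointer to it. The struct my_obj type, its fields and the my_* names are hypothetical; list_lru_shrink_walk() and list_lru_shrink_count() are the shrink_control convenience wrappers from <linux/list_lru.h> around list_lru_walk_one() and list_lru_count_one().

#include <linux/list_lru.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

/* Hypothetical cached object; only the embedded list_head matters here. */
struct my_obj {
	struct list_head lru;	/* linked into my_lru */
	int refcnt;		/* non-zero means still in use */
};

static struct list_lru my_lru;	/* set up elsewhere with list_lru_init(&my_lru) */

/* v4.17 walk callback: called with the per-node lru lock held. */
static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *list,
				  spinlock_t *lock, void *cb_arg)
{
	struct my_obj *obj = container_of(item, struct my_obj, lru);
	struct list_head *dispose = cb_arg;

	if (obj->refcnt)
		return LRU_SKIP;	/* still referenced, leave it on the lru */

	list_lru_isolate(list, item);	/* unlink and decrement nr_items */
	list_add(item, dispose);	/* free later, outside the lru lock */
	return LRU_REMOVED;
}

static unsigned long my_count_objects(struct shrinker *s,
				      struct shrink_control *sc)
{
	return list_lru_shrink_count(&my_lru, sc);
}

static unsigned long my_scan_objects(struct shrinker *s,
				     struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct my_obj *obj, *next;

	freed = list_lru_shrink_walk(&my_lru, sc, my_isolate, &dispose);
	list_for_each_entry_safe(obj, next, &dispose, lru)
		kfree(obj);
	return freed;
}

The dispose-list pattern keeps kfree() and any sleeping teardown outside the per-node spinlock that __list_lru_walk_one() holds around each callback invocation.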
v6.13.7: mm/list_lru.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  4 * Authors: David Chinner and Glauber Costa
  5 *
  6 * Generic LRU infrastructure
  7 */
  8#include <linux/kernel.h>
  9#include <linux/module.h>
 10#include <linux/mm.h>
 11#include <linux/list_lru.h>
 12#include <linux/slab.h>
 13#include <linux/mutex.h>
 14#include <linux/memcontrol.h>
 15#include "slab.h"
 16#include "internal.h"
 17
 18#ifdef CONFIG_MEMCG
 19static LIST_HEAD(memcg_list_lrus);
 20static DEFINE_MUTEX(list_lrus_mutex);
 21
 22static inline bool list_lru_memcg_aware(struct list_lru *lru)
 23{
 24	return lru->memcg_aware;
 25}
 26
 27static void list_lru_register(struct list_lru *lru)
 28{
 29	if (!list_lru_memcg_aware(lru))
 30		return;
 31
 32	mutex_lock(&list_lrus_mutex);
 33	list_add(&lru->list, &memcg_list_lrus);
 34	mutex_unlock(&list_lrus_mutex);
 35}
 36
 37static void list_lru_unregister(struct list_lru *lru)
 38{
 39	if (!list_lru_memcg_aware(lru))
 40		return;
 41
 42	mutex_lock(&list_lrus_mutex);
 43	list_del(&lru->list);
 44	mutex_unlock(&list_lrus_mutex);
 45}
 46
 47static int lru_shrinker_id(struct list_lru *lru)
 48{
 49	return lru->shrinker_id;
 50}
 51
 52static inline struct list_lru_one *
 53list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 54{
 55	if (list_lru_memcg_aware(lru) && idx >= 0) {
 56		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);
 57
 58		return mlru ? &mlru->node[nid] : NULL;
 59	}
 60	return &lru->node[nid].lru;
 61}
 62
 63static inline struct list_lru_one *
 64lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 65		       bool irq, bool skip_empty)
 66{
 67	struct list_lru_one *l;
 68	long nr_items;
 69
 70	rcu_read_lock();
 71again:
 72	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
 73	if (likely(l)) {
 74		if (irq)
 75			spin_lock_irq(&l->lock);
 76		else
 77			spin_lock(&l->lock);
 78		nr_items = READ_ONCE(l->nr_items);
 79		if (likely(nr_items != LONG_MIN)) {
 80			rcu_read_unlock();
 81			return l;
 82		}
 83		if (irq)
 84			spin_unlock_irq(&l->lock);
 85		else
 86			spin_unlock(&l->lock);
 87	}
 88	/*
 89	 * Caller may simply bail out if raced with reparenting or
 90	 * may iterate through the list_lru and expect empty slots.
 91	 */
 92	if (skip_empty) {
 93		rcu_read_unlock();
 94		return NULL;
 95	}
 96	VM_WARN_ON(!css_is_dying(&memcg->css));
 97	memcg = parent_mem_cgroup(memcg);
 98	goto again;
 99}
100
101static inline void unlock_list_lru(struct list_lru_one *l, bool irq_off)
102{
103	if (irq_off)
104		spin_unlock_irq(&l->lock);
105	else
106		spin_unlock(&l->lock);
107}
108#else
109static void list_lru_register(struct list_lru *lru)
110{
111}
112
113static void list_lru_unregister(struct list_lru *lru)
114{
115}
116
117static int lru_shrinker_id(struct list_lru *lru)
118{
119	return -1;
120}
121
122static inline bool list_lru_memcg_aware(struct list_lru *lru)
123{
124	return false;
125}
126
127static inline struct list_lru_one *
128list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
129{
130	return &lru->node[nid].lru;
131}
132
133static inline struct list_lru_one *
134lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
135		       bool irq, bool skip_empty)
136{
137	struct list_lru_one *l = &lru->node[nid].lru;
138
139	if (irq)
140		spin_lock_irq(&l->lock);
141	else
142		spin_lock(&l->lock);
143
144	return l;
145}
146
147static inline void unlock_list_lru(struct list_lru_one *l, bool irq_off)
148{
149	if (irq_off)
150		spin_unlock_irq(&l->lock);
151	else
152		spin_unlock(&l->lock);
153}
154#endif /* CONFIG_MEMCG */
155
156/* The caller must ensure the memcg lifetime. */
157bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
158		  struct mem_cgroup *memcg)
159{
160	struct list_lru_node *nlru = &lru->node[nid];
161	struct list_lru_one *l;
162
163	l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
164	if (!l)
165		return false;
166	if (list_empty(item)) {
167		list_add_tail(item, &l->list);
168		/* Set shrinker bit if the first element was added */
169		if (!l->nr_items++)
170			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
171		unlock_list_lru(l, false);
172		atomic_long_inc(&nlru->nr_items);
173		return true;
174	}
175	unlock_list_lru(l, false);
176	return false;
177}
178
179bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
180{
181	bool ret;
182	int nid = page_to_nid(virt_to_page(item));
183
184	if (list_lru_memcg_aware(lru)) {
185		rcu_read_lock();
186		ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
187		rcu_read_unlock();
188	} else {
189		ret = list_lru_add(lru, item, nid, NULL);
190	}
191
192	return ret;
193}
194EXPORT_SYMBOL_GPL(list_lru_add_obj);
195
196/* The caller must ensure the memcg lifetime. */
197bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
198		  struct mem_cgroup *memcg)
199{
200	struct list_lru_node *nlru = &lru->node[nid];
201	struct list_lru_one *l;
202	l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
203	if (!l)
204		return false;
205	if (!list_empty(item)) {
206		list_del_init(item);
207		l->nr_items--;
208		unlock_list_lru(l, false);
209		atomic_long_dec(&nlru->nr_items);
210		return true;
211	}
212	unlock_list_lru(l, false);
213	return false;
214}
215
216bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
217{
218	bool ret;
219	int nid = page_to_nid(virt_to_page(item));
220
221	if (list_lru_memcg_aware(lru)) {
222		rcu_read_lock();
223		ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
224		rcu_read_unlock();
225	} else {
226		ret = list_lru_del(lru, item, nid, NULL);
227	}
228
229	return ret;
230}
231EXPORT_SYMBOL_GPL(list_lru_del_obj);
232
233void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
234{
235	list_del_init(item);
236	list->nr_items--;
237}
238EXPORT_SYMBOL_GPL(list_lru_isolate);
239
240void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
241			   struct list_head *head)
242{
243	list_move(item, head);
244	list->nr_items--;
245}
246EXPORT_SYMBOL_GPL(list_lru_isolate_move);
247
248unsigned long list_lru_count_one(struct list_lru *lru,
249				 int nid, struct mem_cgroup *memcg)
250{
251	struct list_lru_one *l;
252	long count;
253
254	rcu_read_lock();
255	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
256	count = l ? READ_ONCE(l->nr_items) : 0;
257	rcu_read_unlock();
258
259	if (unlikely(count < 0))
260		count = 0;
261
262	return count;
263}
264EXPORT_SYMBOL_GPL(list_lru_count_one);
265
266unsigned long list_lru_count_node(struct list_lru *lru, int nid)
267{
268	struct list_lru_node *nlru;
269
270	nlru = &lru->node[nid];
271	return atomic_long_read(&nlru->nr_items);
272}
273EXPORT_SYMBOL_GPL(list_lru_count_node);
274
275static unsigned long
276__list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
277		    list_lru_walk_cb isolate, void *cb_arg,
278		    unsigned long *nr_to_walk, bool irq_off)
279{
280	struct list_lru_node *nlru = &lru->node[nid];
281	struct list_lru_one *l = NULL;
282	struct list_head *item, *n;
283	unsigned long isolated = 0;
284
285restart:
286	l = lock_list_lru_of_memcg(lru, nid, memcg, irq_off, true);
287	if (!l)
288		return isolated;
289	list_for_each_safe(item, n, &l->list) {
290		enum lru_status ret;
291
292		/*
293		 * decrement nr_to_walk first so that we don't livelock if we
294		 * get stuck on large numbers of LRU_RETRY items
295		 */
296		if (!*nr_to_walk)
297			break;
298		--*nr_to_walk;
299
300		ret = isolate(item, l, cb_arg);
301		switch (ret) {
302		/*
303		 * LRU_RETRY, LRU_REMOVED_RETRY and LRU_STOP will drop the lru
304		 * lock. List traversal will have to restart from scratch.
305		 */
306		case LRU_RETRY:
307			goto restart;
308		case LRU_REMOVED_RETRY:
309			fallthrough;
310		case LRU_REMOVED:
311			isolated++;
312			atomic_long_dec(&nlru->nr_items);
313			if (ret == LRU_REMOVED_RETRY)
314				goto restart;
315			break;
316		case LRU_ROTATE:
317			list_move_tail(item, &l->list);
318			break;
319		case LRU_SKIP:
320			break;
321		case LRU_STOP:
322			goto out;
323		default:
324			BUG();
325		}
326	}
327	unlock_list_lru(l, irq_off);
328out:
329	return isolated;
330}
331
332unsigned long
333list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
334		  list_lru_walk_cb isolate, void *cb_arg,
335		  unsigned long *nr_to_walk)
336{
337	return __list_lru_walk_one(lru, nid, memcg, isolate,
338				   cb_arg, nr_to_walk, false);
339}
340EXPORT_SYMBOL_GPL(list_lru_walk_one);
341
342unsigned long
343list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
344		      list_lru_walk_cb isolate, void *cb_arg,
345		      unsigned long *nr_to_walk)
346{
347	return __list_lru_walk_one(lru, nid, memcg, isolate,
348				   cb_arg, nr_to_walk, true);
349}
350
351unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
352				 list_lru_walk_cb isolate, void *cb_arg,
353				 unsigned long *nr_to_walk)
354{
355	long isolated = 0;
356
357	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
358				      nr_to_walk);
359
360#ifdef CONFIG_MEMCG
361	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
362		struct list_lru_memcg *mlru;
363		struct mem_cgroup *memcg;
364		unsigned long index;
365
366		xa_for_each(&lru->xa, index, mlru) {
367			rcu_read_lock();
368			memcg = mem_cgroup_from_id(index);
369			if (!mem_cgroup_tryget(memcg)) {
370				rcu_read_unlock();
371				continue;
372			}
373			rcu_read_unlock();
374			isolated += __list_lru_walk_one(lru, nid, memcg,
375							isolate, cb_arg,
376							nr_to_walk, false);
377			mem_cgroup_put(memcg);
378
379			if (*nr_to_walk <= 0)
380				break;
381		}
382	}
383#endif
384
385	return isolated;
386}
387EXPORT_SYMBOL_GPL(list_lru_walk_node);
388
389static void init_one_lru(struct list_lru *lru, struct list_lru_one *l)
390{
391	INIT_LIST_HEAD(&l->list);
392	spin_lock_init(&l->lock);
393	l->nr_items = 0;
394#ifdef CONFIG_LOCKDEP
395	if (lru->key)
396		lockdep_set_class(&l->lock, lru->key);
397#endif
398}
399
400#ifdef CONFIG_MEMCG
401static struct list_lru_memcg *memcg_init_list_lru_one(struct list_lru *lru, gfp_t gfp)
402{
403	int nid;
404	struct list_lru_memcg *mlru;
405
406	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
407	if (!mlru)
408		return NULL;
409
410	for_each_node(nid)
411		init_one_lru(lru, &mlru->node[nid]);
412
413	return mlru;
414}
415
416static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
417{
418	if (memcg_aware)
419		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
420	lru->memcg_aware = memcg_aware;
421}
422
423static void memcg_destroy_list_lru(struct list_lru *lru)
424{
425	XA_STATE(xas, &lru->xa, 0);
426	struct list_lru_memcg *mlru;
427
428	if (!list_lru_memcg_aware(lru))
429		return;
430
431	xas_lock_irq(&xas);
432	xas_for_each(&xas, mlru, ULONG_MAX) {
433		kfree(mlru);
434		xas_store(&xas, NULL);
435	}
436	xas_unlock_irq(&xas);
437}
438
439static void memcg_reparent_list_lru_one(struct list_lru *lru, int nid,
440					struct list_lru_one *src,
441					struct mem_cgroup *dst_memcg)
442{
443	int dst_idx = dst_memcg->kmemcg_id;
444	struct list_lru_one *dst;
445
446	spin_lock_irq(&src->lock);
447	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);
448	spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
449
450	list_splice_init(&src->list, &dst->list);
451	if (src->nr_items) {
452		WARN_ON(src->nr_items < 0);
453		dst->nr_items += src->nr_items;
454		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
455	}
456	/* Mark the list_lru_one dead */
457	src->nr_items = LONG_MIN;
458
459	spin_unlock(&dst->lock);
460	spin_unlock_irq(&src->lock);
461}
462
463void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
464{
465	struct list_lru *lru;
466	int i;
467
468	mutex_lock(&list_lrus_mutex);
469	list_for_each_entry(lru, &memcg_list_lrus, list) {
470		struct list_lru_memcg *mlru;
471		XA_STATE(xas, &lru->xa, memcg->kmemcg_id);
472
473		/*
 474		 * Lock the Xarray to ensure there is no ongoing list_lru_memcg
 475		 * allocation and that any further allocation will see css_is_dying().
476		 */
477		xas_lock_irq(&xas);
478		mlru = xas_store(&xas, NULL);
479		xas_unlock_irq(&xas);
480		if (!mlru)
481			continue;
482
483		/*
484		 * With Xarray value set to NULL, holding the lru lock below
 485		 * prevents list_lru_{add,del,isolate} from touching the lru,
 486		 * so it is safe to reparent.
487		 */
488		for_each_node(i)
489			memcg_reparent_list_lru_one(lru, i, &mlru->node[i], parent);
490
491		/*
492		 * Here all list_lrus corresponding to the cgroup are guaranteed
 493		 * to remain empty, so we can safely free this lru; any further
494		 * memcg_list_lru_alloc() call will simply bail out.
495		 */
496		kvfree_rcu(mlru, rcu);
497	}
498	mutex_unlock(&list_lrus_mutex);
499}
500
501static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
502					    struct list_lru *lru)
503{
504	int idx = memcg->kmemcg_id;
505
506	return idx < 0 || xa_load(&lru->xa, idx);
507}
508
509int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
510			 gfp_t gfp)
511{
512	unsigned long flags;
513	struct list_lru_memcg *mlru;
514	struct mem_cgroup *pos, *parent;
515	XA_STATE(xas, &lru->xa, 0);
516
517	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
518		return 0;
519
520	gfp &= GFP_RECLAIM_MASK;
521	/*
522	 * Because the list_lru can be reparented to the parent cgroup's
523	 * list_lru, we should make sure that this cgroup and all its
524	 * ancestors have allocated list_lru_memcg.
525	 */
526	do {
527		/*
 528		 * Keep finding the farthest parent that wasn't populated,
 529		 * until memcg itself is reached.
530		 */
531		pos = memcg;
532		parent = parent_mem_cgroup(pos);
533		while (!memcg_list_lru_allocated(parent, lru)) {
534			pos = parent;
535			parent = parent_mem_cgroup(pos);
536		}
537
538		mlru = memcg_init_list_lru_one(lru, gfp);
539		if (!mlru)
540			return -ENOMEM;
541		xas_set(&xas, pos->kmemcg_id);
542		do {
543			xas_lock_irqsave(&xas, flags);
544			if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
545				xas_store(&xas, mlru);
546				if (!xas_error(&xas))
547					mlru = NULL;
548			}
549			xas_unlock_irqrestore(&xas, flags);
550		} while (xas_nomem(&xas, gfp));
551		if (mlru)
552			kfree(mlru);
553	} while (pos != memcg && !css_is_dying(&pos->css));
554
555	return xas_error(&xas);
556}
557#else
558static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
559{
560}
561
562static void memcg_destroy_list_lru(struct list_lru *lru)
563{
564}
565#endif /* CONFIG_MEMCG */
566
567int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct shrinker *shrinker)
568{
569	int i;
570
571#ifdef CONFIG_MEMCG
572	if (shrinker)
573		lru->shrinker_id = shrinker->id;
574	else
575		lru->shrinker_id = -1;
576
577	if (mem_cgroup_kmem_disabled())
578		memcg_aware = false;
579#endif
580
581	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
582	if (!lru->node)
583		return -ENOMEM;
584
585	for_each_node(i)
586		init_one_lru(lru, &lru->node[i].lru);
587
588	memcg_init_list_lru(lru, memcg_aware);
589	list_lru_register(lru);
590
591	return 0;
592}
593EXPORT_SYMBOL_GPL(__list_lru_init);
594
595void list_lru_destroy(struct list_lru *lru)
596{
597	/* Already destroyed or not yet initialized? */
598	if (!lru->node)
599		return;
600
601	list_lru_unregister(lru);
602
603	memcg_destroy_list_lru(lru);
604	kfree(lru->node);
605	lru->node = NULL;
606
607#ifdef CONFIG_MEMCG
608	lru->shrinker_id = -1;
609#endif
610}
611EXPORT_SYMBOL_GPL(list_lru_destroy);
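Compared with the v4.17 code earlier on this page, the v6.13.7 version moves the spinlock from struct list_lru_node into each struct list_lru_one, tracks per-memcg lists in an XArray indexed by kmemcg_id, counts per-node items with an atomic, and takes a struct shrinker instead of a lock_class_key in __list_lru_init(). For callers the visible differences are that list_lru_add()/list_lru_del() now take an explicit nid and memcg (with list_lru_add_obj()/list_lru_del_obj() deriving both from the object address), and that the walk callback no longer receives a spinlock pointer and may return LRU_STOP. A minimal sketch against this newer API, reusing the hypothetical struct my_obj from the earlier example:

#include <linux/list_lru.h>

struct my_obj {
	struct list_head lru;
	int refcnt;
};

static struct list_lru my_lru;

/* v6.13.7 walk callback: no spinlock argument; LRU_STOP would end the walk. */
static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *list, void *cb_arg)
{
	struct list_head *dispose = cb_arg;
	struct my_obj *obj = container_of(item, struct my_obj, lru);

	if (obj->refcnt)
		return LRU_SKIP;

	/* unlink, decrement nr_items and park the object on the dispose list */
	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}

static void my_cache_insert(struct my_obj *obj)
{
	/* nid and memcg are derived internally from the object's page/slab */
	list_lru_add_obj(&my_lru, &obj->lru);
}

static void my_cache_remove(struct my_obj *obj)
{
	list_lru_del_obj(&my_lru, &obj->lru);
}

Because list_lru_add_obj() and list_lru_del_obj() take rcu_read_lock() around the memcg lookup for memcg-aware lrus, they satisfy the "caller must ensure the memcg lifetime" requirement noted above list_lru_add() and list_lru_del().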