mm/list_lru.c (v4.17)
 
  1/*
  2 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  3 * Authors: David Chinner and Glauber Costa
  4 *
  5 * Generic LRU infrastructure
  6 */
  7#include <linux/kernel.h>
  8#include <linux/module.h>
  9#include <linux/mm.h>
 10#include <linux/list_lru.h>
 11#include <linux/slab.h>
 12#include <linux/mutex.h>
 13#include <linux/memcontrol.h>
 14
 15#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 16static LIST_HEAD(list_lrus);
 17static DEFINE_MUTEX(list_lrus_mutex);
 18
 19static void list_lru_register(struct list_lru *lru)
 20{
 21	mutex_lock(&list_lrus_mutex);
 22	list_add(&lru->list, &list_lrus);
 23	mutex_unlock(&list_lrus_mutex);
 24}
 25
 26static void list_lru_unregister(struct list_lru *lru)
 27{
 28	mutex_lock(&list_lrus_mutex);
 29	list_del(&lru->list);
 30	mutex_unlock(&list_lrus_mutex);
 31}
 32#else
 33static void list_lru_register(struct list_lru *lru)
 34{
 35}
 36
 37static void list_lru_unregister(struct list_lru *lru)
 38{
 39}
 40#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 41
 42#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 43static inline bool list_lru_memcg_aware(struct list_lru *lru)
 44{
 45	/*
 46	 * This needs node 0 to be always present, even
 47	 * in the systems supporting sparse numa ids.
 48	 */
 49	return !!lru->node[0].memcg_lrus;
 50}
 51
 52static inline struct list_lru_one *
 53list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 54{
 55	struct list_lru_memcg *memcg_lrus;
 56	/*
 57	 * Either lock or RCU protects the array of per cgroup lists
 58	 * from relocation (see memcg_update_list_lru_node).
 59	 */
 60	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
 61					   lockdep_is_held(&nlru->lock));
 62	if (memcg_lrus && idx >= 0)
 63		return memcg_lrus->lru[idx];
 64	return &nlru->lru;
 65}
 66
 67static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
 68{
 69	struct page *page;
 70
 71	if (!memcg_kmem_enabled())
 72		return NULL;
 73	page = virt_to_head_page(ptr);
 74	return page->mem_cgroup;
 75}
 76
 77static inline struct list_lru_one *
 78list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
 79{
 80	struct mem_cgroup *memcg;
 81
 82	if (!nlru->memcg_lrus)
 83		return &nlru->lru;
 84
 85	memcg = mem_cgroup_from_kmem(ptr);
 86	if (!memcg)
 87		return &nlru->lru;
 88
 89	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
 90}
 91#else
 92static inline bool list_lru_memcg_aware(struct list_lru *lru)
 93{
 94	return false;
 95}
 96
 97static inline struct list_lru_one *
 98list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 99{
100	return &nlru->lru;
101}
102
103static inline struct list_lru_one *
104list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
105{
106	return &nlru->lru;
107}
108#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
109
110bool list_lru_add(struct list_lru *lru, struct list_head *item)
111{
112	int nid = page_to_nid(virt_to_page(item));
113	struct list_lru_node *nlru = &lru->node[nid];
114	struct list_lru_one *l;
115
116	spin_lock(&nlru->lock);
117	if (list_empty(item)) {
118		l = list_lru_from_kmem(nlru, item);
119		list_add_tail(item, &l->list);
120		l->nr_items++;
121		nlru->nr_items++;
122		spin_unlock(&nlru->lock);
123		return true;
124	}
125	spin_unlock(&nlru->lock);
126	return false;
127}
128EXPORT_SYMBOL_GPL(list_lru_add);
129
130bool list_lru_del(struct list_lru *lru, struct list_head *item)
131{
132	int nid = page_to_nid(virt_to_page(item));
133	struct list_lru_node *nlru = &lru->node[nid];
134	struct list_lru_one *l;
135
136	spin_lock(&nlru->lock);
137	if (!list_empty(item)) {
138		l = list_lru_from_kmem(nlru, item);
139		list_del_init(item);
140		l->nr_items--;
141		nlru->nr_items--;
142		spin_unlock(&nlru->lock);
143		return true;
144	}
145	spin_unlock(&nlru->lock);
146	return false;
147}
148EXPORT_SYMBOL_GPL(list_lru_del);
149
150void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
151{
152	list_del_init(item);
153	list->nr_items--;
154}
155EXPORT_SYMBOL_GPL(list_lru_isolate);
156
157void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
158			   struct list_head *head)
159{
160	list_move(item, head);
161	list->nr_items--;
162}
163EXPORT_SYMBOL_GPL(list_lru_isolate_move);
164
165static unsigned long __list_lru_count_one(struct list_lru *lru,
166					  int nid, int memcg_idx)
167{
168	struct list_lru_node *nlru = &lru->node[nid];
169	struct list_lru_one *l;
170	unsigned long count;
171
172	rcu_read_lock();
173	l = list_lru_from_memcg_idx(nlru, memcg_idx);
174	count = l->nr_items;
175	rcu_read_unlock();
176
177	return count;
178}
179
180unsigned long list_lru_count_one(struct list_lru *lru,
181				 int nid, struct mem_cgroup *memcg)
182{
183	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
184}
185EXPORT_SYMBOL_GPL(list_lru_count_one);
186
187unsigned long list_lru_count_node(struct list_lru *lru, int nid)
188{
189	struct list_lru_node *nlru;
190
191	nlru = &lru->node[nid];
192	return nlru->nr_items;
193}
194EXPORT_SYMBOL_GPL(list_lru_count_node);
195
196static unsigned long
197__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
198		    list_lru_walk_cb isolate, void *cb_arg,
199		    unsigned long *nr_to_walk)
200{
201
202	struct list_lru_node *nlru = &lru->node[nid];
203	struct list_lru_one *l;
204	struct list_head *item, *n;
205	unsigned long isolated = 0;
206
207	spin_lock(&nlru->lock);
208	l = list_lru_from_memcg_idx(nlru, memcg_idx);
209restart:
210	list_for_each_safe(item, n, &l->list) {
211		enum lru_status ret;
212
213		/*
214		 * decrement nr_to_walk first so that we don't livelock if we
215		 * get stuck on large numbers of LRU_RETRY items
216		 */
217		if (!*nr_to_walk)
218			break;
219		--*nr_to_walk;
220
221		ret = isolate(item, l, &nlru->lock, cb_arg);
222		switch (ret) {
223		case LRU_REMOVED_RETRY:
224			assert_spin_locked(&nlru->lock);
225			/* fall through */
226		case LRU_REMOVED:
227			isolated++;
228			nlru->nr_items--;
229			/*
230			 * If the lru lock has been dropped, our list
231			 * traversal is now invalid and so we have to
232			 * restart from scratch.
233			 */
234			if (ret == LRU_REMOVED_RETRY)
235				goto restart;
236			break;
237		case LRU_ROTATE:
238			list_move_tail(item, &l->list);
239			break;
240		case LRU_SKIP:
241			break;
242		case LRU_RETRY:
243			/*
244			 * The lru lock has been dropped, our list traversal is
245			 * now invalid and so we have to restart from scratch.
246			 */
247			assert_spin_locked(&nlru->lock);
248			goto restart;
249		default:
250			BUG();
251		}
252	}
253
254	spin_unlock(&nlru->lock);
255	return isolated;
256}
257
258unsigned long
259list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
260		  list_lru_walk_cb isolate, void *cb_arg,
261		  unsigned long *nr_to_walk)
262{
263	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
264				   isolate, cb_arg, nr_to_walk);
265}
266EXPORT_SYMBOL_GPL(list_lru_walk_one);
267
268unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
269				 list_lru_walk_cb isolate, void *cb_arg,
270				 unsigned long *nr_to_walk)
271{
272	long isolated = 0;
273	int memcg_idx;
274
275	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
276					nr_to_walk);
277	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
278		for_each_memcg_cache_index(memcg_idx) {
279			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
280						isolate, cb_arg, nr_to_walk);
281			if (*nr_to_walk <= 0)
282				break;
283		}
284	}
285	return isolated;
286}
287EXPORT_SYMBOL_GPL(list_lru_walk_node);
288
289static void init_one_lru(struct list_lru_one *l)
290{
291	INIT_LIST_HEAD(&l->list);
292	l->nr_items = 0;
293}
294
295#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
296static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
297					  int begin, int end)
298{
299	int i;
300
301	for (i = begin; i < end; i++)
302		kfree(memcg_lrus->lru[i]);
303}
304
305static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
306				      int begin, int end)
307{
308	int i;
309
310	for (i = begin; i < end; i++) {
311		struct list_lru_one *l;
312
313		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
314		if (!l)
315			goto fail;
316
317		init_one_lru(l);
318		memcg_lrus->lru[i] = l;
319	}
320	return 0;
321fail:
322	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
323	return -ENOMEM;
324}
325
326static int memcg_init_list_lru_node(struct list_lru_node *nlru)
327{
328	struct list_lru_memcg *memcg_lrus;
329	int size = memcg_nr_cache_ids;
330
331	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
332			      size * sizeof(void *), GFP_KERNEL);
333	if (!memcg_lrus)
334		return -ENOMEM;
335
336	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
337		kvfree(memcg_lrus);
338		return -ENOMEM;
339	}
340	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
341
342	return 0;
343}
344
345static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
346{
347	struct list_lru_memcg *memcg_lrus;
348	/*
349	 * This is called when shrinker has already been unregistered,
350	 * and nobody can use it. So, there is no need to use kvfree_rcu().
351	 */
352	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
353	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
354	kvfree(memcg_lrus);
355}
356
357static void kvfree_rcu(struct rcu_head *head)
358{
359	struct list_lru_memcg *mlru;
360
361	mlru = container_of(head, struct list_lru_memcg, rcu);
362	kvfree(mlru);
363}
364
365static int memcg_update_list_lru_node(struct list_lru_node *nlru,
366				      int old_size, int new_size)
367{
368	struct list_lru_memcg *old, *new;
369
370	BUG_ON(old_size > new_size);
371
372	old = rcu_dereference_protected(nlru->memcg_lrus,
373					lockdep_is_held(&list_lrus_mutex));
374	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
375	if (!new)
376		return -ENOMEM;
377
378	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
379		kvfree(new);
380		return -ENOMEM;
381	}
382
383	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
384
385	/*
386	 * The locking below allows readers that hold nlru->lock avoid taking
387	 * rcu_read_lock (see list_lru_from_memcg_idx).
388	 *
389	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
390	 * we have to use IRQ-safe primitives here to avoid deadlock.
391	 */
392	spin_lock_irq(&nlru->lock);
393	rcu_assign_pointer(nlru->memcg_lrus, new);
394	spin_unlock_irq(&nlru->lock);
395
396	call_rcu(&old->rcu, kvfree_rcu);
397	return 0;
398}
399
400static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
401					      int old_size, int new_size)
402{
403	struct list_lru_memcg *memcg_lrus;
404
405	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
406					       lockdep_is_held(&list_lrus_mutex));
407	/* do not bother shrinking the array back to the old size, because we
408	 * cannot handle allocation failures here */
409	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
410}
411
412static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
413{
414	int i;
415
416	if (!memcg_aware)
417		return 0;
418
419	for_each_node(i) {
420		if (memcg_init_list_lru_node(&lru->node[i]))
421			goto fail;
422	}
423	return 0;
424fail:
425	for (i = i - 1; i >= 0; i--) {
426		if (!lru->node[i].memcg_lrus)
427			continue;
428		memcg_destroy_list_lru_node(&lru->node[i]);
429	}
430	return -ENOMEM;
431}
432
433static void memcg_destroy_list_lru(struct list_lru *lru)
434{
435	int i;
436
437	if (!list_lru_memcg_aware(lru))
438		return;
439
440	for_each_node(i)
441		memcg_destroy_list_lru_node(&lru->node[i]);
442}
443
444static int memcg_update_list_lru(struct list_lru *lru,
445				 int old_size, int new_size)
446{
447	int i;
448
449	if (!list_lru_memcg_aware(lru))
450		return 0;
451
452	for_each_node(i) {
453		if (memcg_update_list_lru_node(&lru->node[i],
454					       old_size, new_size))
455			goto fail;
456	}
457	return 0;
458fail:
459	for (i = i - 1; i >= 0; i--) {
460		if (!lru->node[i].memcg_lrus)
461			continue;
462
463		memcg_cancel_update_list_lru_node(&lru->node[i],
464						  old_size, new_size);
465	}
466	return -ENOMEM;
467}
468
469static void memcg_cancel_update_list_lru(struct list_lru *lru,
470					 int old_size, int new_size)
471{
472	int i;
473
474	if (!list_lru_memcg_aware(lru))
475		return;
476
477	for_each_node(i)
478		memcg_cancel_update_list_lru_node(&lru->node[i],
479						  old_size, new_size);
480}
481
482int memcg_update_all_list_lrus(int new_size)
483{
484	int ret = 0;
485	struct list_lru *lru;
486	int old_size = memcg_nr_cache_ids;
487
488	mutex_lock(&list_lrus_mutex);
489	list_for_each_entry(lru, &list_lrus, list) {
490		ret = memcg_update_list_lru(lru, old_size, new_size);
491		if (ret)
492			goto fail;
493	}
494out:
495	mutex_unlock(&list_lrus_mutex);
496	return ret;
497fail:
498	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
499		memcg_cancel_update_list_lru(lru, old_size, new_size);
500	goto out;
501}
502
503static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
504				      int src_idx, int dst_idx)
505{
506	struct list_lru_one *src, *dst;
507
508	/*
509	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
510	 * we have to use IRQ-safe primitives here to avoid deadlock.
511	 */
512	spin_lock_irq(&nlru->lock);
513
514	src = list_lru_from_memcg_idx(nlru, src_idx);
515	dst = list_lru_from_memcg_idx(nlru, dst_idx);
516
517	list_splice_init(&src->list, &dst->list);
518	dst->nr_items += src->nr_items;
519	src->nr_items = 0;
520
521	spin_unlock_irq(&nlru->lock);
522}
523
524static void memcg_drain_list_lru(struct list_lru *lru,
525				 int src_idx, int dst_idx)
526{
527	int i;
528
529	if (!list_lru_memcg_aware(lru))
530		return;
531
532	for_each_node(i)
533		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
534}
535
536void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
537{
538	struct list_lru *lru;
539
540	mutex_lock(&list_lrus_mutex);
541	list_for_each_entry(lru, &list_lrus, list)
542		memcg_drain_list_lru(lru, src_idx, dst_idx);
543	mutex_unlock(&list_lrus_mutex);
544}
545#else
546static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
547{
548	return 0;
549}
550
551static void memcg_destroy_list_lru(struct list_lru *lru)
552{
553}
554#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
555
556int __list_lru_init(struct list_lru *lru, bool memcg_aware,
557		    struct lock_class_key *key)
558{
559	int i;
560	size_t size = sizeof(*lru->node) * nr_node_ids;
561	int err = -ENOMEM;
562
563	memcg_get_cache_ids();
564
565	lru->node = kzalloc(size, GFP_KERNEL);
566	if (!lru->node)
567		goto out;
568
569	for_each_node(i) {
570		spin_lock_init(&lru->node[i].lock);
571		if (key)
572			lockdep_set_class(&lru->node[i].lock, key);
573		init_one_lru(&lru->node[i].lru);
574	}
575
576	err = memcg_init_list_lru(lru, memcg_aware);
577	if (err) {
578		kfree(lru->node);
579		/* Do this so a list_lru_destroy() doesn't crash: */
580		lru->node = NULL;
581		goto out;
582	}
583
584	list_lru_register(lru);
585out:
586	memcg_put_cache_ids();
587	return err;
588}
589EXPORT_SYMBOL_GPL(__list_lru_init);
590
591void list_lru_destroy(struct list_lru *lru)
592{
593	/* Already destroyed or not yet initialized? */
594	if (!lru->node)
595		return;
596
597	memcg_get_cache_ids();
598
599	list_lru_unregister(lru);
600
601	memcg_destroy_list_lru(lru);
602	kfree(lru->node);
603	lru->node = NULL;
604
605	memcg_put_cache_ids();
606}
607EXPORT_SYMBOL_GPL(list_lru_destroy);
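
Before the v6.8 version of the same file, here is a minimal sketch of how a v4.17-era caller would drive the API above: list_lru_init() (the non-memcg wrapper around __list_lru_init() from the header) plus list_lru_add() to track objects, and list_lru_walk_one() with an isolate callback to reclaim them. All demo_* names are invented for illustration and are not part of the kernel tree.

/* A minimal sketch of a v4.17-era list_lru user; demo_* names are invented. */
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct list_head lru_link;	/* must be empty before list_lru_add() */
	/* ... payload ... */
};

static struct list_lru demo_lru;

/* Isolate callback; __list_lru_walk_one() calls it with the node lock held. */
static enum lru_status demo_isolate(struct list_head *item,
				    struct list_lru_one *list,
				    spinlock_t *lock, void *cb_arg)
{
	struct list_head *freeable = cb_arg;

	/*
	 * Move the item off its LRU list; the walker accounts the removal
	 * against nlru->nr_items when we return LRU_REMOVED.
	 */
	list_lru_isolate_move(list, item, freeable);
	return LRU_REMOVED;
}

static int demo_setup(void)
{
	/* list_lru_init() is the non-memcg-aware wrapper around __list_lru_init(). */
	return list_lru_init(&demo_lru);
}

static void demo_track(struct demo_obj *obj)
{
	/* Returns false (and does nothing) if the item is already on a list. */
	list_lru_add(&demo_lru, &obj->lru_link);
}

static unsigned long demo_reclaim(int nid, unsigned long nr_to_walk)
{
	LIST_HEAD(freeable);
	unsigned long isolated;

	/* A NULL memcg maps to memcg_cache_id() == -1, i.e. the global per-node list. */
	isolated = list_lru_walk_one(&demo_lru, nid, NULL, demo_isolate,
				     &freeable, &nr_to_walk);
	/* ... free the objects now parked on &freeable ... */
	return isolated;
}

In the kernel itself, the dentry and inode caches follow this pattern: the superblock shrinker's scan callback isolates entries onto a private list under the lru lock and disposes of them afterwards.
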
mm/list_lru.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  4 * Authors: David Chinner and Glauber Costa
  5 *
  6 * Generic LRU infrastructure
  7 */
  8#include <linux/kernel.h>
  9#include <linux/module.h>
 10#include <linux/mm.h>
 11#include <linux/list_lru.h>
 12#include <linux/slab.h>
 13#include <linux/mutex.h>
 14#include <linux/memcontrol.h>
 15#include "slab.h"
 16#include "internal.h"
 17
 18#ifdef CONFIG_MEMCG_KMEM
 19static LIST_HEAD(memcg_list_lrus);
 20static DEFINE_MUTEX(list_lrus_mutex);
 21
 22static inline bool list_lru_memcg_aware(struct list_lru *lru)
 23{
 24	return lru->memcg_aware;
 25}
 26
 27static void list_lru_register(struct list_lru *lru)
 28{
 29	if (!list_lru_memcg_aware(lru))
 30		return;
 31
 32	mutex_lock(&list_lrus_mutex);
 33	list_add(&lru->list, &memcg_list_lrus);
 34	mutex_unlock(&list_lrus_mutex);
 35}
 36
 37static void list_lru_unregister(struct list_lru *lru)
 38{
 39	if (!list_lru_memcg_aware(lru))
 40		return;
 41
 42	mutex_lock(&list_lrus_mutex);
 43	list_del(&lru->list);
 44	mutex_unlock(&list_lrus_mutex);
 45}
 46
 47static int lru_shrinker_id(struct list_lru *lru)
 48{
 49	return lru->shrinker_id;
 50}
 51
 52static inline struct list_lru_one *
 53list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 54{
 55	if (list_lru_memcg_aware(lru) && idx >= 0) {
 56		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);
 57
 58		return mlru ? &mlru->node[nid] : NULL;
 59	}
 60	return &lru->node[nid].lru;
 61}
 62#else
 63static void list_lru_register(struct list_lru *lru)
 64{
 65}
 66
 67static void list_lru_unregister(struct list_lru *lru)
 68{
 69}
 70
 71static int lru_shrinker_id(struct list_lru *lru)
 72{
 73	return -1;
 74}
 75
 76static inline bool list_lru_memcg_aware(struct list_lru *lru)
 77{
 78	return false;
 79}
 80
 81static inline struct list_lru_one *
 82list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 83{
 84	return &lru->node[nid].lru;
 85}
 86#endif /* CONFIG_MEMCG_KMEM */
 87
 88bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 89		    struct mem_cgroup *memcg)
 90{
 91	struct list_lru_node *nlru = &lru->node[nid];
 92	struct list_lru_one *l;
 93
 94	spin_lock(&nlru->lock);
 95	if (list_empty(item)) {
 96		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
 97		list_add_tail(item, &l->list);
 98		/* Set shrinker bit if the first element was added */
 99		if (!l->nr_items++)
100			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
101		nlru->nr_items++;
102		spin_unlock(&nlru->lock);
103		return true;
104	}
105	spin_unlock(&nlru->lock);
106	return false;
107}
108EXPORT_SYMBOL_GPL(list_lru_add);
109
110bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
111{
112	int nid = page_to_nid(virt_to_page(item));
113	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
114		mem_cgroup_from_slab_obj(item) : NULL;
115
116	return list_lru_add(lru, item, nid, memcg);
117}
118EXPORT_SYMBOL_GPL(list_lru_add_obj);
119
120bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
121		    struct mem_cgroup *memcg)
122{
123	struct list_lru_node *nlru = &lru->node[nid];
124	struct list_lru_one *l;
125
126	spin_lock(&nlru->lock);
127	if (!list_empty(item)) {
128		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
129		list_del_init(item);
130		l->nr_items--;
131		nlru->nr_items--;
132		spin_unlock(&nlru->lock);
133		return true;
134	}
135	spin_unlock(&nlru->lock);
136	return false;
137}
138EXPORT_SYMBOL_GPL(list_lru_del);
139
140bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
141{
142	int nid = page_to_nid(virt_to_page(item));
143	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
144		mem_cgroup_from_slab_obj(item) : NULL;
145
146	return list_lru_del(lru, item, nid, memcg);
147}
148EXPORT_SYMBOL_GPL(list_lru_del_obj);
149
150void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
151{
152	list_del_init(item);
153	list->nr_items--;
154}
155EXPORT_SYMBOL_GPL(list_lru_isolate);
156
157void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
158			   struct list_head *head)
159{
160	list_move(item, head);
161	list->nr_items--;
162}
163EXPORT_SYMBOL_GPL(list_lru_isolate_move);
164
165void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
166		      struct mem_cgroup *memcg)
167{
168	struct list_lru_one *list =
169		list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
170
171	if (list_empty(item)) {
172		list_add_tail(item, &list->list);
173		if (!list->nr_items++)
174			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
175	}
176}
177EXPORT_SYMBOL_GPL(list_lru_putback);
178
179unsigned long list_lru_count_one(struct list_lru *lru,
180				 int nid, struct mem_cgroup *memcg)
181{
182	struct list_lru_one *l;
183	long count;
184
185	rcu_read_lock();
186	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
187	count = l ? READ_ONCE(l->nr_items) : 0;
188	rcu_read_unlock();
189
190	if (unlikely(count < 0))
191		count = 0;
192
193	return count;
194}
195EXPORT_SYMBOL_GPL(list_lru_count_one);
196
197unsigned long list_lru_count_node(struct list_lru *lru, int nid)
198{
199	struct list_lru_node *nlru;
200
201	nlru = &lru->node[nid];
202	return nlru->nr_items;
203}
204EXPORT_SYMBOL_GPL(list_lru_count_node);
205
206static unsigned long
207__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
208		    list_lru_walk_cb isolate, void *cb_arg,
209		    unsigned long *nr_to_walk)
210{
211	struct list_lru_node *nlru = &lru->node[nid];
212	struct list_lru_one *l;
213	struct list_head *item, *n;
214	unsigned long isolated = 0;
215
216restart:
217	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
218	if (!l)
219		goto out;
220
221	list_for_each_safe(item, n, &l->list) {
222		enum lru_status ret;
223
224		/*
225		 * decrement nr_to_walk first so that we don't livelock if we
226		 * get stuck on large numbers of LRU_RETRY items
227		 */
228		if (!*nr_to_walk)
229			break;
230		--*nr_to_walk;
231
232		ret = isolate(item, l, &nlru->lock, cb_arg);
233		switch (ret) {
234		case LRU_REMOVED_RETRY:
235			assert_spin_locked(&nlru->lock);
236			fallthrough;
237		case LRU_REMOVED:
238			isolated++;
239			nlru->nr_items--;
240			/*
241			 * If the lru lock has been dropped, our list
242			 * traversal is now invalid and so we have to
243			 * restart from scratch.
244			 */
245			if (ret == LRU_REMOVED_RETRY)
246				goto restart;
247			break;
248		case LRU_ROTATE:
249			list_move_tail(item, &l->list);
250			break;
251		case LRU_SKIP:
252			break;
253		case LRU_RETRY:
254			/*
255			 * The lru lock has been dropped, our list traversal is
256			 * now invalid and so we have to restart from scratch.
257			 */
258			assert_spin_locked(&nlru->lock);
259			goto restart;
260		default:
261			BUG();
262		}
263	}
264out:
265	return isolated;
266}
267
268unsigned long
269list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
270		  list_lru_walk_cb isolate, void *cb_arg,
271		  unsigned long *nr_to_walk)
272{
273	struct list_lru_node *nlru = &lru->node[nid];
274	unsigned long ret;
275
276	spin_lock(&nlru->lock);
277	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
278				  cb_arg, nr_to_walk);
279	spin_unlock(&nlru->lock);
280	return ret;
281}
282EXPORT_SYMBOL_GPL(list_lru_walk_one);
283
284unsigned long
285list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
286		      list_lru_walk_cb isolate, void *cb_arg,
287		      unsigned long *nr_to_walk)
288{
289	struct list_lru_node *nlru = &lru->node[nid];
290	unsigned long ret;
291
292	spin_lock_irq(&nlru->lock);
293	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
294				  cb_arg, nr_to_walk);
295	spin_unlock_irq(&nlru->lock);
296	return ret;
297}
298
299unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
300				 list_lru_walk_cb isolate, void *cb_arg,
301				 unsigned long *nr_to_walk)
302{
303	long isolated = 0;
304
305	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
306				      nr_to_walk);
307
308#ifdef CONFIG_MEMCG_KMEM
309	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
310		struct list_lru_memcg *mlru;
311		unsigned long index;
312
313		xa_for_each(&lru->xa, index, mlru) {
314			struct list_lru_node *nlru = &lru->node[nid];
315
316			spin_lock(&nlru->lock);
317			isolated += __list_lru_walk_one(lru, nid, index,
318							isolate, cb_arg,
319							nr_to_walk);
320			spin_unlock(&nlru->lock);
321
322			if (*nr_to_walk <= 0)
323				break;
324		}
325	}
326#endif
327
328	return isolated;
329}
330EXPORT_SYMBOL_GPL(list_lru_walk_node);
331
332static void init_one_lru(struct list_lru_one *l)
333{
334	INIT_LIST_HEAD(&l->list);
335	l->nr_items = 0;
336}
337
338#ifdef CONFIG_MEMCG_KMEM
339static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
340{
341	int nid;
342	struct list_lru_memcg *mlru;
343
344	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
345	if (!mlru)
346		return NULL;
347
348	for_each_node(nid)
349		init_one_lru(&mlru->node[nid]);
350
351	return mlru;
352}
353
354static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
355{
356	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
357
358	/*
359	 * The __list_lru_walk_one() can walk the list of this node.
360	 * We need kvfree_rcu() here. And the walking of the list
361	 * is under lru->node[nid]->lock, which can serve as a RCU
362	 * read-side critical section.
363	 */
364	if (mlru)
365		kvfree_rcu(mlru, rcu);
366}
367
368static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
369{
370	if (memcg_aware)
371		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
372	lru->memcg_aware = memcg_aware;
373}
374
375static void memcg_destroy_list_lru(struct list_lru *lru)
376{
377	XA_STATE(xas, &lru->xa, 0);
378	struct list_lru_memcg *mlru;
379
380	if (!list_lru_memcg_aware(lru))
381		return;
382
383	xas_lock_irq(&xas);
384	xas_for_each(&xas, mlru, ULONG_MAX) {
385		kfree(mlru);
386		xas_store(&xas, NULL);
387	}
388	xas_unlock_irq(&xas);
389}
390
391static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
392					 int src_idx, struct mem_cgroup *dst_memcg)
393{
394	struct list_lru_node *nlru = &lru->node[nid];
395	int dst_idx = dst_memcg->kmemcg_id;
396	struct list_lru_one *src, *dst;
397
398	/*
399	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
400	 * we have to use IRQ-safe primitives here to avoid deadlock.
401	 */
402	spin_lock_irq(&nlru->lock);
403
404	src = list_lru_from_memcg_idx(lru, nid, src_idx);
405	if (!src)
406		goto out;
407	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);
408
409	list_splice_init(&src->list, &dst->list);
410
411	if (src->nr_items) {
412		dst->nr_items += src->nr_items;
413		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
414		src->nr_items = 0;
415	}
416out:
417	spin_unlock_irq(&nlru->lock);
418}
419
420static void memcg_reparent_list_lru(struct list_lru *lru,
421				    int src_idx, struct mem_cgroup *dst_memcg)
422{
423	int i;
424
425	for_each_node(i)
426		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);
427
428	memcg_list_lru_free(lru, src_idx);
429}
430
431void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
432{
433	struct cgroup_subsys_state *css;
434	struct list_lru *lru;
435	int src_idx = memcg->kmemcg_id;
436
437	/*
438	 * Change kmemcg_id of this cgroup and all its descendants to the
439	 * parent's id, and then move all entries from this cgroup's list_lrus
440	 * to ones of the parent.
441	 *
442	 * After we have finished, all list_lrus corresponding to this cgroup
443	 * are guaranteed to remain empty. So we can safely free this cgroup's
444	 * list lrus in memcg_list_lru_free().
445	 *
446	 * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc()
447	 * from allocating list lrus for this cgroup after memcg_list_lru_free()
448	 * call.
449	 */
450	rcu_read_lock();
451	css_for_each_descendant_pre(css, &memcg->css) {
452		struct mem_cgroup *child;
453
454		child = mem_cgroup_from_css(css);
455		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
456	}
457	rcu_read_unlock();
458
459	mutex_lock(&list_lrus_mutex);
460	list_for_each_entry(lru, &memcg_list_lrus, list)
461		memcg_reparent_list_lru(lru, src_idx, parent);
462	mutex_unlock(&list_lrus_mutex);
463}
464
465static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
466					    struct list_lru *lru)
467{
468	int idx = memcg->kmemcg_id;
469
470	return idx < 0 || xa_load(&lru->xa, idx);
471}
472
473int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
474			 gfp_t gfp)
475{
476	int i;
477	unsigned long flags;
478	struct list_lru_memcg_table {
479		struct list_lru_memcg *mlru;
480		struct mem_cgroup *memcg;
481	} *table;
482	XA_STATE(xas, &lru->xa, 0);
483
484	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
485		return 0;
486
487	gfp &= GFP_RECLAIM_MASK;
488	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
489	if (!table)
490		return -ENOMEM;
491
492	/*
493	 * Because the list_lru can be reparented to the parent cgroup's
494	 * list_lru, we should make sure that this cgroup and all its
495	 * ancestors have allocated list_lru_memcg.
496	 */
497	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
498		if (memcg_list_lru_allocated(memcg, lru))
499			break;
500
501		table[i].memcg = memcg;
502		table[i].mlru = memcg_init_list_lru_one(gfp);
503		if (!table[i].mlru) {
504			while (i--)
505				kfree(table[i].mlru);
506			kfree(table);
507			return -ENOMEM;
508		}
509	}
510
511	xas_lock_irqsave(&xas, flags);
512	while (i--) {
513		int index = READ_ONCE(table[i].memcg->kmemcg_id);
514		struct list_lru_memcg *mlru = table[i].mlru;
515
516		xas_set(&xas, index);
517retry:
518		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
519			kfree(mlru);
520		} else {
521			xas_store(&xas, mlru);
522			if (xas_error(&xas) == -ENOMEM) {
523				xas_unlock_irqrestore(&xas, flags);
524				if (xas_nomem(&xas, gfp))
525					xas_set_err(&xas, 0);
526				xas_lock_irqsave(&xas, flags);
527				/*
528				 * The xas lock has been released, this memcg
529				 * can be reparented before us. So reload
530				 * memcg id. More details see the comments
531				 * in memcg_reparent_list_lrus().
532				 */
533				index = READ_ONCE(table[i].memcg->kmemcg_id);
534				if (index < 0)
535					xas_set_err(&xas, 0);
536				else if (!xas_error(&xas) && index != xas.xa_index)
537					xas_set(&xas, index);
538				goto retry;
539			}
540		}
541	}
542	/* xas_nomem() is used to free memory instead of memory allocation. */
543	if (xas.xa_alloc)
544		xas_nomem(&xas, gfp);
545	xas_unlock_irqrestore(&xas, flags);
546	kfree(table);
547
548	return xas_error(&xas);
549}
550#else
551static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
552{
553}
554
555static void memcg_destroy_list_lru(struct list_lru *lru)
556{
557}
558#endif /* CONFIG_MEMCG_KMEM */
559
560int __list_lru_init(struct list_lru *lru, bool memcg_aware,
561		    struct lock_class_key *key, struct shrinker *shrinker)
562{
563	int i;
564
565#ifdef CONFIG_MEMCG_KMEM
566	if (shrinker)
567		lru->shrinker_id = shrinker->id;
568	else
569		lru->shrinker_id = -1;
570#endif
571
572	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
573	if (!lru->node)
574		return -ENOMEM;
575
576	for_each_node(i) {
577		spin_lock_init(&lru->node[i].lock);
578		if (key)
579			lockdep_set_class(&lru->node[i].lock, key);
580		init_one_lru(&lru->node[i].lru);
581	}
582
583	memcg_init_list_lru(lru, memcg_aware);
584	list_lru_register(lru);
585
586	return 0;
587}
588EXPORT_SYMBOL_GPL(__list_lru_init);
589
590void list_lru_destroy(struct list_lru *lru)
591{
592	/* Already destroyed or not yet initialized? */
593	if (!lru->node)
594		return;
595
596	list_lru_unregister(lru);
597
598	memcg_destroy_list_lru(lru);
599	kfree(lru->node);
600	lru->node = NULL;
601
602#ifdef CONFIG_MEMCG_KMEM
603	lru->shrinker_id = -1;
604#endif
605}
606EXPORT_SYMBOL_GPL(list_lru_destroy);
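
For comparison, here is the same hypothetical consumer adapted to the v6.8 API above. The main caller-visible changes: __list_lru_init() also takes a shrinker, per-memcg lists are lazily allocated in an XArray keyed by kmemcg_id (memcg_list_lru_alloc()), and the list_lru_add_obj()/list_lru_del_obj() wrappers derive the node and memcg from the object itself. The demo_* names remain illustrative only and are not part of the kernel tree.

/* A minimal sketch of a v6.8-era list_lru user; demo_* names are invented. */
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct list_head lru_link;	/* must be empty before list_lru_add_obj() */
	/* ... payload ... */
};

static struct list_lru demo_lru;

static enum lru_status demo_isolate(struct list_head *item,
				    struct list_lru_one *list,
				    spinlock_t *lock, void *cb_arg)
{
	struct list_head *freeable = cb_arg;

	list_lru_isolate_move(list, item, freeable);
	return LRU_REMOVED;
}

static int demo_setup(struct shrinker *shrinker)
{
	/*
	 * memcg_aware == false keeps everything on the per-node lists.  A
	 * memcg-aware user would pass true plus its shrinker so that
	 * list_lru_add() can set the per-memcg shrinker bit on first insert.
	 */
	return __list_lru_init(&demo_lru, false, NULL, shrinker);
}

static void demo_track(struct demo_obj *obj)
{
	/*
	 * The _obj wrapper derives nid from virt_to_page(obj) and, for
	 * memcg-aware lrus, the memcg from the slab object.
	 */
	list_lru_add_obj(&demo_lru, &obj->lru_link);
}

static unsigned long demo_reclaim(int nid, unsigned long nr_to_walk)
{
	LIST_HEAD(freeable);
	unsigned long isolated;

	/* A NULL memcg (memcg_kmem_id() == -1) selects the per-node list. */
	isolated = list_lru_walk_one(&demo_lru, nid, NULL, demo_isolate,
				     &freeable, &nr_to_walk);
	/* ... dispose of everything parked on &freeable ... */
	return isolated;
}

Note a locking difference visible in the two listings: in v6.8, list_lru_walk_one() takes the per-node spinlock itself around __list_lru_walk_one(), whereas in v4.17 the inner helper acquired and released it.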