mm/list_lru.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  4 * Authors: David Chinner and Glauber Costa
  5 *
  6 * Generic LRU infrastructure
  7 */
  8#include <linux/kernel.h>
  9#include <linux/module.h>
 10#include <linux/mm.h>
 11#include <linux/list_lru.h>
 12#include <linux/slab.h>
 13#include <linux/mutex.h>
 14#include <linux/memcontrol.h>
 15#include "slab.h"
 16#include "internal.h"
 17
 18#ifdef CONFIG_MEMCG_KMEM
 19static LIST_HEAD(memcg_list_lrus);
 20static DEFINE_MUTEX(list_lrus_mutex);
 21
 22static inline bool list_lru_memcg_aware(struct list_lru *lru)
 23{
 24	return lru->memcg_aware;
 25}
 26
 27static void list_lru_register(struct list_lru *lru)
 28{
 29	if (!list_lru_memcg_aware(lru))
 30		return;
 31
 32	mutex_lock(&list_lrus_mutex);
 33	list_add(&lru->list, &memcg_list_lrus);
 34	mutex_unlock(&list_lrus_mutex);
 35}
 36
 37static void list_lru_unregister(struct list_lru *lru)
 38{
 39	if (!list_lru_memcg_aware(lru))
 40		return;
 41
 42	mutex_lock(&list_lrus_mutex);
 43	list_del(&lru->list);
 44	mutex_unlock(&list_lrus_mutex);
 45}
 46
 47static int lru_shrinker_id(struct list_lru *lru)
 48{
 49	return lru->shrinker_id;
 50}
 51
 52static inline struct list_lru_one *
 53list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 54{
 55	if (list_lru_memcg_aware(lru) && idx >= 0) {
 56		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);
 57
 58		return mlru ? &mlru->node[nid] : NULL;
 59	}
 60	return &lru->node[nid].lru;
 61}
 62#else
 63static void list_lru_register(struct list_lru *lru)
 64{
 65}
 66
 67static void list_lru_unregister(struct list_lru *lru)
 68{
 69}
 70
 71static int lru_shrinker_id(struct list_lru *lru)
 72{
 73	return -1;
 74}
 75
 76static inline bool list_lru_memcg_aware(struct list_lru *lru)
 77{
 78	return false;
 79}
 80
 81static inline struct list_lru_one *
 82list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 83{
 84	return &lru->node[nid].lru;
 85}
 86#endif /* CONFIG_MEMCG_KMEM */
 87
 88bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 89		    struct mem_cgroup *memcg)
 90{
 91	struct list_lru_node *nlru = &lru->node[nid];
 92	struct list_lru_one *l;
 93
 94	spin_lock(&nlru->lock);
 95	if (list_empty(item)) {
 96		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
 97		list_add_tail(item, &l->list);
 98		/* Set shrinker bit if the first element was added */
 99		if (!l->nr_items++)
100			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
101		nlru->nr_items++;
102		spin_unlock(&nlru->lock);
103		return true;
104	}
105	spin_unlock(&nlru->lock);
106	return false;
107}
108EXPORT_SYMBOL_GPL(list_lru_add);
109
110bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
111{
112	int nid = page_to_nid(virt_to_page(item));
113	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
114		mem_cgroup_from_slab_obj(item) : NULL;
115
116	return list_lru_add(lru, item, nid, memcg);
117}
118EXPORT_SYMBOL_GPL(list_lru_add_obj);
119
120bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
121		    struct mem_cgroup *memcg)
122{
123	struct list_lru_node *nlru = &lru->node[nid];
124	struct list_lru_one *l;
125
126	spin_lock(&nlru->lock);
127	if (!list_empty(item)) {
128		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
129		list_del_init(item);
130		l->nr_items--;
131		nlru->nr_items--;
132		spin_unlock(&nlru->lock);
133		return true;
134	}
135	spin_unlock(&nlru->lock);
136	return false;
137}
138EXPORT_SYMBOL_GPL(list_lru_del);
139
140bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
141{
142	int nid = page_to_nid(virt_to_page(item));
143	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
144		mem_cgroup_from_slab_obj(item) : NULL;
145
146	return list_lru_del(lru, item, nid, memcg);
147}
148EXPORT_SYMBOL_GPL(list_lru_del_obj);
149
150void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
151{
152	list_del_init(item);
153	list->nr_items--;
154}
155EXPORT_SYMBOL_GPL(list_lru_isolate);
156
157void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
158			   struct list_head *head)
159{
160	list_move(item, head);
161	list->nr_items--;
162}
163EXPORT_SYMBOL_GPL(list_lru_isolate_move);
164
165void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
166		      struct mem_cgroup *memcg)
167{
168	struct list_lru_one *list =
169		list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
170
171	if (list_empty(item)) {
172		list_add_tail(item, &list->list);
173		if (!list->nr_items++)
174			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
175	}
176}
177EXPORT_SYMBOL_GPL(list_lru_putback);
178
179unsigned long list_lru_count_one(struct list_lru *lru,
180				 int nid, struct mem_cgroup *memcg)
181{
182	struct list_lru_one *l;
183	long count;
184
185	rcu_read_lock();
186	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
187	count = l ? READ_ONCE(l->nr_items) : 0;
188	rcu_read_unlock();
189
190	if (unlikely(count < 0))
191		count = 0;
192
193	return count;
194}
195EXPORT_SYMBOL_GPL(list_lru_count_one);
196
197unsigned long list_lru_count_node(struct list_lru *lru, int nid)
198{
199	struct list_lru_node *nlru;
200
201	nlru = &lru->node[nid];
202	return nlru->nr_items;
203}
204EXPORT_SYMBOL_GPL(list_lru_count_node);
205
206static unsigned long
207__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
208		    list_lru_walk_cb isolate, void *cb_arg,
209		    unsigned long *nr_to_walk)
210{
211	struct list_lru_node *nlru = &lru->node[nid];
212	struct list_lru_one *l;
213	struct list_head *item, *n;
214	unsigned long isolated = 0;
215
216restart:
217	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
218	if (!l)
219		goto out;
220
221	list_for_each_safe(item, n, &l->list) {
222		enum lru_status ret;
223
224		/*
225		 * decrement nr_to_walk first so that we don't livelock if we
226		 * get stuck on large numbers of LRU_RETRY items
227		 */
228		if (!*nr_to_walk)
229			break;
230		--*nr_to_walk;
231
232		ret = isolate(item, l, &nlru->lock, cb_arg);
233		switch (ret) {
234		case LRU_REMOVED_RETRY:
235			assert_spin_locked(&nlru->lock);
236			fallthrough;
237		case LRU_REMOVED:
238			isolated++;
239			nlru->nr_items--;
240			/*
241			 * If the lru lock has been dropped, our list
242			 * traversal is now invalid and so we have to
243			 * restart from scratch.
244			 */
245			if (ret == LRU_REMOVED_RETRY)
246				goto restart;
247			break;
248		case LRU_ROTATE:
249			list_move_tail(item, &l->list);
250			break;
251		case LRU_SKIP:
252			break;
253		case LRU_RETRY:
254			/*
255			 * The lru lock has been dropped, our list traversal is
256			 * now invalid and so we have to restart from scratch.
257			 */
258			assert_spin_locked(&nlru->lock);
259			goto restart;
260		default:
261			BUG();
262		}
263	}
264out:
265	return isolated;
266}
267
268unsigned long
269list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
270		  list_lru_walk_cb isolate, void *cb_arg,
271		  unsigned long *nr_to_walk)
272{
273	struct list_lru_node *nlru = &lru->node[nid];
274	unsigned long ret;
275
276	spin_lock(&nlru->lock);
277	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
278				  cb_arg, nr_to_walk);
279	spin_unlock(&nlru->lock);
280	return ret;
281}
282EXPORT_SYMBOL_GPL(list_lru_walk_one);
283
284unsigned long
285list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
286		      list_lru_walk_cb isolate, void *cb_arg,
287		      unsigned long *nr_to_walk)
288{
289	struct list_lru_node *nlru = &lru->node[nid];
290	unsigned long ret;
291
292	spin_lock_irq(&nlru->lock);
293	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
294				  cb_arg, nr_to_walk);
295	spin_unlock_irq(&nlru->lock);
296	return ret;
297}
298
299unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
300				 list_lru_walk_cb isolate, void *cb_arg,
301				 unsigned long *nr_to_walk)
302{
303	long isolated = 0;
304
305	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
306				      nr_to_walk);
307
308#ifdef CONFIG_MEMCG_KMEM
309	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
310		struct list_lru_memcg *mlru;
311		unsigned long index;
312
313		xa_for_each(&lru->xa, index, mlru) {
314			struct list_lru_node *nlru = &lru->node[nid];
315
316			spin_lock(&nlru->lock);
317			isolated += __list_lru_walk_one(lru, nid, index,
318							isolate, cb_arg,
319							nr_to_walk);
320			spin_unlock(&nlru->lock);
321
322			if (*nr_to_walk <= 0)
323				break;
324		}
325	}
326#endif
327
328	return isolated;
329}
330EXPORT_SYMBOL_GPL(list_lru_walk_node);
331
332static void init_one_lru(struct list_lru_one *l)
333{
334	INIT_LIST_HEAD(&l->list);
335	l->nr_items = 0;
336}
337
338#ifdef CONFIG_MEMCG_KMEM
339static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
340{
341	int nid;
342	struct list_lru_memcg *mlru;
343
344	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
345	if (!mlru)
346		return NULL;
347
348	for_each_node(nid)
349		init_one_lru(&mlru->node[nid]);
350
351	return mlru;
352}
353
354static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
355{
356	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
357
358	/*
359	 * The __list_lru_walk_one() can walk the list of this node.
360	 * We need kvfree_rcu() here. And the walking of the list
361	 * is under lru->node[nid]->lock, which can serve as a RCU
362	 * read-side critical section.
363	 */
364	if (mlru)
365		kvfree_rcu(mlru, rcu);
366}
367
368static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
369{
370	if (memcg_aware)
371		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
372	lru->memcg_aware = memcg_aware;
373}
374
375static void memcg_destroy_list_lru(struct list_lru *lru)
376{
377	XA_STATE(xas, &lru->xa, 0);
378	struct list_lru_memcg *mlru;
379
380	if (!list_lru_memcg_aware(lru))
381		return;
382
383	xas_lock_irq(&xas);
384	xas_for_each(&xas, mlru, ULONG_MAX) {
385		kfree(mlru);
386		xas_store(&xas, NULL);
387	}
388	xas_unlock_irq(&xas);
389}
390
391static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
392					 int src_idx, struct mem_cgroup *dst_memcg)
393{
394	struct list_lru_node *nlru = &lru->node[nid];
395	int dst_idx = dst_memcg->kmemcg_id;
396	struct list_lru_one *src, *dst;
397
398	/*
399	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
400	 * we have to use IRQ-safe primitives here to avoid deadlock.
401	 */
402	spin_lock_irq(&nlru->lock);
403
404	src = list_lru_from_memcg_idx(lru, nid, src_idx);
405	if (!src)
406		goto out;
407	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);
408
409	list_splice_init(&src->list, &dst->list);
410
411	if (src->nr_items) {
412		dst->nr_items += src->nr_items;
413		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
414		src->nr_items = 0;
415	}
416out:
417	spin_unlock_irq(&nlru->lock);
418}
419
420static void memcg_reparent_list_lru(struct list_lru *lru,
421				    int src_idx, struct mem_cgroup *dst_memcg)
422{
423	int i;
424
425	for_each_node(i)
426		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);
427
428	memcg_list_lru_free(lru, src_idx);
429}
430
431void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
432{
433	struct cgroup_subsys_state *css;
434	struct list_lru *lru;
435	int src_idx = memcg->kmemcg_id;
436
437	/*
438	 * Change kmemcg_id of this cgroup and all its descendants to the
439	 * parent's id, and then move all entries from this cgroup's list_lrus
440	 * to ones of the parent.
441	 *
442	 * After we have finished, all list_lrus corresponding to this cgroup
443	 * are guaranteed to remain empty. So we can safely free this cgroup's
444	 * list lrus in memcg_list_lru_free().
445	 *
446	 * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc()
447	 * from allocating list lrus for this cgroup after memcg_list_lru_free()
448	 * call.
449	 */
450	rcu_read_lock();
451	css_for_each_descendant_pre(css, &memcg->css) {
452		struct mem_cgroup *child;
453
454		child = mem_cgroup_from_css(css);
455		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
456	}
457	rcu_read_unlock();
458
459	mutex_lock(&list_lrus_mutex);
460	list_for_each_entry(lru, &memcg_list_lrus, list)
461		memcg_reparent_list_lru(lru, src_idx, parent);
462	mutex_unlock(&list_lrus_mutex);
463}
464
465static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
466					    struct list_lru *lru)
467{
468	int idx = memcg->kmemcg_id;
469
470	return idx < 0 || xa_load(&lru->xa, idx);
471}
472
473int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
474			 gfp_t gfp)
475{
476	int i;
477	unsigned long flags;
478	struct list_lru_memcg_table {
479		struct list_lru_memcg *mlru;
480		struct mem_cgroup *memcg;
481	} *table;
482	XA_STATE(xas, &lru->xa, 0);
483
484	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
485		return 0;
486
487	gfp &= GFP_RECLAIM_MASK;
488	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
489	if (!table)
490		return -ENOMEM;
491
492	/*
493	 * Because the list_lru can be reparented to the parent cgroup's
494	 * list_lru, we should make sure that this cgroup and all its
495	 * ancestors have allocated list_lru_memcg.
496	 */
497	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
498		if (memcg_list_lru_allocated(memcg, lru))
499			break;
500
501		table[i].memcg = memcg;
502		table[i].mlru = memcg_init_list_lru_one(gfp);
503		if (!table[i].mlru) {
504			while (i--)
505				kfree(table[i].mlru);
506			kfree(table);
507			return -ENOMEM;
508		}
509	}
510
511	xas_lock_irqsave(&xas, flags);
512	while (i--) {
513		int index = READ_ONCE(table[i].memcg->kmemcg_id);
514		struct list_lru_memcg *mlru = table[i].mlru;
515
516		xas_set(&xas, index);
517retry:
518		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
519			kfree(mlru);
520		} else {
521			xas_store(&xas, mlru);
522			if (xas_error(&xas) == -ENOMEM) {
523				xas_unlock_irqrestore(&xas, flags);
524				if (xas_nomem(&xas, gfp))
525					xas_set_err(&xas, 0);
526				xas_lock_irqsave(&xas, flags);
527				/*
528				 * The xas lock has been released, this memcg
529				 * can be reparented before us. So reload
530				 * memcg id. More details see the comments
531				 * in memcg_reparent_list_lrus().
532				 */
533				index = READ_ONCE(table[i].memcg->kmemcg_id);
534				if (index < 0)
535					xas_set_err(&xas, 0);
536				else if (!xas_error(&xas) && index != xas.xa_index)
537					xas_set(&xas, index);
538				goto retry;
539			}
540		}
541	}
542	/* xas_nomem() is used to free memory instead of memory allocation. */
543	if (xas.xa_alloc)
544		xas_nomem(&xas, gfp);
545	xas_unlock_irqrestore(&xas, flags);
546	kfree(table);
547
548	return xas_error(&xas);
549}
550#else
551static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
552{
553}
554
555static void memcg_destroy_list_lru(struct list_lru *lru)
556{
557}
558#endif /* CONFIG_MEMCG_KMEM */
559
560int __list_lru_init(struct list_lru *lru, bool memcg_aware,
561		    struct lock_class_key *key, struct shrinker *shrinker)
562{
563	int i;
564
565#ifdef CONFIG_MEMCG_KMEM
566	if (shrinker)
567		lru->shrinker_id = shrinker->id;
568	else
569		lru->shrinker_id = -1;
570#endif
571
572	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
573	if (!lru->node)
574		return -ENOMEM;
575
576	for_each_node(i) {
577		spin_lock_init(&lru->node[i].lock);
578		if (key)
579			lockdep_set_class(&lru->node[i].lock, key);
580		init_one_lru(&lru->node[i].lru);
581	}
582
583	memcg_init_list_lru(lru, memcg_aware);
584	list_lru_register(lru);
585
586	return 0;
587}
588EXPORT_SYMBOL_GPL(__list_lru_init);
589
590void list_lru_destroy(struct list_lru *lru)
591{
592	/* Already destroyed or not yet initialized? */
593	if (!lru->node)
594		return;
595
596	list_lru_unregister(lru);
597
598	memcg_destroy_list_lru(lru);
599	kfree(lru->node);
600	lru->node = NULL;
601
602#ifdef CONFIG_MEMCG_KMEM
603	lru->shrinker_id = -1;
604#endif
605}
606EXPORT_SYMBOL_GPL(list_lru_destroy);
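
The listing above is the complete v6.8 implementation. As a rough illustration of how a subsystem drives this API, here is a minimal consumer sketch; it is not part of mm/list_lru.c. The struct my_obj, the my_cache_* helpers, and the assumption that isolated objects can be freed immediately are invented for this example; only the list_lru_* calls correspond to the v6.8 declarations in <linux/list_lru.h>.

/* Usage sketch (illustration only, not upstream code). */
#include <linux/container_of.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

struct my_obj {
	struct list_head lru;		/* linked into the list_lru */
	/* ... cached payload ... */
};

static struct list_lru my_lru;

/* Isolate callback: runs under the per-node lock inside the walk. */
static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *list,
				  spinlock_t *lock, void *cb_arg)
{
	struct my_obj *obj = container_of(item, struct my_obj, lru);

	/* Unlink from the LRU; the walker then drops nlru->nr_items. */
	list_lru_isolate(list, item);
	kfree(obj);			/* assumption: no other references remain */
	return LRU_REMOVED;
}

static int my_cache_init(void)
{
	/* Plain LRU; list_lru_init_memcg() would make it memcg aware. */
	return list_lru_init(&my_lru);
}

static void my_cache_insert(struct my_obj *obj)
{
	INIT_LIST_HEAD(&obj->lru);	/* only list_empty() items are added */
	/* v6.8: NUMA node and memcg are derived from the object itself. */
	list_lru_add_obj(&my_lru, &obj->lru);
}

static unsigned long my_cache_shrink(int nid, unsigned long nr_to_walk)
{
	/* NULL memcg walks the node's root list. */
	return list_lru_walk_one(&my_lru, nid, NULL, my_isolate, NULL,
				 &nr_to_walk);
}

static void my_cache_exit(void)
{
	list_lru_destroy(&my_lru);
}
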
mm/list_lru.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  4 * Authors: David Chinner and Glauber Costa
  5 *
  6 * Generic LRU infrastructure
  7 */
  8#include <linux/kernel.h>
  9#include <linux/module.h>
 10#include <linux/mm.h>
 11#include <linux/list_lru.h>
 12#include <linux/slab.h>
 13#include <linux/mutex.h>
 14#include <linux/memcontrol.h>
 15#include "slab.h"
 16
 17#ifdef CONFIG_MEMCG_KMEM
 18static LIST_HEAD(list_lrus);
 19static DEFINE_MUTEX(list_lrus_mutex);
 20
 21static void list_lru_register(struct list_lru *lru)
 22{
 23	mutex_lock(&list_lrus_mutex);
 24	list_add(&lru->list, &list_lrus);
 25	mutex_unlock(&list_lrus_mutex);
 26}
 27
 28static void list_lru_unregister(struct list_lru *lru)
 29{
 30	mutex_lock(&list_lrus_mutex);
 31	list_del(&lru->list);
 32	mutex_unlock(&list_lrus_mutex);
 33}
 34
 35static int lru_shrinker_id(struct list_lru *lru)
 36{
 37	return lru->shrinker_id;
 38}
 39
 40static inline bool list_lru_memcg_aware(struct list_lru *lru)
 41{
 42	return lru->memcg_aware;
 43}
 44
 45static inline struct list_lru_one *
 46list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
 47{
 48	struct list_lru_memcg *memcg_lrus;
 49	/*
 50	 * Either lock or RCU protects the array of per cgroup lists
 51	 * from relocation (see memcg_update_list_lru_node).
 52	 */
 53	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
 54					   lockdep_is_held(&nlru->lock));
 55	if (memcg_lrus && idx >= 0)
 56		return memcg_lrus->lru[idx];
 57	return &nlru->lru;
 58}
 59
 60static inline struct list_lru_one *
 61list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
 62		   struct mem_cgroup **memcg_ptr)
 63{
 64	struct list_lru_one *l = &nlru->lru;
 65	struct mem_cgroup *memcg = NULL;
 66
 67	if (!nlru->memcg_lrus)
 68		goto out;
 69
 70	memcg = mem_cgroup_from_obj(ptr);
 71	if (!memcg)
 72		goto out;
 73
 74	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
 75out:
 76	if (memcg_ptr)
 77		*memcg_ptr = memcg;
 78	return l;
 79}
 80#else
 81static void list_lru_register(struct list_lru *lru)
 82{
 83}
 84
 85static void list_lru_unregister(struct list_lru *lru)
 86{
 87}
 88
 89static int lru_shrinker_id(struct list_lru *lru)
 90{
 91	return -1;
 92}
 93
 94static inline bool list_lru_memcg_aware(struct list_lru *lru)
 95{
 96	return false;
 97}
 98
 99static inline struct list_lru_one *
100list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
101{
102	return &nlru->lru;
103}
104
105static inline struct list_lru_one *
106list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
107		   struct mem_cgroup **memcg_ptr)
108{
109	if (memcg_ptr)
110		*memcg_ptr = NULL;
111	return &nlru->lru;
112}
113#endif /* CONFIG_MEMCG_KMEM */
114
115bool list_lru_add(struct list_lru *lru, struct list_head *item)
116{
117	int nid = page_to_nid(virt_to_page(item));
118	struct list_lru_node *nlru = &lru->node[nid];
119	struct mem_cgroup *memcg;
120	struct list_lru_one *l;
121
122	spin_lock(&nlru->lock);
123	if (list_empty(item)) {
124		l = list_lru_from_kmem(nlru, item, &memcg);
125		list_add_tail(item, &l->list);
126		/* Set shrinker bit if the first element was added */
127		if (!l->nr_items++)
128			memcg_set_shrinker_bit(memcg, nid,
129					       lru_shrinker_id(lru));
130		nlru->nr_items++;
131		spin_unlock(&nlru->lock);
132		return true;
133	}
134	spin_unlock(&nlru->lock);
135	return false;
136}
137EXPORT_SYMBOL_GPL(list_lru_add);
138
139bool list_lru_del(struct list_lru *lru, struct list_head *item)
140{
141	int nid = page_to_nid(virt_to_page(item));
142	struct list_lru_node *nlru = &lru->node[nid];
143	struct list_lru_one *l;
144
145	spin_lock(&nlru->lock);
146	if (!list_empty(item)) {
147		l = list_lru_from_kmem(nlru, item, NULL);
148		list_del_init(item);
149		l->nr_items--;
150		nlru->nr_items--;
151		spin_unlock(&nlru->lock);
152		return true;
153	}
154	spin_unlock(&nlru->lock);
155	return false;
156}
157EXPORT_SYMBOL_GPL(list_lru_del);
158
159void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
160{
161	list_del_init(item);
162	list->nr_items--;
163}
164EXPORT_SYMBOL_GPL(list_lru_isolate);
165
166void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
167			   struct list_head *head)
168{
169	list_move(item, head);
170	list->nr_items--;
171}
172EXPORT_SYMBOL_GPL(list_lru_isolate_move);
173
174unsigned long list_lru_count_one(struct list_lru *lru,
175				 int nid, struct mem_cgroup *memcg)
176{
177	struct list_lru_node *nlru = &lru->node[nid];
178	struct list_lru_one *l;
179	unsigned long count;
180
181	rcu_read_lock();
182	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
183	count = READ_ONCE(l->nr_items);
184	rcu_read_unlock();
185
186	return count;
187}
188EXPORT_SYMBOL_GPL(list_lru_count_one);
189
190unsigned long list_lru_count_node(struct list_lru *lru, int nid)
191{
192	struct list_lru_node *nlru;
193
194	nlru = &lru->node[nid];
195	return nlru->nr_items;
196}
197EXPORT_SYMBOL_GPL(list_lru_count_node);
198
199static unsigned long
200__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
201		    list_lru_walk_cb isolate, void *cb_arg,
202		    unsigned long *nr_to_walk)
203{
204
205	struct list_lru_one *l;
206	struct list_head *item, *n;
207	unsigned long isolated = 0;
208
209	l = list_lru_from_memcg_idx(nlru, memcg_idx);
210restart:
211	list_for_each_safe(item, n, &l->list) {
212		enum lru_status ret;
213
214		/*
215		 * decrement nr_to_walk first so that we don't livelock if we
216		 * get stuck on large numbers of LRU_RETRY items
217		 */
218		if (!*nr_to_walk)
219			break;
220		--*nr_to_walk;
221
222		ret = isolate(item, l, &nlru->lock, cb_arg);
223		switch (ret) {
224		case LRU_REMOVED_RETRY:
225			assert_spin_locked(&nlru->lock);
226			fallthrough;
227		case LRU_REMOVED:
228			isolated++;
229			nlru->nr_items--;
230			/*
231			 * If the lru lock has been dropped, our list
232			 * traversal is now invalid and so we have to
233			 * restart from scratch.
234			 */
235			if (ret == LRU_REMOVED_RETRY)
236				goto restart;
237			break;
238		case LRU_ROTATE:
239			list_move_tail(item, &l->list);
240			break;
241		case LRU_SKIP:
242			break;
243		case LRU_RETRY:
244			/*
245			 * The lru lock has been dropped, our list traversal is
246			 * now invalid and so we have to restart from scratch.
247			 */
248			assert_spin_locked(&nlru->lock);
249			goto restart;
250		default:
251			BUG();
252		}
253	}
254	return isolated;
255}
256
257unsigned long
258list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
259		  list_lru_walk_cb isolate, void *cb_arg,
260		  unsigned long *nr_to_walk)
261{
262	struct list_lru_node *nlru = &lru->node[nid];
263	unsigned long ret;
264
265	spin_lock(&nlru->lock);
266	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
267				  nr_to_walk);
268	spin_unlock(&nlru->lock);
269	return ret;
270}
271EXPORT_SYMBOL_GPL(list_lru_walk_one);
272
273unsigned long
274list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
275		      list_lru_walk_cb isolate, void *cb_arg,
276		      unsigned long *nr_to_walk)
277{
278	struct list_lru_node *nlru = &lru->node[nid];
279	unsigned long ret;
280
281	spin_lock_irq(&nlru->lock);
282	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
283				  nr_to_walk);
284	spin_unlock_irq(&nlru->lock);
285	return ret;
286}
287
288unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
289				 list_lru_walk_cb isolate, void *cb_arg,
290				 unsigned long *nr_to_walk)
291{
292	long isolated = 0;
293	int memcg_idx;
294
295	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
296				      nr_to_walk);
297	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
298		for_each_memcg_cache_index(memcg_idx) {
299			struct list_lru_node *nlru = &lru->node[nid];
300
301			spin_lock(&nlru->lock);
302			isolated += __list_lru_walk_one(nlru, memcg_idx,
303							isolate, cb_arg,
304							nr_to_walk);
305			spin_unlock(&nlru->lock);
306
307			if (*nr_to_walk <= 0)
308				break;
309		}
310	}
311	return isolated;
312}
313EXPORT_SYMBOL_GPL(list_lru_walk_node);
314
315static void init_one_lru(struct list_lru_one *l)
316{
317	INIT_LIST_HEAD(&l->list);
318	l->nr_items = 0;
319}
320
321#ifdef CONFIG_MEMCG_KMEM
322static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
323					  int begin, int end)
324{
325	int i;
326
327	for (i = begin; i < end; i++)
328		kfree(memcg_lrus->lru[i]);
329}
330
331static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
332				      int begin, int end)
333{
334	int i;
335
336	for (i = begin; i < end; i++) {
337		struct list_lru_one *l;
338
339		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
340		if (!l)
341			goto fail;
342
343		init_one_lru(l);
344		memcg_lrus->lru[i] = l;
345	}
346	return 0;
347fail:
348	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
349	return -ENOMEM;
350}
351
352static int memcg_init_list_lru_node(struct list_lru_node *nlru)
353{
354	struct list_lru_memcg *memcg_lrus;
355	int size = memcg_nr_cache_ids;
356
357	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
358			      size * sizeof(void *), GFP_KERNEL);
359	if (!memcg_lrus)
360		return -ENOMEM;
361
362	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
363		kvfree(memcg_lrus);
364		return -ENOMEM;
365	}
366	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
367
368	return 0;
369}
370
371static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
372{
373	struct list_lru_memcg *memcg_lrus;
374	/*
375	 * This is called when shrinker has already been unregistered,
376	 * and nobody can use it. So, there is no need to use kvfree_rcu_local().
377	 */
378	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
379	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
380	kvfree(memcg_lrus);
381}
382
383static void kvfree_rcu_local(struct rcu_head *head)
384{
385	struct list_lru_memcg *mlru;
386
387	mlru = container_of(head, struct list_lru_memcg, rcu);
388	kvfree(mlru);
389}
390
391static int memcg_update_list_lru_node(struct list_lru_node *nlru,
392				      int old_size, int new_size)
393{
394	struct list_lru_memcg *old, *new;
395
396	BUG_ON(old_size > new_size);
397
398	old = rcu_dereference_protected(nlru->memcg_lrus,
399					lockdep_is_held(&list_lrus_mutex));
400	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
401	if (!new)
402		return -ENOMEM;
403
404	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
405		kvfree(new);
406		return -ENOMEM;
407	}
408
409	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
410
411	/*
412	 * The locking below allows readers that hold nlru->lock avoid taking
413	 * rcu_read_lock (see list_lru_from_memcg_idx).
414	 *
415	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
416	 * we have to use IRQ-safe primitives here to avoid deadlock.
417	 */
418	spin_lock_irq(&nlru->lock);
419	rcu_assign_pointer(nlru->memcg_lrus, new);
420	spin_unlock_irq(&nlru->lock);
421
422	call_rcu(&old->rcu, kvfree_rcu_local);
423	return 0;
424}
425
426static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
427					      int old_size, int new_size)
428{
429	struct list_lru_memcg *memcg_lrus;
430
431	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
432					       lockdep_is_held(&list_lrus_mutex));
433	/* do not bother shrinking the array back to the old size, because we
434	 * cannot handle allocation failures here */
435	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
436}
437
438static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
439{
440	int i;
441
442	lru->memcg_aware = memcg_aware;
443
444	if (!memcg_aware)
445		return 0;
446
447	for_each_node(i) {
448		if (memcg_init_list_lru_node(&lru->node[i]))
449			goto fail;
450	}
451	return 0;
452fail:
453	for (i = i - 1; i >= 0; i--) {
454		if (!lru->node[i].memcg_lrus)
455			continue;
456		memcg_destroy_list_lru_node(&lru->node[i]);
457	}
458	return -ENOMEM;
459}
460
461static void memcg_destroy_list_lru(struct list_lru *lru)
462{
463	int i;
464
465	if (!list_lru_memcg_aware(lru))
466		return;
467
468	for_each_node(i)
469		memcg_destroy_list_lru_node(&lru->node[i]);
470}
471
472static int memcg_update_list_lru(struct list_lru *lru,
473				 int old_size, int new_size)
474{
475	int i;
476
477	if (!list_lru_memcg_aware(lru))
478		return 0;
479
480	for_each_node(i) {
481		if (memcg_update_list_lru_node(&lru->node[i],
482					       old_size, new_size))
483			goto fail;
484	}
485	return 0;
486fail:
487	for (i = i - 1; i >= 0; i--) {
488		if (!lru->node[i].memcg_lrus)
489			continue;
490
491		memcg_cancel_update_list_lru_node(&lru->node[i],
492						  old_size, new_size);
493	}
494	return -ENOMEM;
495}
496
497static void memcg_cancel_update_list_lru(struct list_lru *lru,
498					 int old_size, int new_size)
499{
500	int i;
501
502	if (!list_lru_memcg_aware(lru))
503		return;
504
505	for_each_node(i)
506		memcg_cancel_update_list_lru_node(&lru->node[i],
507						  old_size, new_size);
508}
509
510int memcg_update_all_list_lrus(int new_size)
511{
512	int ret = 0;
513	struct list_lru *lru;
514	int old_size = memcg_nr_cache_ids;
515
516	mutex_lock(&list_lrus_mutex);
517	list_for_each_entry(lru, &list_lrus, list) {
518		ret = memcg_update_list_lru(lru, old_size, new_size);
519		if (ret)
520			goto fail;
521	}
522out:
523	mutex_unlock(&list_lrus_mutex);
524	return ret;
525fail:
526	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
527		memcg_cancel_update_list_lru(lru, old_size, new_size);
528	goto out;
529}
530
531static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
532				      int src_idx, struct mem_cgroup *dst_memcg)
533{
534	struct list_lru_node *nlru = &lru->node[nid];
535	int dst_idx = dst_memcg->kmemcg_id;
536	struct list_lru_one *src, *dst;
537	bool set;
538
539	/*
540	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
541	 * we have to use IRQ-safe primitives here to avoid deadlock.
542	 */
543	spin_lock_irq(&nlru->lock);
544
545	src = list_lru_from_memcg_idx(nlru, src_idx);
546	dst = list_lru_from_memcg_idx(nlru, dst_idx);
547
548	list_splice_init(&src->list, &dst->list);
549	set = (!dst->nr_items && src->nr_items);
550	dst->nr_items += src->nr_items;
551	if (set)
552		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
553	src->nr_items = 0;
554
555	spin_unlock_irq(&nlru->lock);
556}
557
558static void memcg_drain_list_lru(struct list_lru *lru,
559				 int src_idx, struct mem_cgroup *dst_memcg)
560{
561	int i;
562
563	if (!list_lru_memcg_aware(lru))
564		return;
565
566	for_each_node(i)
567		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
568}
569
570void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
571{
572	struct list_lru *lru;
573
574	mutex_lock(&list_lrus_mutex);
575	list_for_each_entry(lru, &list_lrus, list)
576		memcg_drain_list_lru(lru, src_idx, dst_memcg);
577	mutex_unlock(&list_lrus_mutex);
578}
579#else
580static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
581{
582	return 0;
583}
584
585static void memcg_destroy_list_lru(struct list_lru *lru)
586{
587}
588#endif /* CONFIG_MEMCG_KMEM */
589
590int __list_lru_init(struct list_lru *lru, bool memcg_aware,
591		    struct lock_class_key *key, struct shrinker *shrinker)
592{
593	int i;
594	int err = -ENOMEM;
595
596#ifdef CONFIG_MEMCG_KMEM
597	if (shrinker)
598		lru->shrinker_id = shrinker->id;
599	else
600		lru->shrinker_id = -1;
601#endif
602	memcg_get_cache_ids();
603
604	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
605	if (!lru->node)
606		goto out;
607
608	for_each_node(i) {
609		spin_lock_init(&lru->node[i].lock);
610		if (key)
611			lockdep_set_class(&lru->node[i].lock, key);
612		init_one_lru(&lru->node[i].lru);
613	}
614
615	err = memcg_init_list_lru(lru, memcg_aware);
616	if (err) {
617		kfree(lru->node);
618		/* Do this so a list_lru_destroy() doesn't crash: */
619		lru->node = NULL;
620		goto out;
621	}
622
623	list_lru_register(lru);
624out:
625	memcg_put_cache_ids();
626	return err;
627}
628EXPORT_SYMBOL_GPL(__list_lru_init);
629
630void list_lru_destroy(struct list_lru *lru)
631{
632	/* Already destroyed or not yet initialized? */
633	if (!lru->node)
634		return;
635
636	memcg_get_cache_ids();
637
638	list_lru_unregister(lru);
639
640	memcg_destroy_list_lru(lru);
641	kfree(lru->node);
642	lru->node = NULL;
643
644#ifdef CONFIG_MEMCG_KMEM
645	lru->shrinker_id = -1;
646#endif
647	memcg_put_cache_ids();
648}
649EXPORT_SYMBOL_GPL(list_lru_destroy);
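
For contrast with the v6.8 sketch above: in v5.9 the add/del entry points take only the lru and the item, and the NUMA node and memcg are always looked up internally via virt_to_page() and list_lru_from_kmem(). A minimal fragment, reusing the hypothetical my_obj and my_lru names from the earlier sketch:

/* v5.9-era insert/remove (illustration only). */
static void my_cache_insert_v59(struct my_obj *obj)
{
	INIT_LIST_HEAD(&obj->lru);
	list_lru_add(&my_lru, &obj->lru);	/* nid/memcg derived internally */
}

static void my_cache_remove_v59(struct my_obj *obj)
{
	list_lru_del(&my_lru, &obj->lru);
}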