// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	if (list_lru_memcg_aware(lru) && idx >= 0) {
		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

		return mlru ? &mlru->node[nid] : NULL;
	}
	return &lru->node[nid].lru;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
		  struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
		mem_cgroup_from_slab_obj(item) : NULL;

	return list_lru_add(lru, item, nid, memcg);
}
EXPORT_SYMBOL_GPL(list_lru_add_obj);
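
/*
 * Example (illustrative sketch, not part of this file): a typical user embeds
 * a list_head in its objects, calls list_lru_add_obj() when an object becomes
 * unused and list_lru_del_obj() when it is referenced again. The names
 * "demo_obj" and "demo_lru" below are made up for the sketch.
 *
 *	struct demo_obj {
 *		struct list_head lru;	// linked into demo_lru while idle
 *		refcount_t ref;
 *	};
 *
 *	static struct list_lru demo_lru;
 *
 *	static void demo_obj_unused(struct demo_obj *obj)
 *	{
 *		// returns false if the object was already on the list
 *		list_lru_add_obj(&demo_lru, &obj->lru);
 *	}
 *
 *	static void demo_obj_referenced(struct demo_obj *obj)
 *	{
 *		list_lru_del_obj(&demo_lru, &obj->lru);
 *	}
 *
 * The *_obj() variants derive the NUMA node and the owning memcg from the
 * object's address, so callers that already know nid/memcg can use
 * list_lru_add()/list_lru_del() directly.
 */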

bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
		  struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
		mem_cgroup_from_slab_obj(item) : NULL;

	return list_lru_del(lru, item, nid, memcg);
}
EXPORT_SYMBOL_GPL(list_lru_del_obj);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
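
/*
 * Example (sketch, hypothetical names): an isolate callback as passed to the
 * list_lru_walk*() family. It is invoked with the per-node lru lock held,
 * moves disposable objects onto a caller-private list with
 * list_lru_isolate_move(), and reports what it did via the lru_status return
 * value.
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *					    struct list_lru_one *l,
 *					    spinlock_t *lock, void *arg)
 *	{
 *		struct list_head *dispose = arg;
 *		struct demo_obj *obj = container_of(item, struct demo_obj, lru);
 *
 *		if (refcount_read(&obj->ref))	// still in use, keep it
 *			return LRU_ROTATE;
 *
 *		list_lru_isolate_move(l, item, dispose);
 *		return LRU_REMOVED;
 *	}
 *
 * Returning LRU_REMOVED(_RETRY) lets the walker account the removal against
 * nlru->nr_items; the *_RETRY variants and LRU_RETRY additionally tell it the
 * callback dropped and re-took the lru lock, so the walk must restart.
 */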

void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
		      struct mem_cgroup *memcg)
{
	struct list_lru_one *list =
		list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));

	if (list_empty(item)) {
		list_add_tail(item, &list->list);
		if (!list->nr_items++)
			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
	}
}
EXPORT_SYMBOL_GPL(list_lru_putback);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
	count = l ? READ_ONCE(l->nr_items) : 0;
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
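
/*
 * Example (sketch, assuming the wrappers from <linux/list_lru.h>): a
 * shrinker's count_objects() callback typically reaches list_lru_count_one()
 * through list_lru_shrink_count(), which picks nid and memcg out of the
 * shrink_control. "demo_lru" is a made-up name.
 *
 *	static unsigned long demo_count_objects(struct shrinker *shrink,
 *						struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&demo_lru, sc);
 *	}
 *
 * The count is a snapshot taken under RCU rather than the lru lock, so it may
 * be slightly stale; that is good enough for shrinker heuristics.
 */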

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

restart:
	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
	if (!l)
		goto out;

	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
out:
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
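
/*
 * Example (sketch, hypothetical names): the scan_objects() side of a shrinker
 * usually goes through list_lru_shrink_walk(), which calls list_lru_walk_one()
 * with the nid, memcg and nr_to_scan from the shrink_control; the objects
 * collected by the isolate callback are then freed outside the lru lock.
 *
 *	static unsigned long demo_scan_objects(struct shrinker *shrink,
 *					       struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&demo_lru, sc,
 *					     demo_isolate, &dispose);
 *		demo_dispose_list(&dispose);	// hypothetical helper
 *		return freed;
 *	}
 */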

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);

#ifdef CONFIG_MEMCG_KMEM
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		struct list_lru_memcg *mlru;
		unsigned long index;

		xa_for_each(&lru->xa, index, mlru) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, index,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
#endif

	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
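
/*
 * Example (sketch): callers that must empty an lru completely, e.g. at
 * unmount time when no new references can appear, commonly loop over the
 * nodes and keep walking until nothing is left. "demo_isolate" and
 * "demo_dispose_list" are the hypothetical helpers from the sketches above.
 *
 *	static void demo_dispose_all(struct list_lru *lru)
 *	{
 *		int nid;
 *
 *		for_each_node(nid) {
 *			while (list_lru_count_node(lru, nid)) {
 *				LIST_HEAD(dispose);
 *				unsigned long nr_to_walk = 64;
 *
 *				list_lru_walk_node(lru, nid, demo_isolate,
 *						   &dispose, &nr_to_walk);
 *				demo_dispose_list(&dispose);
 *			}
 *		}
 *	}
 */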

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
	int nid;
	struct list_lru_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(&mlru->node[nid]);

	return mlru;
}

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);

	/*
	 * The __list_lru_walk_one() can walk the list of this node.
	 * We need kvfree_rcu() here. And the walking of the list
	 * is under lru->node[nid]->lock, which can serve as a RCU
	 * read-side critical section.
	 */
	if (mlru)
		kvfree_rcu(mlru, rcu);
}

static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	if (memcg_aware)
		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
	lru->memcg_aware = memcg_aware;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	XA_STATE(xas, &lru->xa, 0);
	struct list_lru_memcg *mlru;

	if (!list_lru_memcg_aware(lru))
		return;

	xas_lock_irq(&xas);
	xas_for_each(&xas, mlru, ULONG_MAX) {
		kfree(mlru);
		xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}

static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
					 int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(lru, nid, src_idx);
	if (!src)
		goto out;
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}
out:
	spin_unlock_irq(&nlru->lock);
}

static void memcg_reparent_list_lru(struct list_lru *lru,
				    int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);

	memcg_list_lru_free(lru, src_idx);
}

void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	struct cgroup_subsys_state *css;
	struct list_lru *lru;
	int src_idx = memcg->kmemcg_id;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent.
	 *
	 * After we have finished, all list_lrus corresponding to this cgroup
	 * are guaranteed to remain empty. So we can safely free this cgroup's
	 * list lrus in memcg_list_lru_free().
	 *
	 * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc()
	 * from allocating list lrus for this cgroup after memcg_list_lru_free()
	 * call.
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &memcg->css) {
		struct mem_cgroup *child;

		child = mem_cgroup_from_css(css);
		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
	}
	rcu_read_unlock();

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_reparent_list_lru(lru, src_idx, parent);
	mutex_unlock(&list_lrus_mutex);
}

static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
					    struct list_lru *lru)
{
	int idx = memcg->kmemcg_id;

	return idx < 0 || xa_load(&lru->xa, idx);
}

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	int i;
	unsigned long flags;
	struct list_lru_memcg_table {
		struct list_lru_memcg *mlru;
		struct mem_cgroup *memcg;
	} *table;
	XA_STATE(xas, &lru->xa, 0);

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
	if (!table)
		return -ENOMEM;

	/*
	 * Because the list_lru can be reparented to the parent cgroup's
	 * list_lru, we should make sure that this cgroup and all its
	 * ancestors have allocated list_lru_memcg.
	 */
	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
		if (memcg_list_lru_allocated(memcg, lru))
			break;

		table[i].memcg = memcg;
		table[i].mlru = memcg_init_list_lru_one(gfp);
		if (!table[i].mlru) {
			while (i--)
				kfree(table[i].mlru);
			kfree(table);
			return -ENOMEM;
		}
	}

	xas_lock_irqsave(&xas, flags);
	while (i--) {
		int index = READ_ONCE(table[i].memcg->kmemcg_id);
		struct list_lru_memcg *mlru = table[i].mlru;

		xas_set(&xas, index);
retry:
		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
			kfree(mlru);
		} else {
			xas_store(&xas, mlru);
			if (xas_error(&xas) == -ENOMEM) {
				xas_unlock_irqrestore(&xas, flags);
				if (xas_nomem(&xas, gfp))
					xas_set_err(&xas, 0);
				xas_lock_irqsave(&xas, flags);
				/*
				 * The xas lock has been released, this memcg
				 * can be reparented before us. So reload
				 * memcg id. More details see the comments
				 * in memcg_reparent_list_lrus().
				 */
				index = READ_ONCE(table[i].memcg->kmemcg_id);
				if (index < 0)
					xas_set_err(&xas, 0);
				else if (!xas_error(&xas) && index != xas.xa_index)
					xas_set(&xas, index);
				goto retry;
			}
		}
	}
	/* xas_nomem() is used to free memory instead of memory allocation. */
	if (xas.xa_alloc)
		xas_nomem(&xas, gfp);
	xas_unlock_irqrestore(&xas, flags);
	kfree(table);

	return xas_error(&xas);
}
#else
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	memcg_init_list_lru(lru, memcg_aware);
	list_lru_register(lru);

	return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
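
/*
 * Example (sketch): users do not call __list_lru_init() directly but use the
 * list_lru_init()/list_lru_init_memcg() wrappers from <linux/list_lru.h>.
 * A memcg-aware lru driven by a shrinker is typically set up as below; the
 * "demo" names are hypothetical and error handling is kept minimal.
 *
 *	static struct list_lru demo_lru;
 *	static struct shrinker *demo_shrinker;
 *
 *	static int __init demo_init(void)
 *	{
 *		int err;
 *
 *		demo_shrinker = shrinker_alloc(SHRINKER_MEMCG_AWARE |
 *					       SHRINKER_NUMA_AWARE, "demo");
 *		if (!demo_shrinker)
 *			return -ENOMEM;
 *
 *		err = list_lru_init_memcg(&demo_lru, demo_shrinker);
 *		if (err) {
 *			shrinker_free(demo_shrinker);
 *			return err;
 *		}
 *
 *		demo_shrinker->count_objects = demo_count_objects;
 *		demo_shrinker->scan_objects = demo_scan_objects;
 *		shrinker_register(demo_shrinker);
 *		return 0;
 *	}
 *
 * list_lru_destroy() below is the matching teardown; it is safe to call on an
 * lru that was never (successfully) initialised.
 */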

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
}
EXPORT_SYMBOL_GPL(list_lru_destroy);