// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
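
/*
 * Usage sketch (illustrative only, not part of this file): a cache object
 * embeds a list_head and must be slab/page backed so that the
 * page_to_nid(virt_to_page()) lookup above can resolve its NUMA node.
 * "struct my_cache_obj" and the helpers below are hypothetical.
 */
#if 0
struct my_cache_obj {
	struct list_head lru;	/* initialized with INIT_LIST_HEAD() */
	void *payload;
};

static void my_obj_make_reclaimable(struct list_lru *lru,
				    struct my_cache_obj *obj)
{
	/* Returns false if the item was already on a list. */
	if (!list_lru_add(lru, &obj->lru))
		pr_debug("object was already on the LRU\n");
}

static bool my_obj_grab(struct list_lru *lru, struct my_cache_obj *obj)
{
	/* Take the object off the LRU before using it. */
	return list_lru_del(lru, &obj->lru);
}
#endif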

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = READ_ONCE(l->nr_items);
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
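
/*
 * Sketch of a shrinker count_objects hook built on these counters
 * (illustrative only): list_lru_shrink_count() is the <linux/list_lru.h>
 * wrapper that feeds sc->nid and sc->memcg into list_lru_count_one().
 * "my_lru" is a hypothetical list_lru owned by the caller.
 */
#if 0
static unsigned long my_shrink_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	return list_lru_shrink_count(&my_lru, sc);
}
#endif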

static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
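
/*
 * Sketch of an isolate callback for the walkers above (illustrative only).
 * The callback runs with nlru->lock held; LRU_REMOVED tells the walker that
 * one item came off the list, and list_lru_isolate_move() keeps nr_items
 * consistent. "struct my_cache_obj" and my_obj_is_busy() are hypothetical.
 */
#if 0
static enum lru_status my_obj_isolate(struct list_head *item,
				      struct list_lru_one *list,
				      spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;
	struct my_cache_obj *obj = container_of(item, struct my_cache_obj,
						lru);

	if (my_obj_is_busy(obj))
		return LRU_SKIP;	/* leave it where it is */

	/* Move onto a private dispose list to be freed after the walk. */
	list_lru_isolate_move(list, item, dispose);
	return LRU_REMOVED;
}
#endif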

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use
	 * kvfree_rcu_local().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu_local(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu_local);
	return 0;
}
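
/*
 * Reader-side sketch of the contract the pointer swap above relies on
 * (illustrative only): any dereference of nlru->memcg_lrus must be covered
 * either by rcu_read_lock() or by nlru->lock, since the old array is only
 * freed after a grace period.
 */
#if 0
static unsigned long peek_nr_items(struct list_lru_node *nlru, int idx)
{
	struct list_lru_one *l;
	unsigned long nr;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, idx);
	nr = READ_ONCE(l->nr_items);
	rcu_read_unlock();
	return nr;
}
#endif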

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
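
/*
 * Caller-side sketch (illustrative only): mm/memcontrol.c invokes
 * memcg_drain_all_list_lrus() when a memcg goes offline, reparenting every
 * object accounted to the dying cgroup. The helper below is a hypothetical
 * simplification of that path; memcg_free_cache_id() is assumed here to
 * retire the now-unused kmemcg id.
 */
#if 0
static void my_offline_kmem(struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
	int kmemcg_id = memcg->kmemcg_id;

	memcg_drain_all_list_lrus(kmemcg_id, parent);
	memcg_free_cache_id(kmemcg_id);
}
#endif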

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
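
/*
 * Init/teardown sketch (illustrative only), using the wrappers from
 * <linux/list_lru.h> such as list_lru_init_memcg(). A memcg-aware LRU
 * needs the shrinker's id before __list_lru_init() runs, so the shrinker
 * is preallocated first and registered once the LRU exists;
 * my_shrink_count()/my_shrink_scan() are hypothetical callbacks.
 */
#if 0
static struct list_lru my_lru;
static struct shrinker my_shrinker = {
	.count_objects	= my_shrink_count,
	.scan_objects	= my_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE | SHRINKER_NUMA_AWARE,
};

static int __init my_cache_init(void)
{
	int err;

	err = prealloc_shrinker(&my_shrinker);	/* assigns my_shrinker.id */
	if (err)
		return err;

	err = list_lru_init_memcg(&my_lru, &my_shrinker);
	if (err) {
		free_prealloced_shrinker(&my_shrinker);
		return err;
	}

	register_shrinker_prepared(&my_shrinker);
	return 0;
}

static void __exit my_cache_exit(void)
{
	unregister_shrinker(&my_shrinker);
	list_lru_destroy(&my_lru);
}
#endif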

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);