// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

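/*
 * Return the memcg that a kmem-accounted object is charged to, or NULL if
 * kmem accounting is disabled or the object is not charged to any cgroup.
 */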
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return memcg_from_slab_page(page);
}

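/*
 * Look up the per-memcg list that @ptr belongs to. If the lru is not memcg
 * aware or the object is not charged to a memcg, fall back to the node-level
 * list. The memcg (or NULL) is optionally returned through @memcg_ptr.
 */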
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

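/*
 * Add @item to the lru. The node is derived from the page backing @item and,
 * for memcg aware lrus, the per-memcg list is derived from the object's
 * memory cgroup. Returns true if the item was added, false if it was already
 * on a list.
 */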
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

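/*
 * Remove @item from the lru. The node and memcg lists are looked up the same
 * way as in list_lru_add(). Returns true if the item was removed, false if
 * it was not on a list.
 */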
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

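/*
 * Walk one per-memcg (or per-node) list and call @isolate on each item.
 * The caller must hold nlru->lock; the isolate callback may drop and retake
 * it, in which case the walk is restarted.
 */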
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{

	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

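/*
 * Grow the per-memcg array of lists from @old_size to @new_size entries.
 * Existing pointers are copied over and the new array is published with
 * rcu_assign_pointer() under nlru->lock, so readers see either the old or
 * the new array (see list_lru_from_memcg_idx). The old array is freed after
 * a grace period.
 */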
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/* do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

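/*
 * Splice all entries of the sublist belonging to the memcg being offlined
 * (@src_idx) onto the destination memcg's sublist for this node and transfer
 * the item count, setting the destination's shrinker bit if entries were
 * moved onto a previously empty list.
 */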
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

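/*
 * Initialise the per-node (and, for memcg aware lrus, per-memcg) lists and
 * register the lru so that it gets resized when the number of memcg cache
 * ids grows. Returns 0 on success or -ENOMEM.
 */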
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	if (list_lru_memcg_aware(lru) && idx >= 0) {
		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

		return mlru ? &mlru->node[nid] : NULL;
	}
	return &lru->node[nid].lru;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */

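/*
 * Add @item to the list for @nid and @memcg. Unlike the *_obj variants
 * below, the caller supplies the node id and memcg explicitly. Returns true
 * if the item was added, false if it was already on a list.
 */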
bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
		  struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

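/*
 * Convenience wrapper that derives the node id from the page backing @item
 * and, for memcg aware lrus, the memcg from the slab object itself.
 */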
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
		mem_cgroup_from_slab_obj(item) : NULL;

	return list_lru_add(lru, item, nid, memcg);
}
EXPORT_SYMBOL_GPL(list_lru_add_obj);

bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
		  struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
		mem_cgroup_from_slab_obj(item) : NULL;

	return list_lru_del(lru, item, nid, memcg);
}
EXPORT_SYMBOL_GPL(list_lru_del_obj);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
	count = l ? READ_ONCE(l->nr_items) : 0;
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

restart:
	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
	if (!l)
		goto out;

	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		case LRU_STOP:
			assert_spin_locked(&nlru->lock);
			goto out;
		default:
			BUG();
		}
	}
out:
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);

#ifdef CONFIG_MEMCG_KMEM
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		struct list_lru_memcg *mlru;
		unsigned long index;

		xa_for_each(&lru->xa, index, mlru) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, index,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
#endif

	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
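/*
 * Allocate a list_lru_memcg with one list_lru_one per node. Returns NULL on
 * allocation failure.
 */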
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
	int nid;
	struct list_lru_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(&mlru->node[nid]);

	return mlru;
}

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);

	/*
	 * The __list_lru_walk_one() can walk the list of this node.
	 * We need kvfree_rcu() here. And the walking of the list
	 * is under lru->node[nid]->lock, which can serve as a RCU
	 * read-side critical section.
	 */
	if (mlru)
		kvfree_rcu(mlru, rcu);
}

static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	if (memcg_aware)
		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
	lru->memcg_aware = memcg_aware;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	XA_STATE(xas, &lru->xa, 0);
	struct list_lru_memcg *mlru;

	if (!list_lru_memcg_aware(lru))
		return;

	xas_lock_irq(&xas);
	xas_for_each(&xas, mlru, ULONG_MAX) {
		kfree(mlru);
		xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}

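/*
 * Splice all entries of the child memcg's sublist (@src_idx) onto the
 * destination (parent) memcg's sublist for this node, updating the item
 * count and setting the destination's shrinker bit if entries were moved.
 */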
static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
					 int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(lru, nid, src_idx);
	if (!src)
		goto out;
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}
out:
	spin_unlock_irq(&nlru->lock);
}

static void memcg_reparent_list_lru(struct list_lru *lru,
				    int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);

	memcg_list_lru_free(lru, src_idx);
}

void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	struct cgroup_subsys_state *css;
	struct list_lru *lru;
	int src_idx = memcg->kmemcg_id;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent.
	 *
	 * After we have finished, all list_lrus corresponding to this cgroup
	 * are guaranteed to remain empty. So we can safely free this cgroup's
	 * list lrus in memcg_list_lru_free().
	 *
	 * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc()
	 * from allocating list lrus for this cgroup after memcg_list_lru_free()
	 * call.
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &memcg->css) {
		struct mem_cgroup *child;

		child = mem_cgroup_from_css(css);
		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
	}
	rcu_read_unlock();

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_reparent_list_lru(lru, src_idx, parent);
	mutex_unlock(&list_lrus_mutex);
}

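/*
 * Returns true if @memcg already has a list_lru_memcg allocated in @lru's
 * xarray, or if its kmemcg_id is negative (e.g. the root cgroup), in which
 * case the per-node list is used instead.
 */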
static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
					    struct list_lru *lru)
{
	int idx = memcg->kmemcg_id;

	return idx < 0 || xa_load(&lru->xa, idx);
}

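/*
 * Make sure @memcg and all its ancestors have a list_lru_memcg allocated in
 * @lru. Allocations are done outside the xa_lock and then published under
 * it; entries that raced with a concurrent allocation or reparenting are
 * simply freed. Returns 0 on success or a negative errno.
 */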
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	int i;
	unsigned long flags;
	struct list_lru_memcg_table {
		struct list_lru_memcg *mlru;
		struct mem_cgroup *memcg;
	} *table;
	XA_STATE(xas, &lru->xa, 0);

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
	if (!table)
		return -ENOMEM;

	/*
	 * Because the list_lru can be reparented to the parent cgroup's
	 * list_lru, we should make sure that this cgroup and all its
	 * ancestors have allocated list_lru_memcg.
	 */
	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
		if (memcg_list_lru_allocated(memcg, lru))
			break;

		table[i].memcg = memcg;
		table[i].mlru = memcg_init_list_lru_one(gfp);
		if (!table[i].mlru) {
			while (i--)
				kfree(table[i].mlru);
			kfree(table);
			return -ENOMEM;
		}
	}

	xas_lock_irqsave(&xas, flags);
	while (i--) {
		int index = READ_ONCE(table[i].memcg->kmemcg_id);
		struct list_lru_memcg *mlru = table[i].mlru;

		xas_set(&xas, index);
retry:
		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
			kfree(mlru);
		} else {
			xas_store(&xas, mlru);
			if (xas_error(&xas) == -ENOMEM) {
				xas_unlock_irqrestore(&xas, flags);
				if (xas_nomem(&xas, gfp))
					xas_set_err(&xas, 0);
				xas_lock_irqsave(&xas, flags);
				/*
				 * The xas lock has been released, so this
				 * memcg may have been reparented before us.
				 * Reload the memcg id; see the comments in
				 * memcg_reparent_list_lrus() for details.
				 */
				index = READ_ONCE(table[i].memcg->kmemcg_id);
				if (index < 0)
					xas_set_err(&xas, 0);
				else if (!xas_error(&xas) && index != xas.xa_index)
					xas_set(&xas, index);
				goto retry;
			}
		}
	}
	/* xas_nomem() is used to free memory instead of memory allocation. */
	if (xas.xa_alloc)
		xas_nomem(&xas, gfp);
	xas_unlock_irqrestore(&xas, flags);
	kfree(table);

	return xas_error(&xas);
}
#else
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;

	if (mem_cgroup_kmem_disabled())
		memcg_aware = false;
#endif

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	memcg_init_list_lru(lru, memcg_aware);
	list_lru_register(lru);

	return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
}
EXPORT_SYMBOL_GPL(list_lru_destroy);