/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
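/*
 * A list_lru keeps one LRU list per NUMA node and, when the lru is memcg
 * aware, one list per memory cgroup per node.  Objects are added with
 * list_lru_add() and removed with list_lru_del(); reclaim scans the lists
 * through the list_lru_walk*() helpers, which call an isolate callback for
 * each item.
 */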
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to be always present, even
	 * in the systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

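/*
 * Add @item to the list of the node it lives on (and, for memcg-aware lrus,
 * of the memcg the object is charged to).  Returns true if the item was not
 * on a list and has been added, false if it was already present.
 */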
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

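/*
 * Walk one per-node (and, when @memcg_idx >= 0, per-memcg) list, calling
 * @isolate on each item with the node's lock held.  The callback may drop
 * the lock (signalled by LRU_REMOVED_RETRY or LRU_RETRY), in which case the
 * traversal restarts from the head of the list.
 */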
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
						isolate, cb_arg, nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* Free all lists allocated so far; @end is exclusive, so pass i. */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

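/*
 * Grow the per-memcg array of one node from @old_size to @new_size entries.
 * The new array is published under the node's lock and the old one is freed
 * after an RCU grace period, so lockless readers in list_lru_from_memcg_idx()
 * remain safe.
 */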
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

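/*
 * Called when the memcg cache id space grows: resize the per-memcg arrays of
 * every registered list_lru, rolling the already-resized ones back on failure.
 */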
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

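/*
 * Splice all objects accounted to the cgroup with kmem id @src_idx onto the
 * lists for @dst_idx in every registered list_lru; used when a cgroup's kmem
 * objects are reparented on offline.
 */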
void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

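/*
 * Initialise @lru: allocate the per-node array, set up the locks and, when
 * @memcg_aware, the per-memcg arrays, then register the lru so it takes part
 * in future cache id updates.  memcg_get_cache_ids()/memcg_put_cache_ids()
 * keep memcg_nr_cache_ids stable while the per-memcg arrays are sized.
 */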
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

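/*
 * Look up the list for (@nid, @idx).  For memcg-aware lrus the per-memcg
 * lists live in an xarray indexed by kmem cgroup id; callers must hold either
 * the node's lock or the RCU read lock.  Returns NULL if no list has been
 * allocated for that memcg yet.
 */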
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	if (list_lru_memcg_aware(lru) && idx >= 0) {
		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

		return mlru ? &mlru->node[nid] : NULL;
	}
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!list_lru_memcg_aware(lru))
		goto out;

	memcg = mem_cgroup_from_slab_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
	count = l ? READ_ONCE(l->nr_items) : 0;
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

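/*
 * Core walk loop.  Unlike the locking wrappers below, this expects the caller
 * to already hold lru->node[nid].lock; the isolate callback may drop that
 * lock, which forces the traversal to restart.
 */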
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

restart:
	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
	if (!l)
		goto out;

	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
out:
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

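/*
 * Walk all lists of @nid: first the global list, then, for memcg-aware lrus,
 * every per-memcg list currently present in the xarray, until @nr_to_walk is
 * exhausted.
 */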
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);

#ifdef CONFIG_MEMCG_KMEM
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		struct list_lru_memcg *mlru;
		unsigned long index;

		xa_for_each(&lru->xa, index, mlru) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, index,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
#endif

	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
	int nid;
	struct list_lru_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(&mlru->node[nid]);

	return mlru;
}

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);

	/*
	 * __list_lru_walk_one() may still be walking the lists of this entry,
	 * so free it with kvfree_rcu().  That walk runs under
	 * lru->node[nid].lock, which serves as an RCU read-side critical
	 * section.
	 */
	if (mlru)
		kvfree_rcu(mlru, rcu);
}

static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	if (memcg_aware)
		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
	lru->memcg_aware = memcg_aware;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	XA_STATE(xas, &lru->xa, 0);
	struct list_lru_memcg *mlru;

	if (!list_lru_memcg_aware(lru))
		return;

	xas_lock_irq(&xas);
	xas_for_each(&xas, mlru, ULONG_MAX) {
		kfree(mlru);
		xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}

static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
					 int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(lru, nid, src_idx);
	if (!src)
		goto out;
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}
out:
	spin_unlock_irq(&nlru->lock);
}

static void memcg_reparent_list_lru(struct list_lru *lru,
				    int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);

	memcg_list_lru_free(lru, src_idx);
}

void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	struct cgroup_subsys_state *css;
	struct list_lru *lru;
	int src_idx = memcg->kmemcg_id;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent.
	 *
	 * After we have finished, all list_lrus corresponding to this cgroup
	 * are guaranteed to remain empty. So we can safely free this cgroup's
	 * list lrus in memcg_list_lru_free().
	 *
	 * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc()
	 * from allocating list lrus for this cgroup after the
	 * memcg_list_lru_free() call.
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &memcg->css) {
		struct mem_cgroup *child;

		child = mem_cgroup_from_css(css);
		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
	}
	rcu_read_unlock();

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_reparent_list_lru(lru, src_idx, parent);
	mutex_unlock(&list_lrus_mutex);
}

static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
					    struct list_lru *lru)
{
	int idx = memcg->kmemcg_id;

	return idx < 0 || xa_load(&lru->xa, idx);
}

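/*
 * Make sure @memcg and every ancestor up to the first already-populated one
 * have a list_lru_memcg in @lru's xarray, so that entries can later be
 * reparented without needing to allocate.
 */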
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	int i;
	unsigned long flags;
	struct list_lru_memcg_table {
		struct list_lru_memcg *mlru;
		struct mem_cgroup *memcg;
	} *table;
	XA_STATE(xas, &lru->xa, 0);

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
	if (!table)
		return -ENOMEM;

	/*
	 * Because the list_lru can be reparented to the parent cgroup's
	 * list_lru, we should make sure that this cgroup and all its
	 * ancestors have allocated list_lru_memcg.
	 */
	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
		if (memcg_list_lru_allocated(memcg, lru))
			break;

		table[i].memcg = memcg;
		table[i].mlru = memcg_init_list_lru_one(gfp);
		if (!table[i].mlru) {
			while (i--)
				kfree(table[i].mlru);
			kfree(table);
			return -ENOMEM;
		}
	}

	xas_lock_irqsave(&xas, flags);
	while (i--) {
		int index = READ_ONCE(table[i].memcg->kmemcg_id);
		struct list_lru_memcg *mlru = table[i].mlru;

		xas_set(&xas, index);
retry:
		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
			kfree(mlru);
		} else {
			xas_store(&xas, mlru);
			if (xas_error(&xas) == -ENOMEM) {
				xas_unlock_irqrestore(&xas, flags);
				if (xas_nomem(&xas, gfp))
					xas_set_err(&xas, 0);
				xas_lock_irqsave(&xas, flags);
				/*
				 * The xas lock has been released, so this
				 * memcg may have been reparented in the
				 * meantime.  Reload the memcg id; see the
				 * comments in memcg_reparent_list_lrus()
				 * for details.
				 */
				index = READ_ONCE(table[i].memcg->kmemcg_id);
				if (index < 0)
					xas_set_err(&xas, 0);
				else if (!xas_error(&xas) && index != xas.xa_index)
					xas_set(&xas, index);
				goto retry;
			}
		}
	}
	/* xas_nomem() is called here to free a preallocated node, not to allocate. */
	if (xas.xa_alloc)
		xas_nomem(&xas, gfp);
	xas_unlock_irqrestore(&xas, flags);
	kfree(table);

	return xas_error(&xas);
}
#else
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

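/*
 * Initialise @lru: record the owning shrinker's id (used for the memcg
 * shrinker bitmap), allocate the per-node lists and, when @memcg_aware, set
 * up the xarray of per-memcg lists.
 */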
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	memcg_init_list_lru(lru, memcg_aware);
	list_lru_register(lru);

	return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
}
EXPORT_SYMBOL_GPL(list_lru_destroy);