// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;	/* true while the owning CPU is offline */
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

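/*
 * Splice all of "from" onto the tail of "to". Since each list tracks its
 * tail, this is O(1): no traversal is needed, only the byte counter is
 * carried over.
 */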
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)
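/*
 * For example, with CONFIG_NR_CPUS <= 256 the constant wins and
 * QUARANTINE_BATCHES evaluates to 1024; bigger configs get
 * 4 * CONFIG_NR_CPUS batches. Once a per-cpu queue exceeds
 * QUARANTINE_PERCPU_SIZE (1 MB), it is flushed into the current
 * global batch.
 */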

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

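/*
 * Objects that kasan_quarantine_remove_cache() evicts from the per-cpu
 * quarantines are first staged on these per-cpu scratch lists under
 * sq->lock, then drained and freed by the CPU that initiated the cache
 * removal.
 */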
struct cpu_shrink_qlist {
	raw_spinlock_t lock;
	struct qlist_head qlist;
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine does not support the memory shrinker with the SLAB
 * allocator, so keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32
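/*
 * Example sizing: on a 4 GB machine with 4 online CPUs, total_size is
 * 4 GB / 32 = 128 MB and percpu_quarantines is 4 MB, so
 * quarantine_max_size becomes roughly 124 MB while quarantine_batch_size
 * stays at the 1 MB floor (2 * 128 MB / 1024 is only 256 KB).
 */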

static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_slab(qlink)->slab_cache;
}

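/*
 * The quarantine link is embedded in struct kasan_free_meta, which lives
 * at free_meta_offset within the freed object's slab slot, so the object
 * address can be recovered with plain pointer arithmetic.
 */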
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);

	/*
	 * Note: keep the per-object metadata to allow KASAN to print stack
	 * traces for use-after-free-before-realloc bugs.
	 */

	/*
	 * If init_on_free is enabled and KASAN's free metadata is stored in
	 * the object, zero the metadata. Otherwise, the object's memory will
	 * not be properly zeroed, as KASAN saves the metadata after the slab
	 * allocator zeroes the object.
	 */
	if (slab_want_init_on_free(cache) &&
	    cache->kasan_info.free_meta_offset == 0)
		memzero_explicit(free_meta, sizeof(*free_meta));

	___cache_free(cache, object, _THIS_IP_);
}

static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

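/*
 * Queue a just-freed object on the current CPU's quarantine. A true
 * return tells the slab free hook that the quarantine now owns the
 * object and the real free must be deferred until qlink_free() runs.
 */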
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning which ensures that it either sees the objects in
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}

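/*
 * Drop one batch from the head of the global quarantine once its total
 * size exceeds quarantine_max_size. KASAN invokes this from its
 * allocation hooks, so the quarantine is trimmed as a side effect of
 * new allocations rather than by a dedicated shrinker.
 */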
void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that kasan_quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us private
	 * grace period domain that does not interfere with anything else,
	 * and (2) it allows synchronize_srcu() to return without waiting
	 * if there are no pending read critical sections (which is the
	 * expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim to consume at most half of the slots in the quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
					      2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

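/*
 * Re-thread "from" in place: entries that belong to "cache" migrate to
 * "to", everything else is re-queued on "from" in its original order.
 */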
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	unsigned long flags;
	struct cpu_shrink_qlist *sq;

	sq = this_cpu_ptr(&shrink_qlist);
	raw_spin_lock_irqsave(&sq->lock, flags);
	qlist_move_cache(q, &sq->qlist, cache);
	raw_spin_unlock_irqrestore(&sq->lock, flags);
}

static void per_cpu_remove_cache(void *arg)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the writing to q->offline and
	 * per_cpu_remove_cache. Prevent cpu_quarantine from being corrupted
	 * by an interrupt.
	 */
	if (READ_ONCE(q->offline))
		return;
	__per_cpu_remove_cache(q, arg);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;
	int cpu;
	struct cpu_shrink_qlist *sq;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in kasan_quarantine_put(),
	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	for_each_online_cpu(cpu) {
		sq = per_cpu_ptr(&shrink_qlist, cpu);
		raw_spin_lock_irqsave(&sq->lock, flags);
		qlist_move_cache(&sq->qlist, &to_free, cache);
		raw_spin_unlock_irqrestore(&sq->lock, flags);
	}
	qlist_free_all(&to_free, cache);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the writing to q->offline and
	 * qlist_free_all. Otherwise, cpu_quarantine may be corrupted by
	 * an interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

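/*
 * CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state:
 * kasan_cpu_online() runs on every CPU as it comes up (including the
 * ones already online when the state is registered), and
 * kasan_cpu_offline() runs on a CPU before it goes down.
 */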
static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);