1/*
2 * Copyright (C) 2008, 2009 Intel Corporation
3 * Authors: Andi Kleen, Fengguang Wu
4 *
5 * This software may be redistributed and/or modified under the terms of
6 * the GNU General Public License ("GPL") version 2 only as published by the
7 * Free Software Foundation.
8 *
9 * High level machine check handler. Handles pages reported by the
10 * hardware as being corrupted usually due to a multi-bit ECC memory or cache
11 * failure.
12 *
 13 * In addition there is a "soft offline" entry point that allows stopping the
 14 * use of not-yet-corrupted (but suspicious) pages without killing anything.
15 *
16 * Handles page cache pages in various states. The tricky part
 17 * here is that we can access any page asynchronously with respect to
18 * other VM users, because memory failures could happen anytime and
19 * anywhere. This could violate some of their assumptions. This is why
20 * this code has to be extremely careful. Generally it tries to use
21 * normal locking rules, as in get the standard locks, even if that means
22 * the error handling takes potentially a long time.
23 *
24 * It can be very tempting to add handling for obscure cases here.
25 * In general any code for handling new cases should only be added iff:
26 * - You know how to test it.
27 * - You have a test that can be added to mce-test
28 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
29 * - The case actually shows up as a frequent (top 10) page state in
30 * tools/vm/page-types when running a real workload.
31 *
32 * There are several operations here with exponential complexity because
33 * of unsuitable VM data structures. For example the operation to map back
34 * from RMAP chains to processes has to walk the complete process list and
 35 * has non-linear complexity in the number of processes. But since memory corruptions
36 * are rare we hope to get away with this. This avoids impacting the core
37 * VM.
38 */
39#include <linux/kernel.h>
40#include <linux/mm.h>
41#include <linux/page-flags.h>
42#include <linux/kernel-page-flags.h>
43#include <linux/sched/signal.h>
44#include <linux/sched/task.h>
45#include <linux/ksm.h>
46#include <linux/rmap.h>
47#include <linux/export.h>
48#include <linux/pagemap.h>
49#include <linux/swap.h>
50#include <linux/backing-dev.h>
51#include <linux/migrate.h>
52#include <linux/suspend.h>
53#include <linux/slab.h>
54#include <linux/swapops.h>
55#include <linux/hugetlb.h>
56#include <linux/memory_hotplug.h>
57#include <linux/mm_inline.h>
58#include <linux/kfifo.h>
59#include <linux/ratelimit.h>
60#include "internal.h"
61#include "ras/ras_event.h"
62
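/*
 * When non-zero, tasks that map a poisoned page are signalled as soon as
 * the error is handled ("early kill") instead of lazily on the next
 * access. Exposed as /proc/sys/vm/memory_failure_early_kill.
 */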
63int sysctl_memory_failure_early_kill __read_mostly = 0;
64
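/*
 * When zero, any uncorrected memory error makes the kernel panic instead
 * of attempting recovery. Exposed as /proc/sys/vm/memory_failure_recovery.
 */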
65int sysctl_memory_failure_recovery __read_mostly = 1;
66
67atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
68
69#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70
71u32 hwpoison_filter_enable = 0;
72u32 hwpoison_filter_dev_major = ~0U;
73u32 hwpoison_filter_dev_minor = ~0U;
74u64 hwpoison_filter_flags_mask;
75u64 hwpoison_filter_flags_value;
76EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
77EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
78EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
79EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
80EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
81
82static int hwpoison_filter_dev(struct page *p)
83{
84 struct address_space *mapping;
85 dev_t dev;
86
87 if (hwpoison_filter_dev_major == ~0U &&
88 hwpoison_filter_dev_minor == ~0U)
89 return 0;
90
91 /*
92 * page_mapping() does not accept slab pages.
93 */
94 if (PageSlab(p))
95 return -EINVAL;
96
97 mapping = page_mapping(p);
98 if (mapping == NULL || mapping->host == NULL)
99 return -EINVAL;
100
101 dev = mapping->host->i_sb->s_dev;
102 if (hwpoison_filter_dev_major != ~0U &&
103 hwpoison_filter_dev_major != MAJOR(dev))
104 return -EINVAL;
105 if (hwpoison_filter_dev_minor != ~0U &&
106 hwpoison_filter_dev_minor != MINOR(dev))
107 return -EINVAL;
108
109 return 0;
110}
111
112static int hwpoison_filter_flags(struct page *p)
113{
114 if (!hwpoison_filter_flags_mask)
115 return 0;
116
117 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
118 hwpoison_filter_flags_value)
119 return 0;
120 else
121 return -EINVAL;
122}
123
124/*
125 * This allows stress tests to limit test scope to a collection of tasks
126 * by putting them under some memcg. This prevents killing unrelated/important
127 * processes such as /sbin/init. Note that the target task may share clean
128 * pages with init (eg. libc text), which is harmless. If the target task
 129 * shares _dirty_ pages with another task B, the test scheme must make sure B
 130 * is also included in the memcg. Lastly, due to race conditions this filter
131 * can only guarantee that the page either belongs to the memcg tasks, or is
132 * a freed page.
133 */
134#ifdef CONFIG_MEMCG
135u64 hwpoison_filter_memcg;
136EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
137static int hwpoison_filter_task(struct page *p)
138{
139 if (!hwpoison_filter_memcg)
140 return 0;
141
142 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
143 return -EINVAL;
144
145 return 0;
146}
147#else
148static int hwpoison_filter_task(struct page *p) { return 0; }
149#endif
150
151int hwpoison_filter(struct page *p)
152{
153 if (!hwpoison_filter_enable)
154 return 0;
155
156 if (hwpoison_filter_dev(p))
157 return -EINVAL;
158
159 if (hwpoison_filter_flags(p))
160 return -EINVAL;
161
162 if (hwpoison_filter_task(p))
163 return -EINVAL;
164
165 return 0;
166}
167#else
168int hwpoison_filter(struct page *p)
169{
170 return 0;
171}
172#endif
173
174EXPORT_SYMBOL_GPL(hwpoison_filter);
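/*
 * Illustrative use only (a sketch, not code from this file): an error
 * injector such as the hwpoison-inject test module typically consults
 * hwpoison_filter() before poisoning a page, roughly:
 *
 *	if (!hwpoison_filter(p))
 *		memory_failure(page_to_pfn(p), 0);
 *
 * so the filter knobs above (set through debugfs by the injector) decide
 * which pages an injection run is allowed to touch.
 */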
175
176/*
177 * Send all the processes who have the page mapped a signal.
178 * ``action optional'' if they are not immediately affected by the error
179 * ``action required'' if error happened in current execution context
180 */
181static int kill_proc(struct task_struct *t, unsigned long addr,
182 unsigned long pfn, struct page *page, int flags)
183{
184 short addr_lsb;
185 int ret;
186
187 pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
188 pfn, t->comm, t->pid);
189 addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
190
191 if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
192 ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr,
193 addr_lsb, current);
194 } else {
195 /*
196 * Don't use force here, it's convenient if the signal
197 * can be temporarily blocked.
198 * This could cause a loop when the user sets SIGBUS
199 * to SIG_IGN, but hopefully no one will do that?
200 */
201 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr,
202 addr_lsb, t); /* synchronous? */
203 }
204 if (ret < 0)
205 pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
206 t->comm, t->pid, ret);
207 return ret;
208}
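/*
 * For reference, a minimal sketch (userspace, not part of this file) of a
 * handler consuming the BUS_MCEERR_AO signal sent above; field names
 * follow the siginfo ABI:
 *
 *	static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO) {
 *			size_t len = (size_t)1 << si->si_addr_lsb;
 *			// si->si_addr .. si->si_addr + len is poisoned;
 *			// drop or rebuild that data, never touch it again.
 *		}
 *	}
 *
 * installed with sigaction() and SA_SIGINFO.
 */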
209
210/*
 211 * When an unknown page type is encountered, drain as many buffers as possible
 212 * in the hope of turning the page into an LRU or free page, which we can handle.
213 */
214void shake_page(struct page *p, int access)
215{
216 if (PageHuge(p))
217 return;
218
219 if (!PageSlab(p)) {
220 lru_add_drain_all();
221 if (PageLRU(p))
222 return;
223 drain_all_pages(page_zone(p));
224 if (PageLRU(p) || is_free_buddy_page(p))
225 return;
226 }
227
228 /*
 229 * Only call drop_slab_node() here (which would also shrink
 230 * other caches) if access is not potentially fatal.
231 */
232 if (access)
233 drop_slab_node(page_to_nid(p));
234}
235EXPORT_SYMBOL_GPL(shake_page);
236
237/*
238 * Kill all processes that have a poisoned page mapped and then isolate
239 * the page.
240 *
241 * General strategy:
242 * Find all processes having the page mapped and kill them.
243 * But we keep a page reference around so that the page is not
244 * actually freed yet.
245 * Then stash the page away
246 *
247 * There's no convenient way to get back to mapped processes
248 * from the VMAs. So do a brute-force search over all
249 * running processes.
250 *
251 * Remember that machine checks are not common (or rather
252 * if they are common you have other problems), so this shouldn't
253 * be a performance issue.
254 *
255 * Also there are some races possible while we get from the
256 * error detection to actually handle it.
257 */
258
259struct to_kill {
260 struct list_head nd;
261 struct task_struct *tsk;
262 unsigned long addr;
263 char addr_valid;
264};
265
266/*
267 * Failure handling: if we can't find or can't kill a process there's
 268 * not much we can do. We just print a message and otherwise ignore the error.
269 */
270
271/*
272 * Schedule a process for later kill.
273 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
274 * TBD would GFP_NOIO be enough?
275 */
276static void add_to_kill(struct task_struct *tsk, struct page *p,
277 struct vm_area_struct *vma,
278 struct list_head *to_kill,
279 struct to_kill **tkc)
280{
281 struct to_kill *tk;
282
283 if (*tkc) {
284 tk = *tkc;
285 *tkc = NULL;
286 } else {
287 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
288 if (!tk) {
289 pr_err("Memory failure: Out of memory while machine check handling\n");
290 return;
291 }
292 }
293 tk->addr = page_address_in_vma(p, vma);
294 tk->addr_valid = 1;
295
296 /*
297 * In theory we don't have to kill when the page was
 298 * munmapped. But it could also be a mremap. Since that's
 299 * likely very rare, kill anyway just out of paranoia, but use
300 * a SIGKILL because the error is not contained anymore.
301 */
302 if (tk->addr == -EFAULT) {
303 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
304 page_to_pfn(p), tsk->comm);
305 tk->addr_valid = 0;
306 }
307 get_task_struct(tsk);
308 tk->tsk = tsk;
309 list_add_tail(&tk->nd, to_kill);
310}
311
312/*
313 * Kill the processes that have been collected earlier.
314 *
 315 * Only do anything when FORCEKILL is set, otherwise just free the list
316 * (this is used for clean pages which do not need killing)
317 * Also when FAIL is set do a force kill because something went
318 * wrong earlier.
319 */
320static void kill_procs(struct list_head *to_kill, int forcekill,
321 bool fail, struct page *page, unsigned long pfn,
322 int flags)
323{
324 struct to_kill *tk, *next;
325
326 list_for_each_entry_safe (tk, next, to_kill, nd) {
327 if (forcekill) {
328 /*
329 * In case something went wrong with munmapping
330 * make sure the process doesn't catch the
331 * signal and then access the memory. Just kill it.
332 */
333 if (fail || tk->addr_valid == 0) {
334 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
335 pfn, tk->tsk->comm, tk->tsk->pid);
336 force_sig(SIGKILL, tk->tsk);
337 }
338
339 /*
340 * In theory the process could have mapped
341 * something else on the address in-between. We could
342 * check for that, but we need to tell the
343 * process anyways.
344 */
345 else if (kill_proc(tk->tsk, tk->addr,
346 pfn, page, flags) < 0)
347 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
348 pfn, tk->tsk->comm, tk->tsk->pid);
349 }
350 put_task_struct(tk->tsk);
351 kfree(tk);
352 }
353}
354
355/*
356 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
357 * on behalf of the thread group. Return task_struct of the (first found)
358 * dedicated thread if found, and return NULL otherwise.
359 *
360 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
361 * have to call rcu_read_lock/unlock() in this function.
362 */
363static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
364{
365 struct task_struct *t;
366
367 for_each_thread(tsk, t)
368 if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
369 return t;
370 return NULL;
371}
372
373/*
374 * Determine whether a given process is "early kill" process which expects
375 * to be signaled when some page under the process is hwpoisoned.
376 * Return task_struct of the dedicated thread (main thread unless explicitly
377 * specified) if the process is "early kill," and otherwise returns NULL.
378 */
379static struct task_struct *task_early_kill(struct task_struct *tsk,
380 int force_early)
381{
382 struct task_struct *t;
383 if (!tsk->mm)
384 return NULL;
385 if (force_early)
386 return tsk;
387 t = find_early_kill_thread(tsk);
388 if (t)
389 return t;
390 if (sysctl_memory_failure_early_kill)
391 return tsk;
392 return NULL;
393}
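/*
 * Note: the PF_MCE_PROCESS/PF_MCE_EARLY flags checked above are set from
 * user space, e.g. (sketch of the usual call):
 *
 *	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *
 * which opts the calling thread into early-kill handling regardless of
 * the global sysctl.
 */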
394
395/*
396 * Collect processes when the error hit an anonymous page.
397 */
398static void collect_procs_anon(struct page *page, struct list_head *to_kill,
399 struct to_kill **tkc, int force_early)
400{
401 struct vm_area_struct *vma;
402 struct task_struct *tsk;
403 struct anon_vma *av;
404 pgoff_t pgoff;
405
406 av = page_lock_anon_vma_read(page);
407 if (av == NULL) /* Not actually mapped anymore */
408 return;
409
410 pgoff = page_to_pgoff(page);
411 read_lock(&tasklist_lock);
412 for_each_process (tsk) {
413 struct anon_vma_chain *vmac;
414 struct task_struct *t = task_early_kill(tsk, force_early);
415
416 if (!t)
417 continue;
418 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
419 pgoff, pgoff) {
420 vma = vmac->vma;
421 if (!page_mapped_in_vma(page, vma))
422 continue;
423 if (vma->vm_mm == t->mm)
424 add_to_kill(t, page, vma, to_kill, tkc);
425 }
426 }
427 read_unlock(&tasklist_lock);
428 page_unlock_anon_vma_read(av);
429}
430
431/*
432 * Collect processes when the error hit a file mapped page.
433 */
434static void collect_procs_file(struct page *page, struct list_head *to_kill,
435 struct to_kill **tkc, int force_early)
436{
437 struct vm_area_struct *vma;
438 struct task_struct *tsk;
439 struct address_space *mapping = page->mapping;
440
441 i_mmap_lock_read(mapping);
442 read_lock(&tasklist_lock);
443 for_each_process(tsk) {
444 pgoff_t pgoff = page_to_pgoff(page);
445 struct task_struct *t = task_early_kill(tsk, force_early);
446
447 if (!t)
448 continue;
449 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
450 pgoff) {
451 /*
452 * Send early kill signal to tasks where a vma covers
453 * the page but the corrupted page is not necessarily
 454 * mapped in its pte.
455 * Assume applications who requested early kill want
456 * to be informed of all such data corruptions.
457 */
458 if (vma->vm_mm == t->mm)
459 add_to_kill(t, page, vma, to_kill, tkc);
460 }
461 }
462 read_unlock(&tasklist_lock);
463 i_mmap_unlock_read(mapping);
464}
465
466/*
467 * Collect the processes who have the corrupted page mapped to kill.
468 * This is done in two steps for locking reasons.
469 * First preallocate one tokill structure outside the spin locks,
 470 * so that we can kill at least one process reasonably reliably.
471 */
472static void collect_procs(struct page *page, struct list_head *tokill,
473 int force_early)
474{
475 struct to_kill *tk;
476
477 if (!page->mapping)
478 return;
479
480 tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
481 if (!tk)
482 return;
483 if (PageAnon(page))
484 collect_procs_anon(page, tokill, &tk, force_early);
485 else
486 collect_procs_file(page, tokill, &tk, force_early);
487 kfree(tk);
488}
489
490static const char *action_name[] = {
491 [MF_IGNORED] = "Ignored",
492 [MF_FAILED] = "Failed",
493 [MF_DELAYED] = "Delayed",
494 [MF_RECOVERED] = "Recovered",
495};
496
497static const char * const action_page_types[] = {
498 [MF_MSG_KERNEL] = "reserved kernel page",
499 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
500 [MF_MSG_SLAB] = "kernel slab page",
501 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
502 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
503 [MF_MSG_HUGE] = "huge page",
504 [MF_MSG_FREE_HUGE] = "free huge page",
505 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
506 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
507 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
508 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
509 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
510 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
511 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
512 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
513 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
514 [MF_MSG_CLEAN_LRU] = "clean LRU page",
515 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
516 [MF_MSG_BUDDY] = "free buddy page",
517 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
518 [MF_MSG_UNKNOWN] = "unknown page",
519};
520
521/*
522 * XXX: It is possible that a page is isolated from LRU cache,
 523 * and then kept in the swap cache, or removal from the page cache fails.
524 * The page count will stop it from being freed by unpoison.
525 * Stress tests should be aware of this memory leak problem.
526 */
527static int delete_from_lru_cache(struct page *p)
528{
529 if (!isolate_lru_page(p)) {
530 /*
531 * Clear sensible page flags, so that the buddy system won't
 532 * complain when the page is unpoisoned and freed.
533 */
534 ClearPageActive(p);
535 ClearPageUnevictable(p);
536
537 /*
538 * Poisoned page might never drop its ref count to 0 so we have
539 * to uncharge it manually from its memcg.
540 */
541 mem_cgroup_uncharge(p);
542
543 /*
544 * drop the page count elevated by isolate_lru_page()
545 */
546 put_page(p);
547 return 0;
548 }
549 return -EIO;
550}
551
552static int truncate_error_page(struct page *p, unsigned long pfn,
553 struct address_space *mapping)
554{
555 int ret = MF_FAILED;
556
557 if (mapping->a_ops->error_remove_page) {
558 int err = mapping->a_ops->error_remove_page(mapping, p);
559
560 if (err != 0) {
561 pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
562 pfn, err);
563 } else if (page_has_private(p) &&
564 !try_to_release_page(p, GFP_NOIO)) {
565 pr_info("Memory failure: %#lx: failed to release buffers\n",
566 pfn);
567 } else {
568 ret = MF_RECOVERED;
569 }
570 } else {
571 /*
572 * If the file system doesn't support it just invalidate
573 * This fails on dirty or anything with private pages
574 */
575 if (invalidate_inode_page(p))
576 ret = MF_RECOVERED;
577 else
578 pr_info("Memory failure: %#lx: Failed to invalidate\n",
579 pfn);
580 }
581
582 return ret;
583}
584
585/*
586 * Error hit kernel page.
 587 * Do nothing; try to be lucky and simply not touch it. For a few cases we
588 * could be more sophisticated.
589 */
590static int me_kernel(struct page *p, unsigned long pfn)
591{
592 return MF_IGNORED;
593}
594
595/*
596 * Page in unknown state. Do nothing.
597 */
598static int me_unknown(struct page *p, unsigned long pfn)
599{
600 pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
601 return MF_FAILED;
602}
603
604/*
605 * Clean (or cleaned) page cache page.
606 */
607static int me_pagecache_clean(struct page *p, unsigned long pfn)
608{
609 struct address_space *mapping;
610
611 delete_from_lru_cache(p);
612
613 /*
 614 * For anonymous pages we're done; the only reference left
 615 * should be the one memory_failure() holds.
616 */
617 if (PageAnon(p))
618 return MF_RECOVERED;
619
620 /*
621 * Now truncate the page in the page cache. This is really
622 * more like a "temporary hole punch"
623 * Don't do this for block devices when someone else
624 * has a reference, because it could be file system metadata
625 * and that's not safe to truncate.
626 */
627 mapping = page_mapping(p);
628 if (!mapping) {
629 /*
 630 * Page has been torn down in the meantime
631 */
632 return MF_FAILED;
633 }
634
635 /*
636 * Truncation is a bit tricky. Enable it per file system for now.
637 *
638 * Open: to take i_mutex or not for this? Right now we don't.
639 */
640 return truncate_error_page(p, pfn, mapping);
641}
642
643/*
644 * Dirty pagecache page
 645 * Issues: when the error hits a hole page the error is not properly
646 * propagated.
647 */
648static int me_pagecache_dirty(struct page *p, unsigned long pfn)
649{
650 struct address_space *mapping = page_mapping(p);
651
652 SetPageError(p);
653 /* TBD: print more information about the file. */
654 if (mapping) {
655 /*
656 * IO error will be reported by write(), fsync(), etc.
657 * who check the mapping.
658 * This way the application knows that something went
659 * wrong with its dirty file data.
660 *
661 * There's one open issue:
662 *
663 * The EIO will be only reported on the next IO
664 * operation and then cleared through the IO map.
 665 * Normally Linux has two mechanisms to pass an IO error:
666 * first through the AS_EIO flag in the address space
667 * and then through the PageError flag in the page.
668 * Since we drop pages on memory failure handling the
 669 * only mechanism open to use is through AS_EIO.
670 *
671 * This has the disadvantage that it gets cleared on
672 * the first operation that returns an error, while
673 * the PageError bit is more sticky and only cleared
674 * when the page is reread or dropped. If an
 675 * application assumes it will always get an error on
 676 * fsync, but does other operations on the fd before,
 677 * and the page is dropped in between, then the error
678 * will not be properly reported.
679 *
680 * This can already happen even without hwpoisoned
681 * pages: first on metadata IO errors (which only
682 * report through AS_EIO) or when the page is dropped
683 * at the wrong time.
684 *
685 * So right now we assume that the application DTRT on
686 * the first EIO, but we're not worse than other parts
687 * of the kernel.
688 */
689 mapping_set_error(mapping, -EIO);
690 }
691
692 return me_pagecache_clean(p, pfn);
693}
694
695/*
696 * Clean and dirty swap cache.
697 *
698 * Dirty swap cache page is tricky to handle. The page could live both in page
 699 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
700 * referenced concurrently by 2 types of PTEs:
701 * normal PTEs and swap PTEs. We try to handle them consistently by calling
702 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
703 * and then
704 * - clear dirty bit to prevent IO
705 * - remove from LRU
706 * - but keep in the swap cache, so that when we return to it on
707 * a later page fault, we know the application is accessing
708 * corrupted data and shall be killed (we installed simple
709 * interception code in do_swap_page to catch it).
710 *
711 * Clean swap cache pages can be directly isolated. A later page fault will
712 * bring in the known good data from disk.
713 */
714static int me_swapcache_dirty(struct page *p, unsigned long pfn)
715{
716 ClearPageDirty(p);
717 /* Trigger EIO in shmem: */
718 ClearPageUptodate(p);
719
720 if (!delete_from_lru_cache(p))
721 return MF_DELAYED;
722 else
723 return MF_FAILED;
724}
725
726static int me_swapcache_clean(struct page *p, unsigned long pfn)
727{
728 delete_from_swap_cache(p);
729
730 if (!delete_from_lru_cache(p))
731 return MF_RECOVERED;
732 else
733 return MF_FAILED;
734}
735
736/*
737 * Huge pages. Needs work.
738 * Issues:
739 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
740 * To narrow down kill region to one page, we need to break up pmd.
741 */
742static int me_huge_page(struct page *p, unsigned long pfn)
743{
744 int res = 0;
745 struct page *hpage = compound_head(p);
746 struct address_space *mapping;
747
748 if (!PageHuge(hpage))
749 return MF_DELAYED;
750
751 mapping = page_mapping(hpage);
752 if (mapping) {
753 res = truncate_error_page(hpage, pfn, mapping);
754 } else {
755 unlock_page(hpage);
756 /*
 757 * A migration entry prevents later access to the error anonymous
 758 * hugepage, so we can free it and dissolve it into the buddy allocator
 759 * to save the healthy subpages.
760 */
761 if (PageAnon(hpage))
762 put_page(hpage);
763 dissolve_free_huge_page(p);
764 res = MF_RECOVERED;
765 lock_page(hpage);
766 }
767
768 return res;
769}
770
771/*
772 * Various page states we can handle.
773 *
774 * A page state is defined by its current page->flags bits.
775 * The table matches them in order and calls the right handler.
776 *
 777 * This is quite tricky because we can access a page at any time
 778 * in its life cycle, so all accesses have to be extremely careful.
779 *
780 * This is not complete. More states could be added.
781 * For any missing state don't attempt recovery.
782 */
783
784#define dirty (1UL << PG_dirty)
785#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
786#define unevict (1UL << PG_unevictable)
787#define mlock (1UL << PG_mlocked)
788#define writeback (1UL << PG_writeback)
789#define lru (1UL << PG_lru)
790#define head (1UL << PG_head)
791#define slab (1UL << PG_slab)
792#define reserved (1UL << PG_reserved)
793
794static struct page_state {
795 unsigned long mask;
796 unsigned long res;
797 enum mf_action_page_type type;
798 int (*action)(struct page *p, unsigned long pfn);
799} error_states[] = {
800 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
801 /*
802 * free pages are specially detected outside this table:
 803 * PG_buddy pages make up only a small fraction of all free pages.
804 */
805
806 /*
807 * Could in theory check if slab page is free or if we can drop
808 * currently unused objects without touching them. But just
809 * treat it as standard kernel for now.
810 */
811 { slab, slab, MF_MSG_SLAB, me_kernel },
812
813 { head, head, MF_MSG_HUGE, me_huge_page },
814
815 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
816 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
817
818 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
819 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
820
821 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
822 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
823
824 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
825 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
826
827 /*
828 * Catchall entry: must be at end.
829 */
830 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
831};
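/*
 * Worked example: a dirty page-cache page on the LRU has PG_lru and
 * PG_dirty set but neither PG_swapcache/PG_swapbacked nor PG_mlocked, so
 * it falls through to the { lru|dirty, lru|dirty } entry and is handled
 * by me_pagecache_dirty(). A page matching no entry hits the catchall and
 * is reported as "unknown page".
 */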
832
833#undef dirty
834#undef sc
835#undef unevict
836#undef mlock
837#undef writeback
838#undef lru
839#undef head
840#undef slab
841#undef reserved
842
843/*
844 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
845 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
846 */
847static void action_result(unsigned long pfn, enum mf_action_page_type type,
848 enum mf_result result)
849{
850 trace_memory_failure_event(pfn, type, result);
851
852 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
853 pfn, action_page_types[type], action_name[result]);
854}
855
856static int page_action(struct page_state *ps, struct page *p,
857 unsigned long pfn)
858{
859 int result;
860 int count;
861
862 result = ps->action(p, pfn);
863
864 count = page_count(p) - 1;
865 if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
866 count--;
867 if (count > 0) {
868 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
869 pfn, action_page_types[ps->type], count);
870 result = MF_FAILED;
871 }
872 action_result(pfn, ps->type, result);
873
874 /* Could do more checks here if page looks ok */
875 /*
876 * Could adjust zone counters here to correct for the missing page.
877 */
878
879 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
880}
881
882/**
883 * get_hwpoison_page() - Get refcount for memory error handling:
884 * @page: raw error page (hit by memory error)
885 *
 886 * Return: 0 if it failed to grab the refcount, otherwise true (some
 887 * non-zero value).
888 */
889int get_hwpoison_page(struct page *page)
890{
891 struct page *head = compound_head(page);
892
893 if (!PageHuge(head) && PageTransHuge(head)) {
894 /*
 895 * A non-anonymous thp exists only at allocation/free time. We
 896 * can't handle such a case correctly, so let's give up on it.
 897 * This should be better than triggering BUG_ON when the kernel
 898 * tries to touch the "partially handled" page.
899 */
900 if (!PageAnon(head)) {
901 pr_err("Memory failure: %#lx: non anonymous thp\n",
902 page_to_pfn(page));
903 return 0;
904 }
905 }
906
907 if (get_page_unless_zero(head)) {
908 if (head == compound_head(page))
909 return 1;
910
911 pr_info("Memory failure: %#lx cannot catch tail\n",
912 page_to_pfn(page));
913 put_page(head);
914 }
915
916 return 0;
917}
918EXPORT_SYMBOL_GPL(get_hwpoison_page);
919
920/*
921 * Do all that is necessary to remove user space mappings. Unmap
922 * the pages and send SIGBUS to the processes if the data was dirty.
923 */
924static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
925 int flags, struct page **hpagep)
926{
927 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
928 struct address_space *mapping;
929 LIST_HEAD(tokill);
930 bool unmap_success;
931 int kill = 1, forcekill;
932 struct page *hpage = *hpagep;
933 bool mlocked = PageMlocked(hpage);
934
935 /*
936 * Here we are interested only in user-mapped pages, so skip any
937 * other types of pages.
938 */
939 if (PageReserved(p) || PageSlab(p))
940 return true;
941 if (!(PageLRU(hpage) || PageHuge(p)))
942 return true;
943
944 /*
945 * This check implies we don't kill processes if their pages
946 * are in the swap cache early. Those are always late kills.
947 */
948 if (!page_mapped(hpage))
949 return true;
950
951 if (PageKsm(p)) {
952 pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
953 return false;
954 }
955
956 if (PageSwapCache(p)) {
957 pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
958 pfn);
959 ttu |= TTU_IGNORE_HWPOISON;
960 }
961
962 /*
963 * Propagate the dirty bit from PTEs to struct page first, because we
964 * need this to decide if we should kill or just drop the page.
965 * XXX: the dirty test could be racy: set_page_dirty() may not always
966 * be called inside page lock (it's recommended but not enforced).
967 */
968 mapping = page_mapping(hpage);
969 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
970 mapping_cap_writeback_dirty(mapping)) {
971 if (page_mkclean(hpage)) {
972 SetPageDirty(hpage);
973 } else {
974 kill = 0;
975 ttu |= TTU_IGNORE_HWPOISON;
976 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
977 pfn);
978 }
979 }
980
981 /*
982 * First collect all the processes that have the page
983 * mapped in dirty form. This has to be done before try_to_unmap,
984 * because ttu takes the rmap data structures down.
985 *
986 * Error handling: We ignore errors here because
987 * there's nothing that can be done.
988 */
989 if (kill)
990 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
991
992 unmap_success = try_to_unmap(hpage, ttu);
993 if (!unmap_success)
994 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
995 pfn, page_mapcount(hpage));
996
997 /*
998 * try_to_unmap() might put mlocked page in lru cache, so call
999 * shake_page() again to ensure that it's flushed.
1000 */
1001 if (mlocked)
1002 shake_page(hpage, 0);
1003
1004 /*
1005 * Now that the dirty bit has been propagated to the
1006 * struct page and all unmaps done we can decide if
1007 * killing is needed or not. Only kill when the page
1008 * was dirty or the process is not restartable,
1009 * otherwise the tokill list is merely
1010 * freed. When there was a problem unmapping earlier
 1011 * use a more forceful uncatchable kill to prevent
1012 * any accesses to the poisoned memory.
1013 */
1014 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
1015 kill_procs(&tokill, forcekill, !unmap_success, p, pfn, flags);
1016
1017 return unmap_success;
1018}
1019
1020static int identify_page_state(unsigned long pfn, struct page *p,
1021 unsigned long page_flags)
1022{
1023 struct page_state *ps;
1024
1025 /*
1026 * The first check uses the current page flags which may not have any
1027 * relevant information. The second check with the saved page flags is
1028 * carried out only if the first check can't determine the page status.
1029 */
1030 for (ps = error_states;; ps++)
1031 if ((p->flags & ps->mask) == ps->res)
1032 break;
1033
1034 page_flags |= (p->flags & (1UL << PG_dirty));
1035
1036 if (!ps->mask)
1037 for (ps = error_states;; ps++)
1038 if ((page_flags & ps->mask) == ps->res)
1039 break;
1040 return page_action(ps, p, pfn);
1041}
1042
1043static int memory_failure_hugetlb(unsigned long pfn, int flags)
1044{
1045 struct page *p = pfn_to_page(pfn);
1046 struct page *head = compound_head(p);
1047 int res;
1048 unsigned long page_flags;
1049
1050 if (TestSetPageHWPoison(head)) {
1051 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1052 pfn);
1053 return 0;
1054 }
1055
1056 num_poisoned_pages_inc();
1057
1058 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1059 /*
1060 * Check "filter hit" and "race with other subpage."
1061 */
1062 lock_page(head);
1063 if (PageHWPoison(head)) {
1064 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1065 || (p != head && TestSetPageHWPoison(head))) {
1066 num_poisoned_pages_dec();
1067 unlock_page(head);
1068 return 0;
1069 }
1070 }
1071 unlock_page(head);
1072 dissolve_free_huge_page(p);
1073 action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED);
1074 return 0;
1075 }
1076
1077 lock_page(head);
1078 page_flags = head->flags;
1079
1080 if (!PageHWPoison(head)) {
1081 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1082 num_poisoned_pages_dec();
1083 unlock_page(head);
1084 put_hwpoison_page(head);
1085 return 0;
1086 }
1087
1088 /*
1089 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
1090 * simply disable it. In order to make it work properly, we need
1091 * make sure that:
1092 * - conversion of a pud that maps an error hugetlb into hwpoison
1093 * entry properly works, and
1094 * - other mm code walking over page table is aware of pud-aligned
1095 * hwpoison entries.
1096 */
1097 if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
1098 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
1099 res = -EBUSY;
1100 goto out;
1101 }
1102
1103 if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
1104 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1105 res = -EBUSY;
1106 goto out;
1107 }
1108
1109 res = identify_page_state(pfn, p, page_flags);
1110out:
1111 unlock_page(head);
1112 return res;
1113}
1114
1115/**
1116 * memory_failure - Handle memory failure of a page.
1117 * @pfn: Page Number of the corrupted page
1118 * @flags: fine tune action taken
1119 *
1120 * This function is called by the low level machine check code
1121 * of an architecture when it detects hardware memory corruption
1122 * of a page. It tries its best to recover, which includes
1123 * dropping pages, killing processes etc.
1124 *
1125 * The function is primarily of use for corruptions that
1126 * happen outside the current execution context (e.g. when
1127 * detected by a background scrubber)
1128 *
1129 * Must run in process context (e.g. a work queue) with interrupts
 1130 * enabled and no spinlocks held.
1131 */
1132int memory_failure(unsigned long pfn, int flags)
1133{
1134 struct page *p;
1135 struct page *hpage;
1136 struct page *orig_head;
1137 int res;
1138 unsigned long page_flags;
1139
1140 if (!sysctl_memory_failure_recovery)
1141 panic("Memory failure on page %lx", pfn);
1142
1143 if (!pfn_valid(pfn)) {
1144 pr_err("Memory failure: %#lx: memory outside kernel control\n",
1145 pfn);
1146 return -ENXIO;
1147 }
1148
1149 p = pfn_to_page(pfn);
1150 if (PageHuge(p))
1151 return memory_failure_hugetlb(pfn, flags);
1152 if (TestSetPageHWPoison(p)) {
1153 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1154 pfn);
1155 return 0;
1156 }
1157
1158 orig_head = hpage = compound_head(p);
1159 num_poisoned_pages_inc();
1160
1161 /*
 1162 * There is nothing we need to (or can) do about count=0 pages.
1163 * 1) it's a free page, and therefore in safe hand:
1164 * prep_new_page() will be the gate keeper.
1165 * 2) it's part of a non-compound high order page.
1166 * Implies some kernel user: cannot stop them from
1167 * R/W the page; let's pray that the page has been
1168 * used and will be freed some time later.
 1169 * In fact it's dangerous to directly bump up the page count from 0,
 1170 * as that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
1171 */
1172 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1173 if (is_free_buddy_page(p)) {
1174 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1175 return 0;
1176 } else {
1177 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
1178 return -EBUSY;
1179 }
1180 }
1181
1182 if (PageTransHuge(hpage)) {
1183 lock_page(p);
1184 if (!PageAnon(p) || unlikely(split_huge_page(p))) {
1185 unlock_page(p);
1186 if (!PageAnon(p))
1187 pr_err("Memory failure: %#lx: non anonymous thp\n",
1188 pfn);
1189 else
1190 pr_err("Memory failure: %#lx: thp split failed\n",
1191 pfn);
1192 if (TestClearPageHWPoison(p))
1193 num_poisoned_pages_dec();
1194 put_hwpoison_page(p);
1195 return -EBUSY;
1196 }
1197 unlock_page(p);
1198 VM_BUG_ON_PAGE(!page_count(p), p);
1199 hpage = compound_head(p);
1200 }
1201
1202 /*
1203 * We ignore non-LRU pages for good reasons.
1204 * - PG_locked is only well defined for LRU pages and a few others
1205 * - to avoid races with __SetPageLocked()
1206 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
1207 * The check (unnecessarily) ignores LRU pages being isolated and
1208 * walked by the page reclaim code, however that's not a big loss.
1209 */
1210 shake_page(p, 0);
1211 /* shake_page could have turned it free. */
1212 if (!PageLRU(p) && is_free_buddy_page(p)) {
1213 if (flags & MF_COUNT_INCREASED)
1214 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1215 else
1216 action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
1217 return 0;
1218 }
1219
1220 lock_page(p);
1221
1222 /*
 1223 * The page could have become part of a different compound page while
 1224 * we were waiting for the page lock. If this happens just bail out.
1225 */
1226 if (PageCompound(p) && compound_head(p) != orig_head) {
1227 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
1228 res = -EBUSY;
1229 goto out;
1230 }
1231
1232 /*
1233 * We use page flags to determine what action should be taken, but
1234 * the flags can be modified by the error containment action. One
1235 * example is an mlocked page, where PG_mlocked is cleared by
1236 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
1237 * correctly, we save a copy of the page flags at this time.
1238 */
1239 if (PageHuge(p))
1240 page_flags = hpage->flags;
1241 else
1242 page_flags = p->flags;
1243
1244 /*
 1245 * unpoison always clears PG_hwpoison inside the page lock
1246 */
1247 if (!PageHWPoison(p)) {
1248 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1249 num_poisoned_pages_dec();
1250 unlock_page(p);
1251 put_hwpoison_page(p);
1252 return 0;
1253 }
1254 if (hwpoison_filter(p)) {
1255 if (TestClearPageHWPoison(p))
1256 num_poisoned_pages_dec();
1257 unlock_page(p);
1258 put_hwpoison_page(p);
1259 return 0;
1260 }
1261
1262 if (!PageTransTail(p) && !PageLRU(p))
1263 goto identify_page_state;
1264
1265 /*
1266 * It's very difficult to mess with pages currently under IO
1267 * and in many cases impossible, so we just avoid it here.
1268 */
1269 wait_on_page_writeback(p);
1270
1271 /*
1272 * Now take care of user space mappings.
1273 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
1274 *
1275 * When the raw error page is thp tail page, hpage points to the raw
1276 * page after thp split.
1277 */
1278 if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
1279 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1280 res = -EBUSY;
1281 goto out;
1282 }
1283
1284 /*
1285 * Torn down by someone else?
1286 */
1287 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
1288 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
1289 res = -EBUSY;
1290 goto out;
1291 }
1292
1293identify_page_state:
1294 res = identify_page_state(pfn, p, page_flags);
1295out:
1296 unlock_page(p);
1297 return res;
1298}
1299EXPORT_SYMBOL_GPL(memory_failure);
1300
1301#define MEMORY_FAILURE_FIFO_ORDER 4
1302#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
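/* i.e. up to 1 << 4 = 16 pending memory failure events queued per CPU */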
1303
1304struct memory_failure_entry {
1305 unsigned long pfn;
1306 int flags;
1307};
1308
1309struct memory_failure_cpu {
1310 DECLARE_KFIFO(fifo, struct memory_failure_entry,
1311 MEMORY_FAILURE_FIFO_SIZE);
1312 spinlock_t lock;
1313 struct work_struct work;
1314};
1315
1316static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
1317
1318/**
1319 * memory_failure_queue - Schedule handling memory failure of a page.
1320 * @pfn: Page Number of the corrupted page
1321 * @flags: Flags for memory failure handling
1322 *
1323 * This function is called by the low level hardware error handler
1324 * when it detects hardware memory corruption of a page. It schedules
1325 * the recovering of error page, including dropping pages, killing
1326 * processes etc.
1327 *
1328 * The function is primarily of use for corruptions that
1329 * happen outside the current execution context (e.g. when
1330 * detected by a background scrubber)
1331 *
1332 * Can run in IRQ context.
1333 */
1334void memory_failure_queue(unsigned long pfn, int flags)
1335{
1336 struct memory_failure_cpu *mf_cpu;
1337 unsigned long proc_flags;
1338 struct memory_failure_entry entry = {
1339 .pfn = pfn,
1340 .flags = flags,
1341 };
1342
1343 mf_cpu = &get_cpu_var(memory_failure_cpu);
1344 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1345 if (kfifo_put(&mf_cpu->fifo, entry))
1346 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1347 else
1348 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
1349 pfn);
1350 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1351 put_cpu_var(memory_failure_cpu);
1352}
1353EXPORT_SYMBOL_GPL(memory_failure_queue);
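/*
 * Illustrative caller (a sketch, not code from this file): a hardware
 * error handler that runs in interrupt context cannot call
 * memory_failure() directly, so it queues the pfn instead, e.g.
 *
 *	void example_report_poison(u64 paddr)	/" hypothetical helper "/
 *	{
 *		memory_failure_queue(paddr >> PAGE_SHIFT, 0);
 *	}
 *
 * The per-cpu work item then runs memory_failure_work_func() from
 * process context.
 */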
1354
1355static void memory_failure_work_func(struct work_struct *work)
1356{
1357 struct memory_failure_cpu *mf_cpu;
1358 struct memory_failure_entry entry = { 0, };
1359 unsigned long proc_flags;
1360 int gotten;
1361
1362 mf_cpu = this_cpu_ptr(&memory_failure_cpu);
1363 for (;;) {
1364 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1365 gotten = kfifo_get(&mf_cpu->fifo, &entry);
1366 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1367 if (!gotten)
1368 break;
1369 if (entry.flags & MF_SOFT_OFFLINE)
1370 soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
1371 else
1372 memory_failure(entry.pfn, entry.flags);
1373 }
1374}
1375
1376static int __init memory_failure_init(void)
1377{
1378 struct memory_failure_cpu *mf_cpu;
1379 int cpu;
1380
1381 for_each_possible_cpu(cpu) {
1382 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1383 spin_lock_init(&mf_cpu->lock);
1384 INIT_KFIFO(mf_cpu->fifo);
1385 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
1386 }
1387
1388 return 0;
1389}
1390core_initcall(memory_failure_init);
1391
1392#define unpoison_pr_info(fmt, pfn, rs) \
1393({ \
1394 if (__ratelimit(rs)) \
1395 pr_info(fmt, pfn); \
1396})
1397
1398/**
1399 * unpoison_memory - Unpoison a previously poisoned page
1400 * @pfn: Page number of the to be unpoisoned page
1401 *
1402 * Software-unpoison a page that has been poisoned by
1403 * memory_failure() earlier.
1404 *
 1405 * This is only done at the software level, so it only works
 1406 * for Linux-injected failures, not real hardware failures.
1407 *
1408 * Returns 0 for success, otherwise -errno.
1409 */
1410int unpoison_memory(unsigned long pfn)
1411{
1412 struct page *page;
1413 struct page *p;
1414 int freeit = 0;
1415 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1416 DEFAULT_RATELIMIT_BURST);
1417
1418 if (!pfn_valid(pfn))
1419 return -ENXIO;
1420
1421 p = pfn_to_page(pfn);
1422 page = compound_head(p);
1423
1424 if (!PageHWPoison(p)) {
1425 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
1426 pfn, &unpoison_rs);
1427 return 0;
1428 }
1429
1430 if (page_count(page) > 1) {
1431 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
1432 pfn, &unpoison_rs);
1433 return 0;
1434 }
1435
1436 if (page_mapped(page)) {
1437 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
1438 pfn, &unpoison_rs);
1439 return 0;
1440 }
1441
1442 if (page_mapping(page)) {
1443 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
1444 pfn, &unpoison_rs);
1445 return 0;
1446 }
1447
1448 /*
1449 * unpoison_memory() can encounter thp only when the thp is being
 1450 * worked on by memory_failure() and the page lock is not held yet.
1451 * In such case, we yield to memory_failure() and make unpoison fail.
1452 */
1453 if (!PageHuge(page) && PageTransHuge(page)) {
1454 unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
1455 pfn, &unpoison_rs);
1456 return 0;
1457 }
1458
1459 if (!get_hwpoison_page(p)) {
1460 if (TestClearPageHWPoison(p))
1461 num_poisoned_pages_dec();
1462 unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
1463 pfn, &unpoison_rs);
1464 return 0;
1465 }
1466
1467 lock_page(page);
1468 /*
1469 * This test is racy because PG_hwpoison is set outside of page lock.
 1470 * That's acceptable because it won't trigger a kernel panic. Instead,
1471 * the PG_hwpoison page will be caught and isolated on the entrance to
1472 * the free buddy page pool.
1473 */
1474 if (TestClearPageHWPoison(page)) {
1475 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
1476 pfn, &unpoison_rs);
1477 num_poisoned_pages_dec();
1478 freeit = 1;
1479 }
1480 unlock_page(page);
1481
1482 put_hwpoison_page(page);
1483 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
1484 put_hwpoison_page(page);
1485
1486 return 0;
1487}
1488EXPORT_SYMBOL(unpoison_memory);
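/*
 * In practice unpoisoning is driven by the test machinery, e.g. the
 * hwpoison-inject module's debugfs "unpoison-pfn" file, since real
 * hardware errors cannot be undone from software.
 */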
1489
1490static struct page *new_page(struct page *p, unsigned long private)
1491{
1492 int nid = page_to_nid(p);
1493
1494 return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
1495}
1496
1497/*
1498 * Safely get reference count of an arbitrary page.
1499 * Returns 0 for a free page, -EIO for a zero refcount page
1500 * that is not free, and 1 for any other page type.
1501 * For 1 the page is returned with increased page count, otherwise not.
1502 */
1503static int __get_any_page(struct page *p, unsigned long pfn, int flags)
1504{
1505 int ret;
1506
1507 if (flags & MF_COUNT_INCREASED)
1508 return 1;
1509
1510 /*
1511 * When the target page is a free hugepage, just remove it
1512 * from free hugepage list.
1513 */
1514 if (!get_hwpoison_page(p)) {
1515 if (PageHuge(p)) {
1516 pr_info("%s: %#lx free huge page\n", __func__, pfn);
1517 ret = 0;
1518 } else if (is_free_buddy_page(p)) {
1519 pr_info("%s: %#lx free buddy page\n", __func__, pfn);
1520 ret = 0;
1521 } else {
1522 pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
1523 __func__, pfn, p->flags);
1524 ret = -EIO;
1525 }
1526 } else {
1527 /* Not a free page */
1528 ret = 1;
1529 }
1530 return ret;
1531}
1532
1533static int get_any_page(struct page *page, unsigned long pfn, int flags)
1534{
1535 int ret = __get_any_page(page, pfn, flags);
1536
1537 if (ret == 1 && !PageHuge(page) &&
1538 !PageLRU(page) && !__PageMovable(page)) {
1539 /*
1540 * Try to free it.
1541 */
1542 put_hwpoison_page(page);
1543 shake_page(page, 1);
1544
1545 /*
1546 * Did it turn free?
1547 */
1548 ret = __get_any_page(page, pfn, 0);
1549 if (ret == 1 && !PageLRU(page)) {
1550 /* Drop page reference which is from __get_any_page() */
1551 put_hwpoison_page(page);
1552 pr_info("soft_offline: %#lx: unknown non LRU page type %lx (%pGp)\n",
1553 pfn, page->flags, &page->flags);
1554 return -EIO;
1555 }
1556 }
1557 return ret;
1558}
1559
1560static int soft_offline_huge_page(struct page *page, int flags)
1561{
1562 int ret;
1563 unsigned long pfn = page_to_pfn(page);
1564 struct page *hpage = compound_head(page);
1565 LIST_HEAD(pagelist);
1566
1567 /*
1568 * This double-check of PageHWPoison is to avoid the race with
1569 * memory_failure(). See also comment in __soft_offline_page().
1570 */
1571 lock_page(hpage);
1572 if (PageHWPoison(hpage)) {
1573 unlock_page(hpage);
1574 put_hwpoison_page(hpage);
1575 pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
1576 return -EBUSY;
1577 }
1578 unlock_page(hpage);
1579
1580 ret = isolate_huge_page(hpage, &pagelist);
1581 /*
 1582 * get_any_page() and isolate_huge_page() take a refcount each,
 1583 * so we need to drop one here.
1584 */
1585 put_hwpoison_page(hpage);
1586 if (!ret) {
1587 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1588 return -EBUSY;
1589 }
1590
1591 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1592 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1593 if (ret) {
1594 pr_info("soft offline: %#lx: hugepage migration failed %d, type %lx (%pGp)\n",
1595 pfn, ret, page->flags, &page->flags);
1596 if (!list_empty(&pagelist))
1597 putback_movable_pages(&pagelist);
1598 if (ret > 0)
1599 ret = -EIO;
1600 } else {
1601 if (PageHuge(page))
1602 dissolve_free_huge_page(page);
1603 }
1604 return ret;
1605}
1606
1607static int __soft_offline_page(struct page *page, int flags)
1608{
1609 int ret;
1610 unsigned long pfn = page_to_pfn(page);
1611
1612 /*
1613 * Check PageHWPoison again inside page lock because PageHWPoison
1614 * is set by memory_failure() outside page lock. Note that
1615 * memory_failure() also double-checks PageHWPoison inside page lock,
1616 * so there's no race between soft_offline_page() and memory_failure().
1617 */
1618 lock_page(page);
1619 wait_on_page_writeback(page);
1620 if (PageHWPoison(page)) {
1621 unlock_page(page);
1622 put_hwpoison_page(page);
1623 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1624 return -EBUSY;
1625 }
1626 /*
1627 * Try to invalidate first. This should work for
1628 * non dirty unmapped page cache pages.
1629 */
1630 ret = invalidate_inode_page(page);
1631 unlock_page(page);
1632 /*
1633 * RED-PEN would be better to keep it isolated here, but we
1634 * would need to fix isolation locking first.
1635 */
1636 if (ret == 1) {
1637 put_hwpoison_page(page);
1638 pr_info("soft_offline: %#lx: invalidated\n", pfn);
1639 SetPageHWPoison(page);
1640 num_poisoned_pages_inc();
1641 return 0;
1642 }
1643
1644 /*
1645 * Simple invalidation didn't work.
1646 * Try to migrate to a new page instead. migrate.c
1647 * handles a large number of cases for us.
1648 */
1649 if (PageLRU(page))
1650 ret = isolate_lru_page(page);
1651 else
1652 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1653 /*
 1654 * Drop the page reference which came from get_any_page();
 1655 * a successful isolate_lru_page() already took another one.
1656 */
1657 put_hwpoison_page(page);
1658 if (!ret) {
1659 LIST_HEAD(pagelist);
1660 /*
 1661 * After an lru page has been isolated, PageLRU is cleared,
 1662 * so check !__PageMovable instead: an lru page's mapping
 1663 * cannot have PAGE_MAPPING_MOVABLE set.
1664 */
1665 if (!__PageMovable(page))
1666 inc_node_page_state(page, NR_ISOLATED_ANON +
1667 page_is_file_cache(page));
1668 list_add(&page->lru, &pagelist);
1669 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1670 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1671 if (ret) {
1672 if (!list_empty(&pagelist))
1673 putback_movable_pages(&pagelist);
1674
1675 pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
1676 pfn, ret, page->flags, &page->flags);
1677 if (ret > 0)
1678 ret = -EIO;
1679 }
1680 } else {
1681 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx (%pGp)\n",
1682 pfn, ret, page_count(page), page->flags, &page->flags);
1683 }
1684 return ret;
1685}
1686
1687static int soft_offline_in_use_page(struct page *page, int flags)
1688{
1689 int ret;
1690 struct page *hpage = compound_head(page);
1691
1692 if (!PageHuge(page) && PageTransHuge(hpage)) {
1693 lock_page(hpage);
1694 if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
1695 unlock_page(hpage);
1696 if (!PageAnon(hpage))
1697 pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
1698 else
1699 pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
1700 put_hwpoison_page(hpage);
1701 return -EBUSY;
1702 }
1703 unlock_page(hpage);
1704 get_hwpoison_page(page);
1705 put_hwpoison_page(hpage);
1706 }
1707
1708 if (PageHuge(page))
1709 ret = soft_offline_huge_page(page, flags);
1710 else
1711 ret = __soft_offline_page(page, flags);
1712
1713 return ret;
1714}
1715
1716static void soft_offline_free_page(struct page *page)
1717{
1718 struct page *head = compound_head(page);
1719
1720 if (!TestSetPageHWPoison(head)) {
1721 num_poisoned_pages_inc();
1722 if (PageHuge(head))
1723 dissolve_free_huge_page(page);
1724 }
1725}
1726
1727/**
1728 * soft_offline_page - Soft offline a page.
1729 * @page: page to offline
1730 * @flags: flags. Same as memory_failure().
1731 *
1732 * Returns 0 on success, otherwise negated errno.
1733 *
1734 * Soft offline a page, by migration or invalidation,
1735 * without killing anything. This is for the case when
1736 * a page is not corrupted yet (so it's still valid to access),
1737 * but has had a number of corrected errors and is better taken
1738 * out.
1739 *
1740 * The actual policy on when to do that is maintained by
1741 * user space.
1742 *
1743 * This should never impact any application or cause data loss,
1744 * however it might take some time.
1745 *
1746 * This is not a 100% solution for all memory, but tries to be
1747 * ``good enough'' for the majority of memory.
1748 */
1749int soft_offline_page(struct page *page, int flags)
1750{
1751 int ret;
1752 unsigned long pfn = page_to_pfn(page);
1753
1754 if (PageHWPoison(page)) {
1755 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1756 if (flags & MF_COUNT_INCREASED)
1757 put_hwpoison_page(page);
1758 return -EBUSY;
1759 }
1760
1761 get_online_mems();
1762 ret = get_any_page(page, pfn, flags);
1763 put_online_mems();
1764
1765 if (ret > 0)
1766 ret = soft_offline_in_use_page(page, flags);
1767 else if (ret == 0)
1768 soft_offline_free_page(page);
1769
1770 return ret;
1771}
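/*
 * For reference, a sketch of how soft offlining is typically driven from
 * user space (path from the memory sysfs ABI, not defined in this file):
 *
 *	# request soft offline of the page backing a physical address
 *	echo 0x1234000 > /sys/devices/system/memory/soft_offline_page
 *
 * RAS tooling (e.g. mcelog) uses the same mechanism when a page shows
 * repeated corrected errors.
 */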
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008, 2009 Intel Corporation
4 * Authors: Andi Kleen, Fengguang Wu
5 *
6 * High level machine check handler. Handles pages reported by the
7 * hardware as being corrupted usually due to a multi-bit ECC memory or cache
8 * failure.
9 *
10 * In addition there is a "soft offline" entry point that allows stop using
11 * not-yet-corrupted-by-suspicious pages without killing anything.
12 *
13 * Handles page cache pages in various states. The tricky part
14 * here is that we can access any page asynchronously in respect to
15 * other VM users, because memory failures could happen anytime and
16 * anywhere. This could violate some of their assumptions. This is why
17 * this code has to be extremely careful. Generally it tries to use
18 * normal locking rules, as in get the standard locks, even if that means
19 * the error handling takes potentially a long time.
20 *
21 * It can be very tempting to add handling for obscure cases here.
22 * In general any code for handling new cases should only be added iff:
23 * - You know how to test it.
24 * - You have a test that can be added to mce-test
25 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
26 * - The case actually shows up as a frequent (top 10) page state in
27 * tools/vm/page-types when running a real workload.
28 *
29 * There are several operations here with exponential complexity because
30 * of unsuitable VM data structures. For example the operation to map back
31 * from RMAP chains to processes has to walk the complete process list and
32 * has non linear complexity with the number. But since memory corruptions
33 * are rare we hope to get away with this. This avoids impacting the core
34 * VM.
35 */
36#include <linux/kernel.h>
37#include <linux/mm.h>
38#include <linux/page-flags.h>
39#include <linux/kernel-page-flags.h>
40#include <linux/sched/signal.h>
41#include <linux/sched/task.h>
42#include <linux/ksm.h>
43#include <linux/rmap.h>
44#include <linux/export.h>
45#include <linux/pagemap.h>
46#include <linux/swap.h>
47#include <linux/backing-dev.h>
48#include <linux/migrate.h>
49#include <linux/suspend.h>
50#include <linux/slab.h>
51#include <linux/swapops.h>
52#include <linux/hugetlb.h>
53#include <linux/memory_hotplug.h>
54#include <linux/mm_inline.h>
55#include <linux/memremap.h>
56#include <linux/kfifo.h>
57#include <linux/ratelimit.h>
58#include <linux/page-isolation.h>
59#include "internal.h"
60#include "ras/ras_event.h"
61
62int sysctl_memory_failure_early_kill __read_mostly = 0;
63
64int sysctl_memory_failure_recovery __read_mostly = 1;
65
66atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
67
68#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69
70u32 hwpoison_filter_enable = 0;
71u32 hwpoison_filter_dev_major = ~0U;
72u32 hwpoison_filter_dev_minor = ~0U;
73u64 hwpoison_filter_flags_mask;
74u64 hwpoison_filter_flags_value;
75EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
76EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
77EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
78EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
79EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
80
81static int hwpoison_filter_dev(struct page *p)
82{
83 struct address_space *mapping;
84 dev_t dev;
85
86 if (hwpoison_filter_dev_major == ~0U &&
87 hwpoison_filter_dev_minor == ~0U)
88 return 0;
89
90 /*
91 * page_mapping() does not accept slab pages.
92 */
93 if (PageSlab(p))
94 return -EINVAL;
95
96 mapping = page_mapping(p);
97 if (mapping == NULL || mapping->host == NULL)
98 return -EINVAL;
99
100 dev = mapping->host->i_sb->s_dev;
101 if (hwpoison_filter_dev_major != ~0U &&
102 hwpoison_filter_dev_major != MAJOR(dev))
103 return -EINVAL;
104 if (hwpoison_filter_dev_minor != ~0U &&
105 hwpoison_filter_dev_minor != MINOR(dev))
106 return -EINVAL;
107
108 return 0;
109}
110
111static int hwpoison_filter_flags(struct page *p)
112{
113 if (!hwpoison_filter_flags_mask)
114 return 0;
115
116 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
117 hwpoison_filter_flags_value)
118 return 0;
119 else
120 return -EINVAL;
121}
122
123/*
124 * This allows stress tests to limit test scope to a collection of tasks
125 * by putting them under some memcg. This prevents killing unrelated/important
126 * processes such as /sbin/init. Note that the target task may share clean
127 * pages with init (e.g. libc text), which is harmless. If the target task
128 * shares _dirty_ pages with another task B, the test scheme must make sure B
129 * is also included in the memcg. Finally, due to race conditions this filter
130 * can only guarantee that the page either belongs to the memcg tasks, or is
131 * a freed page.
132 */
133#ifdef CONFIG_MEMCG
134u64 hwpoison_filter_memcg;
135EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
136static int hwpoison_filter_task(struct page *p)
137{
138 if (!hwpoison_filter_memcg)
139 return 0;
140
141 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
142 return -EINVAL;
143
144 return 0;
145}
146#else
147static int hwpoison_filter_task(struct page *p) { return 0; }
148#endif
149
150int hwpoison_filter(struct page *p)
151{
152 if (!hwpoison_filter_enable)
153 return 0;
154
155 if (hwpoison_filter_dev(p))
156 return -EINVAL;
157
158 if (hwpoison_filter_flags(p))
159 return -EINVAL;
160
161 if (hwpoison_filter_task(p))
162 return -EINVAL;
163
164 return 0;
165}
166#else
167int hwpoison_filter(struct page *p)
168{
169 return 0;
170}
171#endif
172
173EXPORT_SYMBOL_GPL(hwpoison_filter);
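/*
 * The filter knobs above are meant to be set by an injection/test harness
 * (for example the hwpoison injector) before poisoning a pfn, so that only
 * pages matching the filter are acted on.  A hedged, illustrative sketch of
 * such a caller; the surrounding test module and the chosen filter values
 * are assumptions, not an existing in-tree API:
 *
 *	hwpoison_filter_flags_mask  = 1ULL << KPF_LRU;
 *	hwpoison_filter_flags_value = 1ULL << KPF_LRU;
 *	hwpoison_filter_enable = 1;
 *	memory_failure(pfn, 0);
 *
 * With these settings memory_failure() only acts on LRU pages; pages that
 * the filter rejects are unpoisoned again and skipped.
 */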
174
175/*
176 * Kill all processes that have a poisoned page mapped and then isolate
177 * the page.
178 *
179 * General strategy:
180 * Find all processes having the page mapped and kill them.
181 * But we keep a page reference around so that the page is not
182 * actually freed yet.
183 * Then stash the page away
184 *
185 * There's no convenient way to get back to mapped processes
186 * from the VMAs. So do a brute-force search over all
187 * running processes.
188 *
189 * Remember that machine checks are not common (or rather
190 * if they are common you have other problems), so this shouldn't
191 * be a performance issue.
192 *
193 * Also there are some races possible while we get from the
194 * error detection to actually handle it.
195 */
196
197struct to_kill {
198 struct list_head nd;
199 struct task_struct *tsk;
200 unsigned long addr;
201 short size_shift;
202};
203
204/*
205 * Send all the processes that have the page mapped a signal.
206 * ``action optional'' if they are not immediately affected by the error
207 * ``action required'' if error happened in current execution context
208 */
209static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
210{
211 struct task_struct *t = tk->tsk;
212 short addr_lsb = tk->size_shift;
213 int ret;
214
215 pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
216 pfn, t->comm, t->pid);
217
218 if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
219 ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)tk->addr,
220 addr_lsb);
221 } else {
222 /*
223 * Don't use force here, it's convenient if the signal
224 * can be temporarily blocked.
225 * This could cause a loop when the user sets SIGBUS
226 * to SIG_IGN, but hopefully no one will do that?
227 */
228 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
229 addr_lsb, t); /* synchronous? */
230 }
231 if (ret < 0)
232 pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
233 t->comm, t->pid, ret);
234 return ret;
235}
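/*
 * For reference: a process that opted in to early kill (PR_MCE_KILL) would
 * normally catch the SIGBUS sent above with an SA_SIGINFO handler and look
 * at si_code, si_addr and si_addr_lsb (the poisoned range starts at si_addr
 * and spans 1 << si_addr_lsb bytes).  A minimal user-space sketch, not part
 * of this file and shown only to illustrate the signal contract:
 *
 *	static volatile sig_atomic_t saw_poison;
 *
 *	static void mce_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO || si->si_code == BUS_MCEERR_AR)
 *			saw_poison = 1;
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = mce_handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGBUS, &sa, NULL);
 */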
236
237/*
238 * When an unknown page type is encountered, drain as many buffers as possible
239 * in the hope of turning the page into an LRU or free page, which we can handle.
240 */
241void shake_page(struct page *p, int access)
242{
243 if (PageHuge(p))
244 return;
245
246 if (!PageSlab(p)) {
247 lru_add_drain_all();
248 if (PageLRU(p))
249 return;
250 drain_all_pages(page_zone(p));
251 if (PageLRU(p) || is_free_buddy_page(p))
252 return;
253 }
254
255 /*
256	 * Only call drop_slab_node() here (which would also shrink
257 * other caches) if access is not potentially fatal.
258 */
259 if (access)
260 drop_slab_node(page_to_nid(p));
261}
262EXPORT_SYMBOL_GPL(shake_page);
263
264static unsigned long dev_pagemap_mapping_shift(struct page *page,
265 struct vm_area_struct *vma)
266{
267 unsigned long address = vma_address(page, vma);
268 pgd_t *pgd;
269 p4d_t *p4d;
270 pud_t *pud;
271 pmd_t *pmd;
272 pte_t *pte;
273
274 pgd = pgd_offset(vma->vm_mm, address);
275 if (!pgd_present(*pgd))
276 return 0;
277 p4d = p4d_offset(pgd, address);
278 if (!p4d_present(*p4d))
279 return 0;
280 pud = pud_offset(p4d, address);
281 if (!pud_present(*pud))
282 return 0;
283 if (pud_devmap(*pud))
284 return PUD_SHIFT;
285 pmd = pmd_offset(pud, address);
286 if (!pmd_present(*pmd))
287 return 0;
288 if (pmd_devmap(*pmd))
289 return PMD_SHIFT;
290 pte = pte_offset_map(pmd, address);
291 if (!pte_present(*pte))
292 return 0;
293 if (pte_devmap(*pte))
294 return PAGE_SHIFT;
295 return 0;
296}
297
298/*
299 * Failure handling: if we can't find or can't kill a process there's
300 * not much we can do. We just print a message and ignore otherwise.
301 */
302
303/*
304 * Schedule a process for later kill.
305 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
306 * TBD would GFP_NOIO be enough?
307 */
308static void add_to_kill(struct task_struct *tsk, struct page *p,
309 struct vm_area_struct *vma,
310 struct list_head *to_kill,
311 struct to_kill **tkc)
312{
313 struct to_kill *tk;
314
315 if (*tkc) {
316 tk = *tkc;
317 *tkc = NULL;
318 } else {
319 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
320 if (!tk) {
321 pr_err("Memory failure: Out of memory while machine check handling\n");
322 return;
323 }
324 }
325 tk->addr = page_address_in_vma(p, vma);
326 if (is_zone_device_page(p))
327 tk->size_shift = dev_pagemap_mapping_shift(p, vma);
328 else
329 tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
330
331	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
332	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
333	 * "tk->size_shift == 0" effectively checks for no mapping on
334 * so "tk->size_shift == 0" effectively checks no mapping on
335 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
336 * to a process' address space, it's possible not all N VMAs
337 * contain mappings for the page, but at least one VMA does.
338 * Only deliver SIGBUS with payload derived from the VMA that
339 * has a mapping for the page.
340 */
341 if (tk->addr == -EFAULT) {
342 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
343 page_to_pfn(p), tsk->comm);
344 } else if (tk->size_shift == 0) {
345 kfree(tk);
346 return;
347 }
348 get_task_struct(tsk);
349 tk->tsk = tsk;
350 list_add_tail(&tk->nd, to_kill);
351}
352
353/*
354 * Kill the processes that have been collected earlier.
355 *
356 * Only do anything when FORCEKILL is set, otherwise just free the list
357 * (this is used for clean pages which do not need killing).
358 * Also when FAIL is set do a force kill because something went
359 * wrong earlier.
360 */
361static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
362 unsigned long pfn, int flags)
363{
364 struct to_kill *tk, *next;
365
366 list_for_each_entry_safe (tk, next, to_kill, nd) {
367 if (forcekill) {
368 /*
369 * In case something went wrong with munmapping
370 * make sure the process doesn't catch the
371 * signal and then access the memory. Just kill it.
372 */
373 if (fail || tk->addr == -EFAULT) {
374 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
375 pfn, tk->tsk->comm, tk->tsk->pid);
376 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
377 tk->tsk, PIDTYPE_PID);
378 }
379
380 /*
381 * In theory the process could have mapped
382 * something else on the address in-between. We could
383 * check for that, but we need to tell the
384 * process anyways.
385 */
386 else if (kill_proc(tk, pfn, flags) < 0)
387 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
388 pfn, tk->tsk->comm, tk->tsk->pid);
389 }
390 put_task_struct(tk->tsk);
391 kfree(tk);
392 }
393}
394
395/*
396 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
397 * on behalf of the thread group. Return task_struct of the (first found)
398 * dedicated thread if found, and return NULL otherwise.
399 *
400 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
401 * have to call rcu_read_lock/unlock() in this function.
402 */
403static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
404{
405 struct task_struct *t;
406
407 for_each_thread(tsk, t)
408 if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
409 return t;
410 return NULL;
411}
412
413/*
414 * Determine whether a given process is an "early kill" process which expects
415 * to be signaled when some page under the process is hwpoisoned.
416 * Return task_struct of the dedicated thread (main thread unless explicitly
417 * specified) if the process is "early kill," and otherwise returns NULL.
418 */
419static struct task_struct *task_early_kill(struct task_struct *tsk,
420 int force_early)
421{
422 struct task_struct *t;
423 if (!tsk->mm)
424 return NULL;
425 if (force_early)
426 return tsk;
427 t = find_early_kill_thread(tsk);
428 if (t)
429 return t;
430 if (sysctl_memory_failure_early_kill)
431 return tsk;
432 return NULL;
433}
434
435/*
436 * Collect processes when the error hit an anonymous page.
437 */
438static void collect_procs_anon(struct page *page, struct list_head *to_kill,
439 struct to_kill **tkc, int force_early)
440{
441 struct vm_area_struct *vma;
442 struct task_struct *tsk;
443 struct anon_vma *av;
444 pgoff_t pgoff;
445
446 av = page_lock_anon_vma_read(page);
447 if (av == NULL) /* Not actually mapped anymore */
448 return;
449
450 pgoff = page_to_pgoff(page);
451 read_lock(&tasklist_lock);
452 for_each_process (tsk) {
453 struct anon_vma_chain *vmac;
454 struct task_struct *t = task_early_kill(tsk, force_early);
455
456 if (!t)
457 continue;
458 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
459 pgoff, pgoff) {
460 vma = vmac->vma;
461 if (!page_mapped_in_vma(page, vma))
462 continue;
463 if (vma->vm_mm == t->mm)
464 add_to_kill(t, page, vma, to_kill, tkc);
465 }
466 }
467 read_unlock(&tasklist_lock);
468 page_unlock_anon_vma_read(av);
469}
470
471/*
472 * Collect processes when the error hit a file mapped page.
473 */
474static void collect_procs_file(struct page *page, struct list_head *to_kill,
475 struct to_kill **tkc, int force_early)
476{
477 struct vm_area_struct *vma;
478 struct task_struct *tsk;
479 struct address_space *mapping = page->mapping;
480
481 i_mmap_lock_read(mapping);
482 read_lock(&tasklist_lock);
483 for_each_process(tsk) {
484 pgoff_t pgoff = page_to_pgoff(page);
485 struct task_struct *t = task_early_kill(tsk, force_early);
486
487 if (!t)
488 continue;
489 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
490 pgoff) {
491 /*
492 * Send early kill signal to tasks where a vma covers
493 * the page but the corrupted page is not necessarily
494			 * mapped in its pte.
495 * Assume applications who requested early kill want
496 * to be informed of all such data corruptions.
497 */
498 if (vma->vm_mm == t->mm)
499 add_to_kill(t, page, vma, to_kill, tkc);
500 }
501 }
502 read_unlock(&tasklist_lock);
503 i_mmap_unlock_read(mapping);
504}
505
506/*
507 * Collect the processes that have the corrupted page mapped to kill.
508 * This is done in two steps for locking reasons.
509 * First preallocate one tokill structure outside the spin locks,
510 * so that we can kill at least one process reasonably reliably.
511 */
512static void collect_procs(struct page *page, struct list_head *tokill,
513 int force_early)
514{
515 struct to_kill *tk;
516
517 if (!page->mapping)
518 return;
519
520 tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
521 if (!tk)
522 return;
523 if (PageAnon(page))
524 collect_procs_anon(page, tokill, &tk, force_early);
525 else
526 collect_procs_file(page, tokill, &tk, force_early);
527 kfree(tk);
528}
529
530static const char *action_name[] = {
531 [MF_IGNORED] = "Ignored",
532 [MF_FAILED] = "Failed",
533 [MF_DELAYED] = "Delayed",
534 [MF_RECOVERED] = "Recovered",
535};
536
537static const char * const action_page_types[] = {
538 [MF_MSG_KERNEL] = "reserved kernel page",
539 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
540 [MF_MSG_SLAB] = "kernel slab page",
541 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
542 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
543 [MF_MSG_HUGE] = "huge page",
544 [MF_MSG_FREE_HUGE] = "free huge page",
545 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
546 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
547 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
548 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
549 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
550 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
551 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
552 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
553 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
554 [MF_MSG_CLEAN_LRU] = "clean LRU page",
555 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
556 [MF_MSG_BUDDY] = "free buddy page",
557 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
558 [MF_MSG_DAX] = "dax page",
559 [MF_MSG_UNKNOWN] = "unknown page",
560};
561
562/*
563 * XXX: It is possible that a page is isolated from LRU cache,
564 * and then kept in swap cache or fails to be removed from page cache.
565 * The page count will stop it from being freed by unpoison.
566 * Stress tests should be aware of this memory leak problem.
567 */
568static int delete_from_lru_cache(struct page *p)
569{
570 if (!isolate_lru_page(p)) {
571 /*
572		 * Clear the relevant page flags, so that the buddy system won't
573 * complain when the page is unpoison-and-freed.
574 */
575 ClearPageActive(p);
576 ClearPageUnevictable(p);
577
578 /*
579 * Poisoned page might never drop its ref count to 0 so we have
580 * to uncharge it manually from its memcg.
581 */
582 mem_cgroup_uncharge(p);
583
584 /*
585 * drop the page count elevated by isolate_lru_page()
586 */
587 put_page(p);
588 return 0;
589 }
590 return -EIO;
591}
592
593static int truncate_error_page(struct page *p, unsigned long pfn,
594 struct address_space *mapping)
595{
596 int ret = MF_FAILED;
597
598 if (mapping->a_ops->error_remove_page) {
599 int err = mapping->a_ops->error_remove_page(mapping, p);
600
601 if (err != 0) {
602 pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
603 pfn, err);
604 } else if (page_has_private(p) &&
605 !try_to_release_page(p, GFP_NOIO)) {
606 pr_info("Memory failure: %#lx: failed to release buffers\n",
607 pfn);
608 } else {
609 ret = MF_RECOVERED;
610 }
611 } else {
612 /*
613 * If the file system doesn't support it just invalidate
614 * This fails on dirty or anything with private pages
615 */
616 if (invalidate_inode_page(p))
617 ret = MF_RECOVERED;
618 else
619 pr_info("Memory failure: %#lx: Failed to invalidate\n",
620 pfn);
621 }
622
623 return ret;
624}
625
626/*
627 * Error hit kernel page.
628 * Do nothing, try to be lucky and not touch this instead. For a few cases we
629 * could be more sophisticated.
630 */
631static int me_kernel(struct page *p, unsigned long pfn)
632{
633 return MF_IGNORED;
634}
635
636/*
637 * Page in unknown state. Do nothing.
638 */
639static int me_unknown(struct page *p, unsigned long pfn)
640{
641 pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
642 return MF_FAILED;
643}
644
645/*
646 * Clean (or cleaned) page cache page.
647 */
648static int me_pagecache_clean(struct page *p, unsigned long pfn)
649{
650 struct address_space *mapping;
651
652 delete_from_lru_cache(p);
653
654 /*
655	 * For anonymous pages we're done; the only reference left
656 * should be the one m_f() holds.
657 */
658 if (PageAnon(p))
659 return MF_RECOVERED;
660
661 /*
662 * Now truncate the page in the page cache. This is really
663	 * more like a "temporary hole punch".
664 * Don't do this for block devices when someone else
665 * has a reference, because it could be file system metadata
666 * and that's not safe to truncate.
667 */
668 mapping = page_mapping(p);
669 if (!mapping) {
670 /*
671		 * Page has been torn down in the meantime
672 */
673 return MF_FAILED;
674 }
675
676 /*
677 * Truncation is a bit tricky. Enable it per file system for now.
678 *
679 * Open: to take i_mutex or not for this? Right now we don't.
680 */
681 return truncate_error_page(p, pfn, mapping);
682}
683
684/*
685 * Dirty pagecache page
686 * Issues: when the error hit a hole page the error is not properly
687 * propagated.
688 */
689static int me_pagecache_dirty(struct page *p, unsigned long pfn)
690{
691 struct address_space *mapping = page_mapping(p);
692
693 SetPageError(p);
694 /* TBD: print more information about the file. */
695 if (mapping) {
696 /*
697 * IO error will be reported by write(), fsync(), etc.
698 * who check the mapping.
699 * This way the application knows that something went
700 * wrong with its dirty file data.
701 *
702 * There's one open issue:
703 *
704 * The EIO will be only reported on the next IO
705 * operation and then cleared through the IO map.
706		 * Normally Linux has two mechanisms to pass IO errors:
707		 * first through the AS_EIO flag in the address space
708		 * and then through the PageError flag in the page.
709		 * Since we drop pages on memory failure handling the
710		 * only mechanism open to use is through AS_EIO.
711 *
712 * This has the disadvantage that it gets cleared on
713 * the first operation that returns an error, while
714 * the PageError bit is more sticky and only cleared
715 * when the page is reread or dropped. If an
716		 * application assumes it will always get an error on
717		 * fsync, but does other operations on the fd before,
718		 * and the page is dropped in between, then the error
719		 * will not be properly reported.
720 *
721 * This can already happen even without hwpoisoned
722 * pages: first on metadata IO errors (which only
723 * report through AS_EIO) or when the page is dropped
724 * at the wrong time.
725 *
726 * So right now we assume that the application DTRT on
727 * the first EIO, but we're not worse than other parts
728 * of the kernel.
729 */
730 mapping_set_error(mapping, -EIO);
731 }
732
733 return me_pagecache_clean(p, pfn);
734}
735
736/*
737 * Clean and dirty swap cache.
738 *
739 * Dirty swap cache page is tricky to handle. The page could live both in page
740 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
741 * referenced concurrently by 2 types of PTEs:
742 * normal PTEs and swap PTEs. We try to handle them consistently by calling
743 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
744 * and then
745 * - clear dirty bit to prevent IO
746 * - remove from LRU
747 * - but keep in the swap cache, so that when we return to it on
748 * a later page fault, we know the application is accessing
749 * corrupted data and shall be killed (we installed simple
750 * interception code in do_swap_page to catch it).
751 *
752 * Clean swap cache pages can be directly isolated. A later page fault will
753 * bring in the known good data from disk.
754 */
755static int me_swapcache_dirty(struct page *p, unsigned long pfn)
756{
757 ClearPageDirty(p);
758 /* Trigger EIO in shmem: */
759 ClearPageUptodate(p);
760
761 if (!delete_from_lru_cache(p))
762 return MF_DELAYED;
763 else
764 return MF_FAILED;
765}
766
767static int me_swapcache_clean(struct page *p, unsigned long pfn)
768{
769 delete_from_swap_cache(p);
770
771 if (!delete_from_lru_cache(p))
772 return MF_RECOVERED;
773 else
774 return MF_FAILED;
775}
776
777/*
778 * Huge pages. Needs work.
779 * Issues:
780 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
781 * To narrow down kill region to one page, we need to break up pmd.
782 */
783static int me_huge_page(struct page *p, unsigned long pfn)
784{
785 int res = 0;
786 struct page *hpage = compound_head(p);
787 struct address_space *mapping;
788
789 if (!PageHuge(hpage))
790 return MF_DELAYED;
791
792 mapping = page_mapping(hpage);
793 if (mapping) {
794 res = truncate_error_page(hpage, pfn, mapping);
795 } else {
796 unlock_page(hpage);
797 /*
798		 * The migration entry prevents later access to the error anonymous
799		 * hugepage, so we can free it and dissolve it into the buddy
800		 * allocator to save the healthy subpages.
801 */
802 if (PageAnon(hpage))
803 put_page(hpage);
804 dissolve_free_huge_page(p);
805 res = MF_RECOVERED;
806 lock_page(hpage);
807 }
808
809 return res;
810}
811
812/*
813 * Various page states we can handle.
814 *
815 * A page state is defined by its current page->flags bits.
816 * The table matches them in order and calls the right handler.
817 *
818 * This is quite tricky because we can access the page at any time
819 * in its life cycle, so all accesses have to be extremely careful.
820 *
821 * This is not complete. More states could be added.
822 * For any missing state don't attempt recovery.
823 */
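/*
 * For example, with the bit masks defined below, a dirty page-cache page
 * on the LRU matches the { lru|dirty, lru|dirty } entry and is handled by
 * me_pagecache_dirty(), while the same page once cleaned matches
 * { lru|dirty, lru } and is handled by me_pagecache_clean().
 */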
824
825#define dirty (1UL << PG_dirty)
826#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
827#define unevict (1UL << PG_unevictable)
828#define mlock (1UL << PG_mlocked)
829#define writeback (1UL << PG_writeback)
830#define lru (1UL << PG_lru)
831#define head (1UL << PG_head)
832#define slab (1UL << PG_slab)
833#define reserved (1UL << PG_reserved)
834
835static struct page_state {
836 unsigned long mask;
837 unsigned long res;
838 enum mf_action_page_type type;
839 int (*action)(struct page *p, unsigned long pfn);
840} error_states[] = {
841 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
842 /*
843 * free pages are specially detected outside this table:
844	 * PG_buddy pages only make up a small fraction of all free pages.
845 */
846
847 /*
848 * Could in theory check if slab page is free or if we can drop
849 * currently unused objects without touching them. But just
850 * treat it as standard kernel for now.
851 */
852 { slab, slab, MF_MSG_SLAB, me_kernel },
853
854 { head, head, MF_MSG_HUGE, me_huge_page },
855
856 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
857 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
858
859 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
860 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
861
862 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
863 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
864
865 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
866 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
867
868 /*
869 * Catchall entry: must be at end.
870 */
871 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
872};
873
874#undef dirty
875#undef sc
876#undef unevict
877#undef mlock
878#undef writeback
879#undef lru
880#undef head
881#undef slab
882#undef reserved
883
884/*
885 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
886 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
887 */
888static void action_result(unsigned long pfn, enum mf_action_page_type type,
889 enum mf_result result)
890{
891 trace_memory_failure_event(pfn, type, result);
892
893 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
894 pfn, action_page_types[type], action_name[result]);
895}
896
897static int page_action(struct page_state *ps, struct page *p,
898 unsigned long pfn)
899{
900 int result;
901 int count;
902
903 result = ps->action(p, pfn);
904
905 count = page_count(p) - 1;
906 if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
907 count--;
908 if (count > 0) {
909 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
910 pfn, action_page_types[ps->type], count);
911 result = MF_FAILED;
912 }
913 action_result(pfn, ps->type, result);
914
915 /* Could do more checks here if page looks ok */
916 /*
917 * Could adjust zone counters here to correct for the missing page.
918 */
919
920 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
921}
922
923/**
924 * get_hwpoison_page() - Get refcount for memory error handling:
925 * @page: raw error page (hit by memory error)
926 *
927 * Return: 0 if we failed to grab the refcount, otherwise some
928 * non-zero (true) value.
929 */
930int get_hwpoison_page(struct page *page)
931{
932 struct page *head = compound_head(page);
933
934 if (!PageHuge(head) && PageTransHuge(head)) {
935 /*
936 * Non anonymous thp exists only in allocation/free time. We
937 * can't handle such a case correctly, so let's give it up.
938 * This should be better than triggering BUG_ON when kernel
939 * tries to touch the "partially handled" page.
940 */
941 if (!PageAnon(head)) {
942 pr_err("Memory failure: %#lx: non anonymous thp\n",
943 page_to_pfn(page));
944 return 0;
945 }
946 }
947
948 if (get_page_unless_zero(head)) {
949 if (head == compound_head(page))
950 return 1;
951
952 pr_info("Memory failure: %#lx cannot catch tail\n",
953 page_to_pfn(page));
954 put_page(head);
955 }
956
957 return 0;
958}
959EXPORT_SYMBOL_GPL(get_hwpoison_page);
960
961/*
962 * Do all that is necessary to remove user space mappings. Unmap
963 * the pages and send SIGBUS to the processes if the data was dirty.
964 */
965static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
966 int flags, struct page **hpagep)
967{
968 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
969 struct address_space *mapping;
970 LIST_HEAD(tokill);
971 bool unmap_success;
972 int kill = 1, forcekill;
973 struct page *hpage = *hpagep;
974 bool mlocked = PageMlocked(hpage);
975
976 /*
977 * Here we are interested only in user-mapped pages, so skip any
978 * other types of pages.
979 */
980 if (PageReserved(p) || PageSlab(p))
981 return true;
982 if (!(PageLRU(hpage) || PageHuge(p)))
983 return true;
984
985 /*
986 * This check implies we don't kill processes if their pages
987 * are in the swap cache early. Those are always late kills.
988 */
989 if (!page_mapped(hpage))
990 return true;
991
992 if (PageKsm(p)) {
993 pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
994 return false;
995 }
996
997 if (PageSwapCache(p)) {
998 pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
999 pfn);
1000 ttu |= TTU_IGNORE_HWPOISON;
1001 }
1002
1003 /*
1004 * Propagate the dirty bit from PTEs to struct page first, because we
1005 * need this to decide if we should kill or just drop the page.
1006 * XXX: the dirty test could be racy: set_page_dirty() may not always
1007 * be called inside page lock (it's recommended but not enforced).
1008 */
1009 mapping = page_mapping(hpage);
1010 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1011 mapping_cap_writeback_dirty(mapping)) {
1012 if (page_mkclean(hpage)) {
1013 SetPageDirty(hpage);
1014 } else {
1015 kill = 0;
1016 ttu |= TTU_IGNORE_HWPOISON;
1017 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
1018 pfn);
1019 }
1020 }
1021
1022 /*
1023 * First collect all the processes that have the page
1024 * mapped in dirty form. This has to be done before try_to_unmap,
1025 * because ttu takes the rmap data structures down.
1026 *
1027 * Error handling: We ignore errors here because
1028 * there's nothing that can be done.
1029 */
1030 if (kill)
1031 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
1032
1033 unmap_success = try_to_unmap(hpage, ttu);
1034 if (!unmap_success)
1035 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
1036 pfn, page_mapcount(hpage));
1037
1038 /*
1039 * try_to_unmap() might put mlocked page in lru cache, so call
1040 * shake_page() again to ensure that it's flushed.
1041 */
1042 if (mlocked)
1043 shake_page(hpage, 0);
1044
1045 /*
1046 * Now that the dirty bit has been propagated to the
1047 * struct page and all unmaps done we can decide if
1048 * killing is needed or not. Only kill when the page
1049 * was dirty or the process is not restartable,
1050 * otherwise the tokill list is merely
1051 * freed. When there was a problem unmapping earlier
1052	 * freed. When there was a problem unmapping earlier,
1053	 * use a more forceful, uncatchable kill to prevent
1054 */
1055 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
1056 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1057
1058 return unmap_success;
1059}
1060
1061static int identify_page_state(unsigned long pfn, struct page *p,
1062 unsigned long page_flags)
1063{
1064 struct page_state *ps;
1065
1066 /*
1067 * The first check uses the current page flags which may not have any
1068 * relevant information. The second check with the saved page flags is
1069 * carried out only if the first check can't determine the page status.
1070 */
1071 for (ps = error_states;; ps++)
1072 if ((p->flags & ps->mask) == ps->res)
1073 break;
1074
1075 page_flags |= (p->flags & (1UL << PG_dirty));
1076
1077 if (!ps->mask)
1078 for (ps = error_states;; ps++)
1079 if ((page_flags & ps->mask) == ps->res)
1080 break;
1081 return page_action(ps, p, pfn);
1082}
1083
1084static int memory_failure_hugetlb(unsigned long pfn, int flags)
1085{
1086 struct page *p = pfn_to_page(pfn);
1087 struct page *head = compound_head(p);
1088 int res;
1089 unsigned long page_flags;
1090
1091 if (TestSetPageHWPoison(head)) {
1092 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1093 pfn);
1094 return 0;
1095 }
1096
1097 num_poisoned_pages_inc();
1098
1099 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1100 /*
1101 * Check "filter hit" and "race with other subpage."
1102 */
1103 lock_page(head);
1104 if (PageHWPoison(head)) {
1105 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1106 || (p != head && TestSetPageHWPoison(head))) {
1107 num_poisoned_pages_dec();
1108 unlock_page(head);
1109 return 0;
1110 }
1111 }
1112 unlock_page(head);
1113 dissolve_free_huge_page(p);
1114 action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED);
1115 return 0;
1116 }
1117
1118 lock_page(head);
1119 page_flags = head->flags;
1120
1121 if (!PageHWPoison(head)) {
1122 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1123 num_poisoned_pages_dec();
1124 unlock_page(head);
1125 put_hwpoison_page(head);
1126 return 0;
1127 }
1128
1129 /*
1130 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
1131 * simply disable it. In order to make it work properly, we need
1132	 * to make sure that:
1133 * - conversion of a pud that maps an error hugetlb into hwpoison
1134 * entry properly works, and
1135 * - other mm code walking over page table is aware of pud-aligned
1136 * hwpoison entries.
1137 */
1138 if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
1139 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
1140 res = -EBUSY;
1141 goto out;
1142 }
1143
1144 if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
1145 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1146 res = -EBUSY;
1147 goto out;
1148 }
1149
1150 res = identify_page_state(pfn, p, page_flags);
1151out:
1152 unlock_page(head);
1153 return res;
1154}
1155
1156static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
1157 struct dev_pagemap *pgmap)
1158{
1159 struct page *page = pfn_to_page(pfn);
1160 const bool unmap_success = true;
1161 unsigned long size = 0;
1162 struct to_kill *tk;
1163 LIST_HEAD(tokill);
1164 int rc = -EBUSY;
1165 loff_t start;
1166 dax_entry_t cookie;
1167
1168 /*
1169 * Prevent the inode from being freed while we are interrogating
1170 * the address_space, typically this would be handled by
1171 * lock_page(), but dax pages do not use the page lock. This
1172 * also prevents changes to the mapping of this pfn until
1173 * poison signaling is complete.
1174 */
1175 cookie = dax_lock_page(page);
1176 if (!cookie)
1177 goto out;
1178
1179 if (hwpoison_filter(page)) {
1180 rc = 0;
1181 goto unlock;
1182 }
1183
1184 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
1185 /*
1186 * TODO: Handle HMM pages which may need coordination
1187 * with device-side memory.
1188 */
1189 goto unlock;
1190 }
1191
1192 /*
1193 * Use this flag as an indication that the dax page has been
1194 * remapped UC to prevent speculative consumption of poison.
1195 */
1196 SetPageHWPoison(page);
1197
1198 /*
1199 * Unlike System-RAM there is no possibility to swap in a
1200 * different physical page at a given virtual address, so all
1201 * userspace consumption of ZONE_DEVICE memory necessitates
1202 * SIGBUS (i.e. MF_MUST_KILL)
1203 */
1204 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1205 collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
1206
1207 list_for_each_entry(tk, &tokill, nd)
1208 if (tk->size_shift)
1209 size = max(size, 1UL << tk->size_shift);
1210 if (size) {
1211 /*
1212 * Unmap the largest mapping to avoid breaking up
1213 * device-dax mappings which are constant size. The
1214 * actual size of the mapping being torn down is
1215 * communicated in siginfo, see kill_proc()
1216 */
1217 start = (page->index << PAGE_SHIFT) & ~(size - 1);
1218 unmap_mapping_range(page->mapping, start, start + size, 0);
1219 }
1220 kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
1221 rc = 0;
1222unlock:
1223 dax_unlock_page(page, cookie);
1224out:
1225 /* drop pgmap ref acquired in caller */
1226 put_dev_pagemap(pgmap);
1227 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
1228 return rc;
1229}
1230
1231/**
1232 * memory_failure - Handle memory failure of a page.
1233 * @pfn: Page Number of the corrupted page
1234 * @flags: fine tune action taken
1235 *
1236 * This function is called by the low level machine check code
1237 * of an architecture when it detects hardware memory corruption
1238 * of a page. It tries its best to recover, which includes
1239 * dropping pages, killing processes etc.
1240 *
1241 * The function is primarily of use for corruptions that
1242 * happen outside the current execution context (e.g. when
1243 * detected by a background scrubber)
1244 *
1245 * Must run in process context (e.g. a work queue) with interrupts
1246 * enabled and no spinlocks held.
1247 */
1248int memory_failure(unsigned long pfn, int flags)
1249{
1250 struct page *p;
1251 struct page *hpage;
1252 struct page *orig_head;
1253 struct dev_pagemap *pgmap;
1254 int res;
1255 unsigned long page_flags;
1256
1257 if (!sysctl_memory_failure_recovery)
1258 panic("Memory failure on page %lx", pfn);
1259
1260 p = pfn_to_online_page(pfn);
1261 if (!p) {
1262 if (pfn_valid(pfn)) {
1263 pgmap = get_dev_pagemap(pfn, NULL);
1264 if (pgmap)
1265 return memory_failure_dev_pagemap(pfn, flags,
1266 pgmap);
1267 }
1268 pr_err("Memory failure: %#lx: memory outside kernel control\n",
1269 pfn);
1270 return -ENXIO;
1271 }
1272
1273 if (PageHuge(p))
1274 return memory_failure_hugetlb(pfn, flags);
1275 if (TestSetPageHWPoison(p)) {
1276 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1277 pfn);
1278 return 0;
1279 }
1280
1281 orig_head = hpage = compound_head(p);
1282 num_poisoned_pages_inc();
1283
1284 /*
1285 * We need/can do nothing about count=0 pages.
1286 * 1) it's a free page, and therefore in safe hand:
1287 * prep_new_page() will be the gate keeper.
1288 * 2) it's part of a non-compound high order page.
1289 * Implies some kernel user: cannot stop them from
1290 * R/W the page; let's pray that the page has been
1291 * used and will be freed some time later.
1292 * In fact it's dangerous to directly bump up page count from 0,
1293 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
1294 */
1295 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1296 if (is_free_buddy_page(p)) {
1297 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1298 return 0;
1299 } else {
1300 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
1301 return -EBUSY;
1302 }
1303 }
1304
1305 if (PageTransHuge(hpage)) {
1306 lock_page(p);
1307 if (!PageAnon(p) || unlikely(split_huge_page(p))) {
1308 unlock_page(p);
1309 if (!PageAnon(p))
1310 pr_err("Memory failure: %#lx: non anonymous thp\n",
1311 pfn);
1312 else
1313 pr_err("Memory failure: %#lx: thp split failed\n",
1314 pfn);
1315 if (TestClearPageHWPoison(p))
1316 num_poisoned_pages_dec();
1317 put_hwpoison_page(p);
1318 return -EBUSY;
1319 }
1320 unlock_page(p);
1321 VM_BUG_ON_PAGE(!page_count(p), p);
1322 hpage = compound_head(p);
1323 }
1324
1325 /*
1326 * We ignore non-LRU pages for good reasons.
1327 * - PG_locked is only well defined for LRU pages and a few others
1328 * - to avoid races with __SetPageLocked()
1329 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
1330 * The check (unnecessarily) ignores LRU pages being isolated and
1331 * walked by the page reclaim code, however that's not a big loss.
1332 */
1333 shake_page(p, 0);
1334 /* shake_page could have turned it free. */
1335 if (!PageLRU(p) && is_free_buddy_page(p)) {
1336 if (flags & MF_COUNT_INCREASED)
1337 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1338 else
1339 action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
1340 return 0;
1341 }
1342
1343 lock_page(p);
1344
1345 /*
1346	 * The page could have become part of a different compound page during locking.
1347 * If this happens just bail out.
1348 */
1349 if (PageCompound(p) && compound_head(p) != orig_head) {
1350 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
1351 res = -EBUSY;
1352 goto out;
1353 }
1354
1355 /*
1356 * We use page flags to determine what action should be taken, but
1357 * the flags can be modified by the error containment action. One
1358 * example is an mlocked page, where PG_mlocked is cleared by
1359 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
1360 * correctly, we save a copy of the page flags at this time.
1361 */
1362 if (PageHuge(p))
1363 page_flags = hpage->flags;
1364 else
1365 page_flags = p->flags;
1366
1367 /*
1368	 * unpoison always clears PG_hwpoison inside the page lock
1369 */
1370 if (!PageHWPoison(p)) {
1371 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1372 num_poisoned_pages_dec();
1373 unlock_page(p);
1374 put_hwpoison_page(p);
1375 return 0;
1376 }
1377 if (hwpoison_filter(p)) {
1378 if (TestClearPageHWPoison(p))
1379 num_poisoned_pages_dec();
1380 unlock_page(p);
1381 put_hwpoison_page(p);
1382 return 0;
1383 }
1384
1385 if (!PageTransTail(p) && !PageLRU(p))
1386 goto identify_page_state;
1387
1388 /*
1389 * It's very difficult to mess with pages currently under IO
1390 * and in many cases impossible, so we just avoid it here.
1391 */
1392 wait_on_page_writeback(p);
1393
1394 /*
1395 * Now take care of user space mappings.
1396 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
1397 *
1398 * When the raw error page is thp tail page, hpage points to the raw
1399 * page after thp split.
1400 */
1401 if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
1402 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1403 res = -EBUSY;
1404 goto out;
1405 }
1406
1407 /*
1408 * Torn down by someone else?
1409 */
1410 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
1411 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
1412 res = -EBUSY;
1413 goto out;
1414 }
1415
1416identify_page_state:
1417 res = identify_page_state(pfn, p, page_flags);
1418out:
1419 unlock_page(p);
1420 return res;
1421}
1422EXPORT_SYMBOL_GPL(memory_failure);
1423
1424#define MEMORY_FAILURE_FIFO_ORDER 4
1425#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
1426
1427struct memory_failure_entry {
1428 unsigned long pfn;
1429 int flags;
1430};
1431
1432struct memory_failure_cpu {
1433 DECLARE_KFIFO(fifo, struct memory_failure_entry,
1434 MEMORY_FAILURE_FIFO_SIZE);
1435 spinlock_t lock;
1436 struct work_struct work;
1437};
1438
1439static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
1440
1441/**
1442 * memory_failure_queue - Schedule handling memory failure of a page.
1443 * @pfn: Page Number of the corrupted page
1444 * @flags: Flags for memory failure handling
1445 *
1446 * This function is called by the low level hardware error handler
1447 * when it detects hardware memory corruption of a page. It schedules
1448 * the recovery of the error page, including dropping pages, killing
1449 * processes etc.
1450 *
1451 * The function is primarily of use for corruptions that
1452 * happen outside the current execution context (e.g. when
1453 * detected by a background scrubber)
1454 *
1455 * Can run in IRQ context.
1456 */
1457void memory_failure_queue(unsigned long pfn, int flags)
1458{
1459 struct memory_failure_cpu *mf_cpu;
1460 unsigned long proc_flags;
1461 struct memory_failure_entry entry = {
1462 .pfn = pfn,
1463 .flags = flags,
1464 };
1465
1466 mf_cpu = &get_cpu_var(memory_failure_cpu);
1467 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1468 if (kfifo_put(&mf_cpu->fifo, entry))
1469 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1470 else
1471 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
1472 pfn);
1473 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1474 put_cpu_var(memory_failure_cpu);
1475}
1476EXPORT_SYMBOL_GPL(memory_failure_queue);
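/*
 * Illustrative only: a hardware error handler running in interrupt context
 * (for example an EDAC or firmware-first report path) would hand a corrupted
 * physical address to this code roughly as follows.  The surrounding handler
 * is hypothetical; only the memory_failure_queue() call reflects the real
 * interface:
 *
 *	static void example_report_poison(u64 paddr)
 *	{
 *		unsigned long pfn = PHYS_PFN(paddr);
 *
 *		memory_failure_queue(pfn, 0);
 *	}
 */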
1477
1478static void memory_failure_work_func(struct work_struct *work)
1479{
1480 struct memory_failure_cpu *mf_cpu;
1481 struct memory_failure_entry entry = { 0, };
1482 unsigned long proc_flags;
1483 int gotten;
1484
1485 mf_cpu = this_cpu_ptr(&memory_failure_cpu);
1486 for (;;) {
1487 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1488 gotten = kfifo_get(&mf_cpu->fifo, &entry);
1489 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1490 if (!gotten)
1491 break;
1492 if (entry.flags & MF_SOFT_OFFLINE)
1493 soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
1494 else
1495 memory_failure(entry.pfn, entry.flags);
1496 }
1497}
1498
1499static int __init memory_failure_init(void)
1500{
1501 struct memory_failure_cpu *mf_cpu;
1502 int cpu;
1503
1504 for_each_possible_cpu(cpu) {
1505 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1506 spin_lock_init(&mf_cpu->lock);
1507 INIT_KFIFO(mf_cpu->fifo);
1508 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
1509 }
1510
1511 return 0;
1512}
1513core_initcall(memory_failure_init);
1514
1515#define unpoison_pr_info(fmt, pfn, rs) \
1516({ \
1517 if (__ratelimit(rs)) \
1518 pr_info(fmt, pfn); \
1519})
1520
1521/**
1522 * unpoison_memory - Unpoison a previously poisoned page
1523 * @pfn: Page number of the to be unpoisoned page
1524 *
1525 * Software-unpoison a page that has been poisoned by
1526 * memory_failure() earlier.
1527 *
1528 * This is only done on the software level, so it only works
1529 * for Linux-injected failures, not real hardware failures.
1530 *
1531 * Returns 0 for success, otherwise -errno.
1532 */
1533int unpoison_memory(unsigned long pfn)
1534{
1535 struct page *page;
1536 struct page *p;
1537 int freeit = 0;
1538 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1539 DEFAULT_RATELIMIT_BURST);
1540
1541 if (!pfn_valid(pfn))
1542 return -ENXIO;
1543
1544 p = pfn_to_page(pfn);
1545 page = compound_head(p);
1546
1547 if (!PageHWPoison(p)) {
1548 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
1549 pfn, &unpoison_rs);
1550 return 0;
1551 }
1552
1553 if (page_count(page) > 1) {
1554 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
1555 pfn, &unpoison_rs);
1556 return 0;
1557 }
1558
1559 if (page_mapped(page)) {
1560 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
1561 pfn, &unpoison_rs);
1562 return 0;
1563 }
1564
1565 if (page_mapping(page)) {
1566 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
1567 pfn, &unpoison_rs);
1568 return 0;
1569 }
1570
1571 /*
1572 * unpoison_memory() can encounter thp only when the thp is being
1573 * worked by memory_failure() and the page lock is not held yet.
1574 * In such case, we yield to memory_failure() and make unpoison fail.
1575 */
1576 if (!PageHuge(page) && PageTransHuge(page)) {
1577 unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
1578 pfn, &unpoison_rs);
1579 return 0;
1580 }
1581
1582 if (!get_hwpoison_page(p)) {
1583 if (TestClearPageHWPoison(p))
1584 num_poisoned_pages_dec();
1585 unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
1586 pfn, &unpoison_rs);
1587 return 0;
1588 }
1589
1590 lock_page(page);
1591 /*
1592 * This test is racy because PG_hwpoison is set outside of page lock.
1593 * That's acceptable because that won't trigger kernel panic. Instead,
1594 * the PG_hwpoison page will be caught and isolated on the entrance to
1595 * the free buddy page pool.
1596 */
1597 if (TestClearPageHWPoison(page)) {
1598 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
1599 pfn, &unpoison_rs);
1600 num_poisoned_pages_dec();
1601 freeit = 1;
1602 }
1603 unlock_page(page);
1604
1605 put_hwpoison_page(page);
1606 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
1607 put_hwpoison_page(page);
1608
1609 return 0;
1610}
1611EXPORT_SYMBOL(unpoison_memory);
1612
1613static struct page *new_page(struct page *p, unsigned long private)
1614{
1615 int nid = page_to_nid(p);
1616
1617 return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
1618}
1619
1620/*
1621 * Safely get reference count of an arbitrary page.
1622 * Returns 0 for a free page, -EIO for a zero refcount page
1623 * that is not free, and 1 for any other page type.
1624 * For 1 the page is returned with increased page count, otherwise not.
1625 */
1626static int __get_any_page(struct page *p, unsigned long pfn, int flags)
1627{
1628 int ret;
1629
1630 if (flags & MF_COUNT_INCREASED)
1631 return 1;
1632
1633 /*
1634 * When the target page is a free hugepage, just remove it
1635 * from free hugepage list.
1636 */
1637 if (!get_hwpoison_page(p)) {
1638 if (PageHuge(p)) {
1639 pr_info("%s: %#lx free huge page\n", __func__, pfn);
1640 ret = 0;
1641 } else if (is_free_buddy_page(p)) {
1642 pr_info("%s: %#lx free buddy page\n", __func__, pfn);
1643 ret = 0;
1644 } else {
1645 pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
1646 __func__, pfn, p->flags);
1647 ret = -EIO;
1648 }
1649 } else {
1650 /* Not a free page */
1651 ret = 1;
1652 }
1653 return ret;
1654}
1655
1656static int get_any_page(struct page *page, unsigned long pfn, int flags)
1657{
1658 int ret = __get_any_page(page, pfn, flags);
1659
1660 if (ret == 1 && !PageHuge(page) &&
1661 !PageLRU(page) && !__PageMovable(page)) {
1662 /*
1663 * Try to free it.
1664 */
1665 put_hwpoison_page(page);
1666 shake_page(page, 1);
1667
1668 /*
1669 * Did it turn free?
1670 */
1671 ret = __get_any_page(page, pfn, 0);
1672 if (ret == 1 && !PageLRU(page)) {
1673 /* Drop page reference which is from __get_any_page() */
1674 put_hwpoison_page(page);
1675 pr_info("soft_offline: %#lx: unknown non LRU page type %lx (%pGp)\n",
1676 pfn, page->flags, &page->flags);
1677 return -EIO;
1678 }
1679 }
1680 return ret;
1681}
1682
1683static int soft_offline_huge_page(struct page *page, int flags)
1684{
1685 int ret;
1686 unsigned long pfn = page_to_pfn(page);
1687 struct page *hpage = compound_head(page);
1688 LIST_HEAD(pagelist);
1689
1690 /*
1691 * This double-check of PageHWPoison is to avoid the race with
1692 * memory_failure(). See also comment in __soft_offline_page().
1693 */
1694 lock_page(hpage);
1695 if (PageHWPoison(hpage)) {
1696 unlock_page(hpage);
1697 put_hwpoison_page(hpage);
1698 pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
1699 return -EBUSY;
1700 }
1701 unlock_page(hpage);
1702
1703 ret = isolate_huge_page(hpage, &pagelist);
1704 /*
1705	 * get_any_page() and isolate_huge_page() take a refcount each,
1706	 * so we need to drop one here.
1707 */
1708 put_hwpoison_page(hpage);
1709 if (!ret) {
1710 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1711 return -EBUSY;
1712 }
1713
1714 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1715 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1716 if (ret) {
1717 pr_info("soft offline: %#lx: hugepage migration failed %d, type %lx (%pGp)\n",
1718 pfn, ret, page->flags, &page->flags);
1719 if (!list_empty(&pagelist))
1720 putback_movable_pages(&pagelist);
1721 if (ret > 0)
1722 ret = -EIO;
1723 } else {
1724 /*
1725		 * was successfully dissolved, because otherwise the hwpoisoned
1726		 * hugepage remains on the free hugepage list and userspace will
1727		 * later hit SIGBUS on an allocation failure. That's not expected
1728 * find it as SIGBUS by allocation failure. That's not expected
1729 * in soft-offlining.
1730 */
1731 ret = dissolve_free_huge_page(page);
1732 if (!ret) {
1733 if (set_hwpoison_free_buddy_page(page))
1734 num_poisoned_pages_inc();
1735 else
1736 ret = -EBUSY;
1737 }
1738 }
1739 return ret;
1740}
1741
1742static int __soft_offline_page(struct page *page, int flags)
1743{
1744 int ret;
1745 unsigned long pfn = page_to_pfn(page);
1746
1747 /*
1748 * Check PageHWPoison again inside page lock because PageHWPoison
1749 * is set by memory_failure() outside page lock. Note that
1750 * memory_failure() also double-checks PageHWPoison inside page lock,
1751 * so there's no race between soft_offline_page() and memory_failure().
1752 */
1753 lock_page(page);
1754 wait_on_page_writeback(page);
1755 if (PageHWPoison(page)) {
1756 unlock_page(page);
1757 put_hwpoison_page(page);
1758 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1759 return -EBUSY;
1760 }
1761 /*
1762 * Try to invalidate first. This should work for
1763	 * non-dirty unmapped page cache pages.
1764 */
1765 ret = invalidate_inode_page(page);
1766 unlock_page(page);
1767 /*
1768	 * RED-PEN: it would be better to keep it isolated here, but we
1769 * would need to fix isolation locking first.
1770 */
1771 if (ret == 1) {
1772 put_hwpoison_page(page);
1773 pr_info("soft_offline: %#lx: invalidated\n", pfn);
1774 SetPageHWPoison(page);
1775 num_poisoned_pages_inc();
1776 return 0;
1777 }
1778
1779 /*
1780 * Simple invalidation didn't work.
1781 * Try to migrate to a new page instead. migrate.c
1782 * handles a large number of cases for us.
1783 */
1784 if (PageLRU(page))
1785 ret = isolate_lru_page(page);
1786 else
1787 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1788 /*
1789	 * Drop the page reference which came from get_any_page();
1790	 * a successful isolate_lru_page() already took another one.
1791 */
1792 put_hwpoison_page(page);
1793 if (!ret) {
1794 LIST_HEAD(pagelist);
1795 /*
1796		 * After isolating an lru page, PageLRU will be cleared,
1797		 * so check !__PageMovable() instead, since an LRU page's
1798		 * mapping cannot have PAGE_MAPPING_MOVABLE.
1799 */
1800 if (!__PageMovable(page))
1801 inc_node_page_state(page, NR_ISOLATED_ANON +
1802 page_is_file_cache(page));
1803 list_add(&page->lru, &pagelist);
1804 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1805 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1806 if (ret) {
1807 if (!list_empty(&pagelist))
1808 putback_movable_pages(&pagelist);
1809
1810 pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
1811 pfn, ret, page->flags, &page->flags);
1812 if (ret > 0)
1813 ret = -EIO;
1814 }
1815 } else {
1816 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx (%pGp)\n",
1817 pfn, ret, page_count(page), page->flags, &page->flags);
1818 }
1819 return ret;
1820}
1821
1822static int soft_offline_in_use_page(struct page *page, int flags)
1823{
1824 int ret;
1825 int mt;
1826 struct page *hpage = compound_head(page);
1827
1828 if (!PageHuge(page) && PageTransHuge(hpage)) {
1829 lock_page(page);
1830 if (!PageAnon(page) || unlikely(split_huge_page(page))) {
1831 unlock_page(page);
1832 if (!PageAnon(page))
1833 pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
1834 else
1835 pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
1836 put_hwpoison_page(page);
1837 return -EBUSY;
1838 }
1839 unlock_page(page);
1840 }
1841
1842 /*
1843 * Setting MIGRATE_ISOLATE here ensures that the page will be linked
1844	 * to the free list immediately (not via the pcplist) when released after
1845 * successful page migration. Otherwise we can't guarantee that the
1846 * page is really free after put_page() returns, so
1847	 * set_hwpoison_free_buddy_page() would be highly likely to fail.
1848 */
1849 mt = get_pageblock_migratetype(page);
1850 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
1851 if (PageHuge(page))
1852 ret = soft_offline_huge_page(page, flags);
1853 else
1854 ret = __soft_offline_page(page, flags);
1855 set_pageblock_migratetype(page, mt);
1856 return ret;
1857}
1858
1859static int soft_offline_free_page(struct page *page)
1860{
1861 int rc = dissolve_free_huge_page(page);
1862
1863 if (!rc) {
1864 if (set_hwpoison_free_buddy_page(page))
1865 num_poisoned_pages_inc();
1866 else
1867 rc = -EBUSY;
1868 }
1869 return rc;
1870}
1871
1872/**
1873 * soft_offline_page - Soft offline a page.
1874 * @page: page to offline
1875 * @flags: flags. Same as memory_failure().
1876 *
1877 * Returns 0 on success, otherwise negated errno.
1878 *
1879 * Soft offline a page, by migration or invalidation,
1880 * without killing anything. This is for the case when
1881 * a page is not corrupted yet (so it's still valid to access),
1882 * but has had a number of corrected errors and is better taken
1883 * out.
1884 *
1885 * The actual policy on when to do that is maintained by
1886 * user space.
1887 *
1888 * This should never impact any application or cause data loss,
1889 * however it might take some time.
1890 *
1891 * This is not a 100% solution for all memory, but tries to be
1892 * ``good enough'' for the majority of memory.
1893 */
1894int soft_offline_page(struct page *page, int flags)
1895{
1896 int ret;
1897 unsigned long pfn = page_to_pfn(page);
1898
1899 if (is_zone_device_page(page)) {
1900 pr_debug_ratelimited("soft_offline: %#lx page is device page\n",
1901 pfn);
1902 if (flags & MF_COUNT_INCREASED)
1903 put_page(page);
1904 return -EIO;
1905 }
1906
1907 if (PageHWPoison(page)) {
1908 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1909 if (flags & MF_COUNT_INCREASED)
1910 put_hwpoison_page(page);
1911 return -EBUSY;
1912 }
1913
1914 get_online_mems();
1915 ret = get_any_page(page, pfn, flags);
1916 put_online_mems();
1917
1918 if (ret > 0)
1919 ret = soft_offline_in_use_page(page, flags);
1920 else if (ret == 0)
1921 ret = soft_offline_free_page(page);
1922
1923 return ret;
1924}
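/*
 * Soft offline is normally driven from user space; one common path is
 * madvise(MADV_SOFT_OFFLINE) on a mapped range, which ends up in
 * soft_offline_page() above.  A minimal user-space sketch, illustrative
 * only (requires CONFIG_MEMORY_FAILURE and typically CAP_SYS_ADMIN,
 * error handling omitted); the write simply faults the page in first:
 *
 *	long pagesz = sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;
 *	madvise(p, pagesz, MADV_SOFT_OFFLINE);
 */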