/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping the
 * use of not-yet-corrupted but suspicious pages, without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */

/*
 * Notebook:
 * - hugetlb needs more code
 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
 * - pass bad pages to kdump next kernel
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/kfifo.h>
#include "internal.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

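/*
 * Both knobs are normally exposed to user space via sysctl as
 * vm.memory_failure_early_kill and vm.memory_failure_recovery.
 * A quick usage sketch from the shell:
 *
 *     sysctl vm.memory_failure_early_kill=1   # SIGBUS mappers right away
 *     sysctl vm.memory_failure_recovery=0     # panic instead of recovering
 */
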
atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

static int hwpoison_filter_dev(struct page *p)
{
        struct address_space *mapping;
        dev_t dev;

        if (hwpoison_filter_dev_major == ~0U &&
            hwpoison_filter_dev_minor == ~0U)
                return 0;

        /*
         * page_mapping() does not accept slab pages.
         */
        if (PageSlab(p))
                return -EINVAL;

        mapping = page_mapping(p);
        if (mapping == NULL || mapping->host == NULL)
                return -EINVAL;

        dev = mapping->host->i_sb->s_dev;
        if (hwpoison_filter_dev_major != ~0U &&
            hwpoison_filter_dev_major != MAJOR(dev))
                return -EINVAL;
        if (hwpoison_filter_dev_minor != ~0U &&
            hwpoison_filter_dev_minor != MINOR(dev))
                return -EINVAL;

        return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
        if (!hwpoison_filter_flags_mask)
                return 0;

        if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
            hwpoison_filter_flags_value)
                return 0;
        else
                return -EINVAL;
}

/*
 * This allows stress tests to limit the test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
        struct mem_cgroup *mem;
        struct cgroup_subsys_state *css;
        unsigned long ino;

        if (!hwpoison_filter_memcg)
                return 0;

        mem = try_get_mem_cgroup_from_page(p);
        if (!mem)
                return -EINVAL;

        css = mem_cgroup_css(mem);
        /* root_mem_cgroup has NULL dentries */
        if (!css->cgroup->dentry)
                return -EINVAL;

        ino = css->cgroup->dentry->d_inode->i_ino;
        css_put(css);

        if (ino != hwpoison_filter_memcg)
                return -EINVAL;

        return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
        if (!hwpoison_filter_enable)
                return 0;

        if (hwpoison_filter_dev(p))
                return -EINVAL;

        if (hwpoison_filter_flags(p))
                return -EINVAL;

        if (hwpoison_filter_task(p))
                return -EINVAL;

        return 0;
}
#else
int hwpoison_filter(struct page *p)
{
        return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

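/*
 * The filter knobs above are driven from user space by the hwpoison
 * injector (mm/hwpoison-inject.c), which typically exposes them under
 * debugfs. A test sketch (paths assumed from the injector, check your
 * kernel):
 *
 *     echo 1 > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 *     echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 */
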
/*
 * Send all the processes who have the page mapped an ``action optional''
 * signal.
 */
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
                        unsigned long pfn, struct page *page)
{
        struct siginfo si;
        int ret;

        printk(KERN_ERR
                "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
                pfn, t->comm, t->pid);
        si.si_signo = SIGBUS;
        si.si_errno = 0;
        si.si_code = BUS_MCEERR_AO;
        si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
        si.si_trapno = trapno;
#endif
        si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
        /*
         * Don't use force here, it's convenient if the signal
         * can be temporarily blocked.
         * This could cause a loop when the user sets SIGBUS
         * to SIG_IGN, but hopefully no one will do that?
         */
        ret = send_sig_info(SIGBUS, &si, t);    /* synchronous? */
        if (ret < 0)
                printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
                       t->comm, t->pid, ret);
        return ret;
}

/*
 * When an unknown page type is encountered, drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can
 * handle.
 */
void shake_page(struct page *p, int access)
{
        if (!PageSlab(p)) {
                lru_add_drain_all();
                if (PageLRU(p))
                        return;
                drain_all_pages();
                if (PageLRU(p) || is_free_buddy_page(p))
                        return;
        }

        /*
         * Only call shrink_slab here (which would also shrink other caches) if
         * access is not potentially fatal.
         */
        if (access) {
                int nr;
                do {
                        struct shrink_control shrink = {
                                .gfp_mask = GFP_KERNEL,
                        };

                        nr = shrink_slab(&shrink, 1000, 1000);
                        if (page_count(p) == 1)
                                break;
                } while (nr > 10);
        }
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
        struct list_head nd;
        struct task_struct *tsk;
        unsigned long addr;
        char addr_valid;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD: would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
                        struct vm_area_struct *vma,
                        struct list_head *to_kill,
                        struct to_kill **tkc)
{
        struct to_kill *tk;

        if (*tkc) {
                tk = *tkc;
                *tkc = NULL;
        } else {
                tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
                if (!tk) {
                        printk(KERN_ERR
                                "MCE: Out of memory while machine check handling\n");
                        return;
                }
        }
        tk->addr = page_address_in_vma(p, vma);
        tk->addr_valid = 1;

        /*
         * In theory we don't have to kill when the page was
         * munmapped. But it could also be an mremap. Since that's
         * likely very rare, kill anyway just out of paranoia, but use
         * a SIGKILL because the error is not contained anymore.
         */
        if (tk->addr == -EFAULT) {
                pr_info("MCE: Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
                tk->addr_valid = 0;
        }
        get_task_struct(tsk);
        tk->tsk = tsk;
        list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
                          int fail, struct page *page, unsigned long pfn)
{
        struct to_kill *tk, *next;

        list_for_each_entry_safe (tk, next, to_kill, nd) {
                if (doit) {
                        /*
                         * In case something went wrong with munmapping
                         * make sure the process doesn't catch the
                         * signal and then access the memory. Just kill it.
                         */
                        if (fail || tk->addr_valid == 0) {
                                printk(KERN_ERR
                "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
                                        pfn, tk->tsk->comm, tk->tsk->pid);
                                force_sig(SIGKILL, tk->tsk);
                        }

                        /*
                         * In theory the process could have mapped
                         * something else on the address in-between. We could
                         * check for that, but we need to tell the
                         * process anyway.
                         */
                        else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
                                              pfn, page) < 0)
                                printk(KERN_ERR
                "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
                                        pfn, tk->tsk->comm, tk->tsk->pid);
                }
                put_task_struct(tk->tsk);
                kfree(tk);
        }
}

static int task_early_kill(struct task_struct *tsk)
{
        if (!tsk->mm)
                return 0;
        if (tsk->flags & PF_MCE_PROCESS)
                return !!(tsk->flags & PF_MCE_EARLY);
        return sysctl_memory_failure_early_kill;
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                               struct to_kill **tkc)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct anon_vma *av;

        av = page_lock_anon_vma(page);
        if (av == NULL) /* Not actually mapped anymore */
                return;

        read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;

                if (!task_early_kill(tsk))
                        continue;
                list_for_each_entry(vmac, &av->head, same_anon_vma) {
                        vma = vmac->vma;
                        if (!page_mapped_in_vma(page, vma))
                                continue;
                        if (vma->vm_mm == tsk->mm)
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
        read_unlock(&tasklist_lock);
        page_unlock_anon_vma(av);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
                               struct to_kill **tkc)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct prio_tree_iter iter;
        struct address_space *mapping = page->mapping;

        mutex_lock(&mapping->i_mmap_mutex);
        read_lock(&tasklist_lock);
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

                if (!task_early_kill(tsk))
                        continue;

                vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
                                      pgoff) {
                        /*
                         * Send an early kill signal to tasks where a vma
                         * covers the page, even though the corrupted page
                         * is not necessarily mapped in their page tables.
                         * Assume applications who requested early kill want
                         * to be informed of all such data corruptions.
                         */
                        if (vma->vm_mm == tsk->mm)
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
        read_unlock(&tasklist_lock);
        mutex_unlock(&mapping->i_mmap_mutex);
}

/*
 * Collect the processes that have the corrupted page mapped, to kill them.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill)
{
        struct to_kill *tk;

        if (!page->mapping)
                return;

        tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
        if (!tk)
                return;
        if (PageAnon(page))
                collect_procs_anon(page, tokill, &tk);
        else
                collect_procs_file(page, tokill, &tk);
        kfree(tk);
}

/*
 * Error handlers for various types of pages.
 */

enum outcome {
        IGNORED,        /* Error: cannot be handled */
        FAILED,         /* Error: handling failed */
        DELAYED,        /* Will be handled later */
        RECOVERED,      /* Successfully recovered */
};

static const char *action_name[] = {
        [IGNORED] = "Ignored",
        [FAILED] = "Failed",
        [DELAYED] = "Delayed",
        [RECOVERED] = "Recovered",
};

/*
 * XXX: It is possible that a page is isolated from the LRU cache,
 * and then kept in the swap cache or fails to be removed from the
 * page cache. The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
        if (!isolate_lru_page(p)) {
                /*
                 * Clear sensible page flags, so that the buddy system won't
                 * complain when the page is unpoisoned and freed.
                 */
                ClearPageActive(p);
                ClearPageUnevictable(p);
                /*
                 * Drop the page count elevated by isolate_lru_page().
                 */
                page_cache_release(p);
                return 0;
        }
        return -EIO;
}

/*
 * Error hit a kernel page.
 * Do nothing; try to be lucky and not touch this. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
        return IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
        printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
        return FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
        int err;
        int ret = FAILED;
        struct address_space *mapping;

        delete_from_lru_cache(p);

        /*
         * For anonymous pages we're done; the only reference left
         * should be the one memory_failure() holds.
         */
        if (PageAnon(p))
                return RECOVERED;

        /*
         * Now truncate the page in the page cache. This is really
         * more like a "temporary hole punch".
         * Don't do this for block devices when someone else
         * has a reference, because it could be file system metadata
         * and that's not safe to truncate.
         */
        mapping = page_mapping(p);
        if (!mapping) {
                /*
                 * Page has been torn down in the meantime.
                 */
                return FAILED;
        }

        /*
         * Truncation is a bit tricky. Enable it per file system for now.
         *
         * Open: to take i_mutex or not for this? Right now we don't.
         */
        if (mapping->a_ops->error_remove_page) {
                err = mapping->a_ops->error_remove_page(mapping, p);
                if (err != 0) {
                        printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
                               pfn, err);
                } else if (page_has_private(p) &&
                           !try_to_release_page(p, GFP_NOIO)) {
                        pr_info("MCE %#lx: failed to release buffers\n", pfn);
                } else {
                        ret = RECOVERED;
                }
        } else {
                /*
                 * If the file system doesn't support it, just invalidate.
                 * This fails on dirty pages or pages with private data.
                 */
                if (invalidate_inode_page(p))
                        ret = RECOVERED;
                else
                        printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
                               pfn);
        }
        return ret;
}

/*
 * Dirty pagecache page.
 * Issues: when the error hits a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
        struct address_space *mapping = page_mapping(p);

        SetPageError(p);
        /* TBD: print more information about the file. */
        if (mapping) {
                /*
                 * IO errors will be reported by write(), fsync(), etc.,
                 * which check the mapping.
                 * This way the application knows that something went
                 * wrong with its dirty file data.
                 *
                 * There's one open issue:
                 *
                 * The EIO will only be reported on the next IO
                 * operation and then cleared through the IO map.
                 * Normally Linux has two mechanisms to pass IO errors:
                 * first through the AS_EIO flag in the address space
                 * and then through the PageError flag in the page.
                 * Since we drop pages on memory failure handling the
                 * only mechanism open to use is through AS_EIO.
                 *
                 * This has the disadvantage that it gets cleared on
                 * the first operation that returns an error, while
                 * the PageError bit is more sticky and only cleared
                 * when the page is reread or dropped. If an
                 * application assumes it will always get an error on
                 * fsync, but does other operations on the fd before,
                 * and the page is dropped in between, then the error
                 * will not be properly reported.
                 *
                 * This can already happen even without hwpoisoned
                 * pages: first on metadata IO errors (which only
                 * report through AS_EIO) or when the page is dropped
                 * at the wrong time.
                 *
                 * So right now we assume that the application DTRT on
                 * the first EIO, but we're not worse than other parts
                 * of the kernel.
                 */
                mapping_set_error(mapping, EIO);
        }

        return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache pages are tricky to handle. The page could live both in
 * the page cache and the swap cache (i.e. the page was freshly swapped in).
 * So it could be referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear the dirty bit to prevent IO
 *      - remove from the LRU
 *      - but keep it in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
        ClearPageDirty(p);
        /* Trigger EIO in shmem: */
        ClearPageUptodate(p);

        if (!delete_from_lru_cache(p))
                return DELAYED;
        else
                return FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
        delete_from_swap_cache(p);

        if (!delete_from_lru_cache(p))
                return RECOVERED;
        else
                return FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit).
 *   To narrow down the kill region to one page, we need to break up the pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
        int res = 0;
        struct page *hpage = compound_head(p);
        /*
         * We can safely recover from error on free or reserved (i.e.
         * not in-use) hugepage by dequeuing it from the freelist.
         * To check whether a hugepage is in-use or not, we can't use
         * page->lru because it can be used in other hugepage operations,
         * such as __unmap_hugepage_range() and gather_surplus_pages().
         * So instead we use page_mapping() and PageAnon().
         * We assume that this function is called with page lock held,
         * so there is no race between isolation and mapping/unmapping.
         */
        if (!(page_mapping(hpage) || PageAnon(hpage))) {
                res = dequeue_hwpoisoned_huge_page(hpage);
                if (!res)
                        return RECOVERED;
        }
        return DELAYED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty           (1UL << PG_dirty)
#define sc              (1UL << PG_swapcache)
#define unevict         (1UL << PG_unevictable)
#define mlock           (1UL << PG_mlocked)
#define writeback       (1UL << PG_writeback)
#define lru             (1UL << PG_lru)
#define swapbacked      (1UL << PG_swapbacked)
#define head            (1UL << PG_head)
#define tail            (1UL << PG_tail)
#define compound        (1UL << PG_compound)
#define slab            (1UL << PG_slab)
#define reserved        (1UL << PG_reserved)

static struct page_state {
        unsigned long mask;
        unsigned long res;
        char *msg;
        int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
        { reserved,     reserved,       "reserved kernel",      me_kernel },
        /*
         * free pages are specially detected outside this table:
         * PG_buddy pages only make up a small fraction of all free pages.
         */

        /*
         * Could in theory check if the slab page is free or if we can drop
         * currently unused objects without touching them. But just
         * treat it as standard kernel for now.
         */
        { slab,         slab,           "kernel slab",          me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
        { head,         head,           "huge",                 me_huge_page },
        { tail,         tail,           "huge",                 me_huge_page },
#else
        { compound,     compound,       "huge",                 me_huge_page },
#endif

        { sc|dirty,     sc|dirty,       "swapcache",            me_swapcache_dirty },
        { sc|dirty,     sc,             "swapcache",            me_swapcache_clean },

        { unevict|dirty, unevict|dirty, "unevictable LRU",      me_pagecache_dirty },
        { unevict,      unevict,        "unevictable LRU",      me_pagecache_clean },

        { mlock|dirty,  mlock|dirty,    "mlocked LRU",          me_pagecache_dirty },
        { mlock,        mlock,          "mlocked LRU",          me_pagecache_clean },

        { lru|dirty,    lru|dirty,      "LRU",                  me_pagecache_dirty },
        { lru|dirty,    lru,            "clean LRU",            me_pagecache_clean },

        /*
         * Catchall entry: must be at end.
         */
        { 0,            0,              "unknown page state",   me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved

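/*
 * The table above is matched first-fit: the lookup loop in
 * __memory_failure() walks error_states[] top-down and picks the first
 * entry where (page->flags & mask) == res. For example, a dirty page on
 * the LRU has both PG_dirty and PG_lru set, so it hits the lru|dirty row
 * and is handled by me_pagecache_dirty(), while a clean LRU page falls
 * through to the next row and me_pagecache_clean().
 */
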
static void action_result(unsigned long pfn, char *msg, int result)
{
        struct page *page = pfn_to_page(pfn);

        printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
                pfn,
                PageDirty(page) ? "dirty " : "",
                msg, action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
                       unsigned long pfn)
{
        int result;
        int count;

        result = ps->action(p, pfn);
        action_result(pfn, ps->msg, result);

        count = page_count(p) - 1;
        if (ps->action == me_swapcache_dirty && result == DELAYED)
                count--;
        if (count != 0) {
                printk(KERN_ERR
                       "MCE %#lx: %s page still referenced by %d users\n",
                       pfn, ps->msg, count);
                result = FAILED;
        }

        /* Could do more checks here if page looks ok */
        /*
         * Could adjust zone counters here to correct for the missing page.
         */

        return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
                                  int trapno)
{
        enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
        struct address_space *mapping;
        LIST_HEAD(tokill);
        int ret;
        int kill = 1;
        struct page *hpage = compound_head(p);
        struct page *ppage;

        if (PageReserved(p) || PageSlab(p))
                return SWAP_SUCCESS;

        /*
         * This check implies we don't kill processes if their pages
         * are in the swap cache early. Those are always late kills.
         */
        if (!page_mapped(hpage))
                return SWAP_SUCCESS;

        if (PageKsm(p))
                return SWAP_FAIL;

        if (PageSwapCache(p)) {
                printk(KERN_ERR
                       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
                ttu |= TTU_IGNORE_HWPOISON;
        }

        /*
         * Propagate the dirty bit from PTEs to struct page first, because we
         * need this to decide if we should kill or just drop the page.
         * XXX: the dirty test could be racy: set_page_dirty() may not always
         * be called inside the page lock (it's recommended but not enforced).
         */
        mapping = page_mapping(hpage);
        if (!PageDirty(hpage) && mapping &&
            mapping_cap_writeback_dirty(mapping)) {
                if (page_mkclean(hpage)) {
                        SetPageDirty(hpage);
                } else {
                        kill = 0;
                        ttu |= TTU_IGNORE_HWPOISON;
                        printk(KERN_INFO
                "MCE %#lx: corrupted page was clean: dropped without side effects\n",
                               pfn);
                }
        }

        /*
         * ppage: poisoned page
         *   if p is a regular page (4k page):
         *     ppage == the real poisoned page;
         *   else p is hugetlb or THP: ppage == the head page.
         */
        ppage = hpage;

        if (PageTransHuge(hpage)) {
                /*
                 * Verify that this isn't a hugetlbfs head page. The check
                 * for PageAnon is just to avoid tripping a split_huge_page
                 * internal debug check, as split_huge_page refuses to deal
                 * with anything that isn't an anon page. PageAnon can't go
                 * away from under us because we hold a refcount on the
                 * hpage; without a refcount on the hpage, split_huge_page
                 * can't be safely called in the first place, and having a
                 * refcount on a tail page isn't enough to be safe.
                 */
                if (!PageHuge(hpage) && PageAnon(hpage)) {
                        if (unlikely(split_huge_page(hpage))) {
                                /*
                                 * FIXME: if splitting the THP fails, it is
                                 * better to stop the following operation
                                 * rather than cause a panic by unmapping.
                                 * The system might survive if the page is
                                 * freed later.
                                 */
                                printk(KERN_INFO
                                       "MCE %#lx: failed to split THP\n", pfn);

                                BUG_ON(!PageHWPoison(p));
                                return SWAP_FAIL;
                        }
                        /* THP is split, so ppage should be the real poisoned page. */
                        ppage = p;
                }
        }

        /*
         * First collect all the processes that have the page
         * mapped in dirty form. This has to be done before try_to_unmap,
         * because ttu takes the rmap data structures down.
         *
         * Error handling: We ignore errors here because
         * there's nothing that can be done.
         */
        if (kill)
                collect_procs(ppage, &tokill);

        if (hpage != ppage)
                lock_page(ppage);

        ret = try_to_unmap(ppage, ttu);
        if (ret != SWAP_SUCCESS)
                printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
                       pfn, page_mapcount(ppage));

        if (hpage != ppage)
                unlock_page(ppage);

        /*
         * Now that the dirty bit has been propagated to the
         * struct page and all unmaps are done we can decide if
         * killing is needed or not. Only kill when the page
         * was dirty, otherwise the tokill list is merely
         * freed. When there was a problem unmapping earlier
         * use a more forceful uncatchable kill to prevent
         * any accesses to the poisoned memory.
         */
        kill_procs_ao(&tokill, !!PageDirty(ppage), trapno,
                      ret != SWAP_SUCCESS, p, pfn);

        return ret;
}

static void set_page_hwpoison_huge_page(struct page *hpage)
{
        int i;
        int nr_pages = 1 << compound_trans_order(hpage);
        for (i = 0; i < nr_pages; i++)
                SetPageHWPoison(hpage + i);
}

static void clear_page_hwpoison_huge_page(struct page *hpage)
{
        int i;
        int nr_pages = 1 << compound_trans_order(hpage);
        for (i = 0; i < nr_pages; i++)
                ClearPageHWPoison(hpage + i);
}

int __memory_failure(unsigned long pfn, int trapno, int flags)
{
        struct page_state *ps;
        struct page *p;
        struct page *hpage;
        int res;
        unsigned int nr_pages;

        if (!sysctl_memory_failure_recovery)
                panic("Memory failure from trap %d on page %lx", trapno, pfn);

        if (!pfn_valid(pfn)) {
                printk(KERN_ERR
                       "MCE %#lx: memory outside kernel control\n",
                       pfn);
                return -ENXIO;
        }

        p = pfn_to_page(pfn);
        hpage = compound_head(p);
        if (TestSetPageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
                return 0;
        }

        nr_pages = 1 << compound_trans_order(hpage);
        atomic_long_add(nr_pages, &mce_bad_pages);

        /*
         * We need/can do nothing about count=0 pages.
         * 1) it's a free page, and therefore in safe hands:
         *    prep_new_page() will be the gate keeper.
         * 2) it's a free hugepage, which is also safe:
         *    an affected hugepage will be dequeued from the hugepage
         *    freelist, so there's no concern about reusing it ever after.
         * 3) it's part of a non-compound high order page.
         *    Implies some kernel user: we cannot stop them from
         *    reading/writing the page; let's pray that the page has been
         *    used and will be freed some time later.
         * In fact it's dangerous to directly bump up the page count from 0,
         * as that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
         */
        if (!(flags & MF_COUNT_INCREASED) &&
            !get_page_unless_zero(hpage)) {
                if (is_free_buddy_page(p)) {
                        action_result(pfn, "free buddy", DELAYED);
                        return 0;
                } else if (PageHuge(hpage)) {
                        /*
                         * Check "just unpoisoned", "filter hit", and
                         * "race with other subpage".
                         */
                        lock_page(hpage);
                        if (!PageHWPoison(hpage)
                            || (hwpoison_filter(p) && TestClearPageHWPoison(p))
                            || (p != hpage && TestSetPageHWPoison(hpage))) {
                                atomic_long_sub(nr_pages, &mce_bad_pages);
                                return 0;
                        }
                        set_page_hwpoison_huge_page(hpage);
                        res = dequeue_hwpoisoned_huge_page(hpage);
                        action_result(pfn, "free huge",
                                      res ? IGNORED : DELAYED);
                        unlock_page(hpage);
                        return res;
                } else {
                        action_result(pfn, "high order kernel", IGNORED);
                        return -EBUSY;
                }
        }

        /*
         * We ignore non-LRU pages for good reasons.
         * - PG_locked is only well defined for LRU pages and a few others
         * - to avoid races with __set_page_locked()
         * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
         * The check (unnecessarily) ignores LRU pages being isolated and
         * walked by the page reclaim code, however that's not a big loss.
         */
        if (!PageHuge(p) && !PageTransCompound(p)) {
                if (!PageLRU(p))
                        shake_page(p, 0);
                if (!PageLRU(p)) {
                        /*
                         * shake_page could have turned it free.
                         */
                        if (is_free_buddy_page(p)) {
                                action_result(pfn, "free buddy, 2nd try",
                                              DELAYED);
                                return 0;
                        }
                        action_result(pfn, "non LRU", IGNORED);
                        put_page(p);
                        return -EBUSY;
                }
        }

        /*
         * Lock the page and wait for writeback to finish.
         * It's very difficult to mess with pages currently under IO
         * and in many cases impossible, so we just avoid it here.
         */
        lock_page(hpage);

        /*
         * unpoison always clears PG_hwpoison inside the page lock
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
                res = 0;
                goto out;
        }
        if (hwpoison_filter(p)) {
                if (TestClearPageHWPoison(p))
                        atomic_long_sub(nr_pages, &mce_bad_pages);
                unlock_page(hpage);
                put_page(hpage);
                return 0;
        }

        /*
         * For an error on a tail page, we should set PG_hwpoison
         * on the head page to show that the hugepage is hwpoisoned.
         */
        if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
                action_result(pfn, "hugepage already hardware poisoned",
                              IGNORED);
                unlock_page(hpage);
                put_page(hpage);
                return 0;
        }
        /*
         * Set PG_hwpoison on all pages in an error hugepage,
         * because containment is done in hugepage unit for now.
         * Since we have done TestSetPageHWPoison() for the head page with
         * page lock held, we can safely set PG_hwpoison bits on tail pages.
         */
        if (PageHuge(p))
                set_page_hwpoison_huge_page(hpage);

        wait_on_page_writeback(p);

        /*
         * Now take care of user space mappings.
         * Abort on fail: __delete_from_page_cache() assumes unmapped page.
         */
        if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
                printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
                res = -EBUSY;
                goto out;
        }

        /*
         * Torn down by someone else?
         */
        if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
                action_result(pfn, "already truncated LRU", IGNORED);
                res = -EBUSY;
                goto out;
        }

        res = -EBUSY;
        for (ps = error_states;; ps++) {
                if ((p->flags & ps->mask) == ps->res) {
                        res = page_action(ps, p, pfn);
                        break;
                }
        }
out:
        unlock_page(hpage);
        return res;
}
EXPORT_SYMBOL_GPL(__memory_failure);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
void memory_failure(unsigned long pfn, int trapno)
{
        __memory_failure(pfn, trapno, 0);
}

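/*
 * A sketch of a typical caller, loosely modeled on x86's machine check
 * work handler (the ring helpers are illustrative, see the arch code for
 * the real thing):
 *
 *     static void mce_process_work(struct work_struct *dummy)
 *     {
 *             unsigned long pfn;
 *
 *             while (mce_ring_get(&pfn))  // PFNs queued by the #MC handler
 *                     memory_failure(pfn, MCE_VECTOR);
 *     }
 */
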
#define MEMORY_FAILURE_FIFO_ORDER       4
#define MEMORY_FAILURE_FIFO_SIZE        (1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
        unsigned long pfn;
        int trapno;
        int flags;
};

struct memory_failure_cpu {
        DECLARE_KFIFO(fifo, struct memory_failure_entry,
                      MEMORY_FAILURE_FIFO_SIZE);
        spinlock_t lock;
        struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int trapno, int flags)
{
        struct memory_failure_cpu *mf_cpu;
        unsigned long proc_flags;
        struct memory_failure_entry entry = {
                .pfn = pfn,
                .trapno = trapno,
                .flags = flags,
        };

        mf_cpu = &get_cpu_var(memory_failure_cpu);
        spin_lock_irqsave(&mf_cpu->lock, proc_flags);
        if (kfifo_put(&mf_cpu->fifo, &entry))
                schedule_work_on(smp_processor_id(), &mf_cpu->work);
        else
                pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
                       pfn);
        spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
        put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);

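/*
 * A sketch of an IRQ-context user, loosely modeled on the APEI GHES
 * driver (drivers/acpi/apei/ghes.c), which queues recovery for pages
 * reported by platform firmware (the field names are illustrative):
 *
 *     pfn = mem_err->physical_addr >> PAGE_SHIFT;
 *     memory_failure_queue(pfn, 0, 0);
 */
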
static void memory_failure_work_func(struct work_struct *work)
{
        struct memory_failure_cpu *mf_cpu;
        struct memory_failure_entry entry = { 0, };
        unsigned long proc_flags;
        int gotten;

        mf_cpu = &__get_cpu_var(memory_failure_cpu);
        for (;;) {
                spin_lock_irqsave(&mf_cpu->lock, proc_flags);
                gotten = kfifo_get(&mf_cpu->fifo, &entry);
                spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
                if (!gotten)
                        break;
                __memory_failure(entry.pfn, entry.trapno, entry.flags);
        }
}

static int __init memory_failure_init(void)
{
        struct memory_failure_cpu *mf_cpu;
        int cpu;

        for_each_possible_cpu(cpu) {
                mf_cpu = &per_cpu(memory_failure_cpu, cpu);
                spin_lock_init(&mf_cpu->lock);
                INIT_KFIFO(mf_cpu->fifo);
                INIT_WORK(&mf_cpu->work, memory_failure_work_func);
        }

        return 0;
}
core_initcall(memory_failure_init);

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
        struct page *page;
        struct page *p;
        int freeit = 0;
        unsigned int nr_pages;

        if (!pfn_valid(pfn))
                return -ENXIO;

        p = pfn_to_page(pfn);
        page = compound_head(p);

        if (!PageHWPoison(p)) {
                pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
                return 0;
        }

        nr_pages = 1 << compound_trans_order(page);

        if (!get_page_unless_zero(page)) {
                /*
                 * Since a hwpoisoned hugepage should have a non-zero
                 * refcount, a race between memory failure and unpoison
                 * seems to have happened. In such a case unpoison fails
                 * and memory failure runs to the end.
                 */
                if (PageHuge(page)) {
                        pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
                        return 0;
                }
                if (TestClearPageHWPoison(p))
                        atomic_long_sub(nr_pages, &mce_bad_pages);
                pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
                return 0;
        }

        lock_page(page);
        /*
         * This test is racy because PG_hwpoison is set outside of the page
         * lock. That's acceptable because it won't trigger a kernel panic.
         * Instead, the PG_hwpoison page will be caught and isolated on the
         * entrance to the free buddy page pool.
         */
        if (TestClearPageHWPoison(page)) {
                pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
                atomic_long_sub(nr_pages, &mce_bad_pages);
                freeit = 1;
                if (PageHuge(page))
                        clear_page_hwpoison_huge_page(page);
        }
        unlock_page(page);

        put_page(page);
        if (freeit)
                put_page(page);

        return 0;
}
EXPORT_SYMBOL(unpoison_memory);

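/*
 * For testing, the hwpoison injector (mm/hwpoison-inject.c) normally
 * exposes unpoisoning through debugfs, e.g.:
 *
 *     echo $pfn > /sys/kernel/debug/hwpoison/unpoison-pfn
 */
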
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
        int nid = page_to_nid(p);
        if (PageHuge(p))
                return alloc_huge_page_node(page_hstate(compound_head(p)),
                                            nid);
        else
                return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Safely get a reference count on an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with an increased page count, otherwise not.
 */
static int get_any_page(struct page *p, unsigned long pfn, int flags)
{
        int ret;

        if (flags & MF_COUNT_INCREASED)
                return 1;

        /*
         * lock_memory_hotplug() prevents a race with memory hotplug.
         * This is a big hammer; a finer-grained solution would be nicer.
         */
        lock_memory_hotplug();

        /*
         * Isolate the page, so that it doesn't get reallocated if it
         * was free.
         */
        set_migratetype_isolate(p);
        /*
         * When the target page is a free hugepage, just remove it
         * from the free hugepage list.
         */
        if (!get_page_unless_zero(compound_head(p))) {
                if (PageHuge(p)) {
                        pr_info("get_any_page: %#lx free huge page\n", pfn);
                        ret = dequeue_hwpoisoned_huge_page(compound_head(p));
                } else if (is_free_buddy_page(p)) {
                        pr_info("get_any_page: %#lx free buddy page\n", pfn);
                        /* Set hwpoison bit while page is still isolated */
                        SetPageHWPoison(p);
                        ret = 0;
                } else {
                        pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
                                pfn, p->flags);
                        ret = -EIO;
                }
        } else {
                /* Not a free page */
                ret = 1;
        }
        unset_migratetype_isolate(p);
        unlock_memory_hotplug();
        return ret;
}

static int soft_offline_huge_page(struct page *page, int flags)
{
        int ret;
        unsigned long pfn = page_to_pfn(page);
        struct page *hpage = compound_head(page);
        LIST_HEAD(pagelist);

        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
                return ret;
        if (ret == 0)
                goto done;

        if (PageHWPoison(hpage)) {
                put_page(hpage);
                pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
                return -EBUSY;
        }

        /* Keep page count to indicate a given hugepage is isolated. */

        list_add(&hpage->lru, &pagelist);
        ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
                                 true);
        if (ret) {
                struct page *page1, *page2;
                list_for_each_entry_safe(page1, page2, &pagelist, lru)
                        put_page(page1);

                pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
                         pfn, ret, page->flags);
                if (ret > 0)
                        ret = -EIO;
                return ret;
        }
done:
        if (!PageHWPoison(hpage))
                atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
        set_page_hwpoison_huge_page(hpage);
        dequeue_hwpoisoned_huge_page(hpage);
        /* keep elevated page count for bad page */
        return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
        int ret;
        unsigned long pfn = page_to_pfn(page);

        if (PageHuge(page))
                return soft_offline_huge_page(page, flags);

        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
                return ret;
        if (ret == 0)
                goto done;

        /*
         * Page cache page we can handle?
         */
        if (!PageLRU(page)) {
                /*
                 * Try to free it.
                 */
                put_page(page);
                shake_page(page, 1);

                /*
                 * Did it turn free?
                 */
                ret = get_any_page(page, pfn, 0);
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        goto done;
        }
        if (!PageLRU(page)) {
                pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                        pfn, page->flags);
                return -EIO;
        }

        lock_page(page);
        wait_on_page_writeback(page);

        /*
         * Synchronized using the page lock with memory_failure()
         */
        if (PageHWPoison(page)) {
                unlock_page(page);
                put_page(page);
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
                return -EBUSY;
        }

        /*
         * Try to invalidate first. This should work for
         * non-dirty unmapped page cache pages.
         */
        ret = invalidate_inode_page(page);
        unlock_page(page);
        /*
         * RED-PEN: it would be better to keep it isolated here, but we
         * would need to fix the isolation locking first.
         */
        if (ret == 1) {
                put_page(page);
                ret = 0;
                pr_info("soft_offline: %#lx: invalidated\n", pfn);
                goto done;
        }

        /*
         * Simple invalidation didn't work.
         * Try to migrate to a new page instead. migrate.c
         * handles a large number of cases for us.
         */
        ret = isolate_lru_page(page);
        /*
         * Drop the page reference obtained from get_any_page();
         * a successful isolate_lru_page() already took another one.
         */
        put_page(page);
        if (!ret) {
                LIST_HEAD(pagelist);
                inc_zone_page_state(page, NR_ISOLATED_ANON +
                                    page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
                                    0, true);
                if (ret) {
                        putback_lru_pages(&pagelist);
                        pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
                                pfn, ret, page->flags);
                        if (ret > 0)
                                ret = -EIO;
                }
        } else {
                pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
                        pfn, ret, page_count(page), page->flags);
        }
        if (ret)
                return ret;

done:
        atomic_long_add(1, &mce_bad_pages);
        SetPageHWPoison(page);
        /* keep elevated page count for bad page */
        return ret;
}
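
/*
 * User space normally reaches soft_offline_page() through sysfs, by
 * writing a physical address to the memory subsystem's soft_offline_page
 * attribute (see drivers/base/memory.c), e.g.:
 *
 *     echo 0x9d000000 > /sys/devices/system/memory/soft_offline_page
 */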
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008, 2009 Intel Corporation
4 * Authors: Andi Kleen, Fengguang Wu
5 *
6 * High level machine check handler. Handles pages reported by the
7 * hardware as being corrupted usually due to a multi-bit ECC memory or cache
8 * failure.
9 *
10 * In addition there is a "soft offline" entry point that allows stop using
11 * not-yet-corrupted-by-suspicious pages without killing anything.
12 *
13 * Handles page cache pages in various states. The tricky part
14 * here is that we can access any page asynchronously in respect to
15 * other VM users, because memory failures could happen anytime and
16 * anywhere. This could violate some of their assumptions. This is why
17 * this code has to be extremely careful. Generally it tries to use
18 * normal locking rules, as in get the standard locks, even if that means
19 * the error handling takes potentially a long time.
20 *
21 * It can be very tempting to add handling for obscure cases here.
22 * In general any code for handling new cases should only be added iff:
23 * - You know how to test it.
24 * - You have a test that can be added to mce-test
25 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
26 * - The case actually shows up as a frequent (top 10) page state in
27 * tools/mm/page-types when running a real workload.
28 *
29 * There are several operations here with exponential complexity because
30 * of unsuitable VM data structures. For example the operation to map back
31 * from RMAP chains to processes has to walk the complete process list and
32 * has non linear complexity with the number. But since memory corruptions
33 * are rare we hope to get away with this. This avoids impacting the core
34 * VM.
35 */
36
37#define pr_fmt(fmt) "Memory failure: " fmt
38
39#include <linux/kernel.h>
40#include <linux/mm.h>
41#include <linux/page-flags.h>
42#include <linux/sched/signal.h>
43#include <linux/sched/task.h>
44#include <linux/dax.h>
45#include <linux/ksm.h>
46#include <linux/rmap.h>
47#include <linux/export.h>
48#include <linux/pagemap.h>
49#include <linux/swap.h>
50#include <linux/backing-dev.h>
51#include <linux/migrate.h>
52#include <linux/slab.h>
53#include <linux/swapops.h>
54#include <linux/hugetlb.h>
55#include <linux/memory_hotplug.h>
56#include <linux/mm_inline.h>
57#include <linux/memremap.h>
58#include <linux/kfifo.h>
59#include <linux/ratelimit.h>
60#include <linux/pagewalk.h>
61#include <linux/shmem_fs.h>
62#include <linux/sysctl.h>
63#include "swap.h"
64#include "internal.h"
65#include "ras/ras_event.h"
66
67static int sysctl_memory_failure_early_kill __read_mostly;
68
69static int sysctl_memory_failure_recovery __read_mostly = 1;
70
71atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
72
73static bool hw_memory_failure __read_mostly = false;
74
75static DEFINE_MUTEX(mf_mutex);
76
77void num_poisoned_pages_inc(unsigned long pfn)
78{
79 atomic_long_inc(&num_poisoned_pages);
80 memblk_nr_poison_inc(pfn);
81}
82
83void num_poisoned_pages_sub(unsigned long pfn, long i)
84{
85 atomic_long_sub(i, &num_poisoned_pages);
86 if (pfn != -1UL)
87 memblk_nr_poison_sub(pfn, i);
88}
89
90/**
91 * MF_ATTR_RO - Create sysfs entry for each memory failure statistics.
92 * @_name: name of the file in the per NUMA sysfs directory.
93 */
94#define MF_ATTR_RO(_name) \
95static ssize_t _name##_show(struct device *dev, \
96 struct device_attribute *attr, \
97 char *buf) \
98{ \
99 struct memory_failure_stats *mf_stats = \
100 &NODE_DATA(dev->id)->mf_stats; \
101 return sprintf(buf, "%lu\n", mf_stats->_name); \
102} \
103static DEVICE_ATTR_RO(_name)
104
105MF_ATTR_RO(total);
106MF_ATTR_RO(ignored);
107MF_ATTR_RO(failed);
108MF_ATTR_RO(delayed);
109MF_ATTR_RO(recovered);
110
111static struct attribute *memory_failure_attr[] = {
112 &dev_attr_total.attr,
113 &dev_attr_ignored.attr,
114 &dev_attr_failed.attr,
115 &dev_attr_delayed.attr,
116 &dev_attr_recovered.attr,
117 NULL,
118};
119
120const struct attribute_group memory_failure_attr_group = {
121 .name = "memory_failure",
122 .attrs = memory_failure_attr,
123};
124
125static struct ctl_table memory_failure_table[] = {
126 {
127 .procname = "memory_failure_early_kill",
128 .data = &sysctl_memory_failure_early_kill,
129 .maxlen = sizeof(sysctl_memory_failure_early_kill),
130 .mode = 0644,
131 .proc_handler = proc_dointvec_minmax,
132 .extra1 = SYSCTL_ZERO,
133 .extra2 = SYSCTL_ONE,
134 },
135 {
136 .procname = "memory_failure_recovery",
137 .data = &sysctl_memory_failure_recovery,
138 .maxlen = sizeof(sysctl_memory_failure_recovery),
139 .mode = 0644,
140 .proc_handler = proc_dointvec_minmax,
141 .extra1 = SYSCTL_ZERO,
142 .extra2 = SYSCTL_ONE,
143 },
144 { }
145};
146
147/*
148 * Return values:
149 * 1: the page is dissolved (if needed) and taken off from buddy,
150 * 0: the page is dissolved (if needed) and not taken off from buddy,
151 * < 0: failed to dissolve.
152 */
153static int __page_handle_poison(struct page *page)
154{
155 int ret;
156
157 /*
158 * zone_pcp_disable() can't be used here. It will
159 * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
160 * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
161 * optimization is enabled. This will break current lock dependency
162 * chain and leads to deadlock.
163 * Disabling pcp before dissolving the page was a deterministic
164 * approach because we made sure that those pages cannot end up in any
165 * PCP list. Draining PCP lists expels those pages to the buddy system,
166 * but nothing guarantees that those pages do not get back to a PCP
167 * queue if we need to refill those.
168 */
169 ret = dissolve_free_huge_page(page);
170 if (!ret) {
171 drain_all_pages(page_zone(page));
172 ret = take_page_off_buddy(page);
173 }
174
175 return ret;
176}
177
178static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
179{
180 if (hugepage_or_freepage) {
181 /*
182 * Doing this check for free pages is also fine since dissolve_free_huge_page
183 * returns 0 for non-hugetlb pages as well.
184 */
185 if (__page_handle_poison(page) <= 0)
186 /*
187 * We could fail to take off the target page from buddy
188 * for example due to racy page allocation, but that's
189 * acceptable because soft-offlined page is not broken
190 * and if someone really want to use it, they should
191 * take it.
192 */
193 return false;
194 }
195
196 SetPageHWPoison(page);
197 if (release)
198 put_page(page);
199 page_ref_inc(page);
200 num_poisoned_pages_inc(page_to_pfn(page));
201
202 return true;
203}
204
205#if IS_ENABLED(CONFIG_HWPOISON_INJECT)
206
207u32 hwpoison_filter_enable = 0;
208u32 hwpoison_filter_dev_major = ~0U;
209u32 hwpoison_filter_dev_minor = ~0U;
210u64 hwpoison_filter_flags_mask;
211u64 hwpoison_filter_flags_value;
212EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
213EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
214EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
215EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
216EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
217
218static int hwpoison_filter_dev(struct page *p)
219{
220 struct address_space *mapping;
221 dev_t dev;
222
223 if (hwpoison_filter_dev_major == ~0U &&
224 hwpoison_filter_dev_minor == ~0U)
225 return 0;
226
227 mapping = page_mapping(p);
228 if (mapping == NULL || mapping->host == NULL)
229 return -EINVAL;
230
231 dev = mapping->host->i_sb->s_dev;
232 if (hwpoison_filter_dev_major != ~0U &&
233 hwpoison_filter_dev_major != MAJOR(dev))
234 return -EINVAL;
235 if (hwpoison_filter_dev_minor != ~0U &&
236 hwpoison_filter_dev_minor != MINOR(dev))
237 return -EINVAL;
238
239 return 0;
240}
241
242static int hwpoison_filter_flags(struct page *p)
243{
244 if (!hwpoison_filter_flags_mask)
245 return 0;
246
247 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
248 hwpoison_filter_flags_value)
249 return 0;
250 else
251 return -EINVAL;
252}
253
254/*
255 * This allows stress tests to limit test scope to a collection of tasks
256 * by putting them under some memcg. This prevents killing unrelated/important
257 * processes such as /sbin/init. Note that the target task may share clean
258 * pages with init (eg. libc text), which is harmless. If the target task
259 * share _dirty_ pages with another task B, the test scheme must make sure B
260 * is also included in the memcg. At last, due to race conditions this filter
261 * can only guarantee that the page either belongs to the memcg tasks, or is
262 * a freed page.
263 */
264#ifdef CONFIG_MEMCG
265u64 hwpoison_filter_memcg;
266EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
267static int hwpoison_filter_task(struct page *p)
268{
269 if (!hwpoison_filter_memcg)
270 return 0;
271
272 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
273 return -EINVAL;
274
275 return 0;
276}
277#else
278static int hwpoison_filter_task(struct page *p) { return 0; }
279#endif
280
281int hwpoison_filter(struct page *p)
282{
283 if (!hwpoison_filter_enable)
284 return 0;
285
286 if (hwpoison_filter_dev(p))
287 return -EINVAL;
288
289 if (hwpoison_filter_flags(p))
290 return -EINVAL;
291
292 if (hwpoison_filter_task(p))
293 return -EINVAL;
294
295 return 0;
296}
297#else
298int hwpoison_filter(struct page *p)
299{
300 return 0;
301}
302#endif
303
304EXPORT_SYMBOL_GPL(hwpoison_filter);
305
306/*
307 * Kill all processes that have a poisoned page mapped and then isolate
308 * the page.
309 *
310 * General strategy:
311 * Find all processes having the page mapped and kill them.
312 * But we keep a page reference around so that the page is not
313 * actually freed yet.
314 * Then stash the page away
315 *
316 * There's no convenient way to get back to mapped processes
317 * from the VMAs. So do a brute-force search over all
318 * running processes.
319 *
320 * Remember that machine checks are not common (or rather
321 * if they are common you have other problems), so this shouldn't
322 * be a performance issue.
323 *
324 * Also there are some races possible while we get from the
325 * error detection to actually handle it.
326 */
327
328struct to_kill {
329 struct list_head nd;
330 struct task_struct *tsk;
331 unsigned long addr;
332 short size_shift;
333};
334
/*
 * Send a signal to all the processes that have the page mapped:
 * ``action optional'' if they are not immediately affected by the error,
 * ``action required'' if the error happened in the current execution context.
 */
340static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
341{
342 struct task_struct *t = tk->tsk;
343 short addr_lsb = tk->size_shift;
344 int ret = 0;
345
346 pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
347 pfn, t->comm, t->pid);
348
349 if ((flags & MF_ACTION_REQUIRED) && (t == current))
350 ret = force_sig_mceerr(BUS_MCEERR_AR,
351 (void __user *)tk->addr, addr_lsb);
352 else
353 /*
354 * Signal other processes sharing the page if they have
355 * PF_MCE_EARLY set.
356 * Don't use force here, it's convenient if the signal
357 * can be temporarily blocked.
358 * This could cause a loop when the user sets SIGBUS
359 * to SIG_IGN, but hopefully no one will do that?
360 */
361 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
362 addr_lsb, t);
363 if (ret < 0)
364 pr_info("Error sending signal to %s:%d: %d\n",
365 t->comm, t->pid, ret);
366 return ret;
367}
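
/*
 * For reference, a minimal sketch of the userspace side (illustrative
 * only, not part of the kernel): a process that installed a SIGBUS
 * handler with SA_SIGINFO can recover the poisoned range from the
 * siginfo fields filled in above:
 *
 *	static void sigbus_handler(int sig, siginfo_t *si, void *uc)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO ||
 *		    si->si_code == BUS_MCEERR_AR) {
 *			void *bad = si->si_addr;
 *			size_t len = (size_t)1 << si->si_addr_lsb;
 *
 *			...discard or remap [bad, bad + len)...
 *		}
 *	}
 */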
368
/*
 * Unknown page type encountered. Drain the per-CPU LRU caches via
 * lru_add_drain_all() in the hope that the page turns PageLRU and
 * becomes handlable.
 */
373void shake_page(struct page *p)
374{
375 if (PageHuge(p))
376 return;
377 /*
378 * TODO: Could shrink slab caches here if a lightweight range-based
379 * shrinker will be available.
380 */
381 if (PageSlab(p))
382 return;
383
384 lru_add_drain_all();
385}
386EXPORT_SYMBOL_GPL(shake_page);
387
388static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
389 unsigned long address)
390{
391 unsigned long ret = 0;
392 pgd_t *pgd;
393 p4d_t *p4d;
394 pud_t *pud;
395 pmd_t *pmd;
396 pte_t *pte;
397 pte_t ptent;
398
399 VM_BUG_ON_VMA(address == -EFAULT, vma);
400 pgd = pgd_offset(vma->vm_mm, address);
401 if (!pgd_present(*pgd))
402 return 0;
403 p4d = p4d_offset(pgd, address);
404 if (!p4d_present(*p4d))
405 return 0;
406 pud = pud_offset(p4d, address);
407 if (!pud_present(*pud))
408 return 0;
409 if (pud_devmap(*pud))
410 return PUD_SHIFT;
411 pmd = pmd_offset(pud, address);
412 if (!pmd_present(*pmd))
413 return 0;
414 if (pmd_devmap(*pmd))
415 return PMD_SHIFT;
416 pte = pte_offset_map(pmd, address);
417 if (!pte)
418 return 0;
419 ptent = ptep_get(pte);
420 if (pte_present(ptent) && pte_devmap(ptent))
421 ret = PAGE_SHIFT;
422 pte_unmap(pte);
423 return ret;
424}
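
/*
 * The shift computed above ends up in tk->size_shift and ultimately in
 * the si_addr_lsb siginfo field, i.e. the log2 of the mapping size that
 * userspace must treat as lost. For example, on an x86-64 build with
 * default page sizes, a PMD-mapped devdax page returns PMD_SHIFT (21),
 * telling the signal handler that a 2MB range around si_addr is gone.
 */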
425
426/*
427 * Failure handling: if we can't find or can't kill a process there's
428 * not much we can do. We just print a message and ignore otherwise.
429 */
430
431#define FSDAX_INVALID_PGOFF ULONG_MAX
432
433/*
434 * Schedule a process for later kill.
435 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
436 *
 * Note: @fsdax_pgoff is used only when @p is an fsdax page and a
438 * filesystem with a memory failure handler has claimed the
439 * memory_failure event. In all other cases, page->index and
440 * page->mapping are sufficient for mapping the page back to its
441 * corresponding user virtual address.
442 */
443static void __add_to_kill(struct task_struct *tsk, struct page *p,
444 struct vm_area_struct *vma, struct list_head *to_kill,
445 unsigned long ksm_addr, pgoff_t fsdax_pgoff)
446{
447 struct to_kill *tk;
448
449 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
450 if (!tk) {
451 pr_err("Out of memory while machine check handling\n");
452 return;
453 }
454
455 tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
456 if (is_zone_device_page(p)) {
457 if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
458 tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
459 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
460 } else
461 tk->size_shift = page_shift(compound_head(p));
462
	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Since "tk->size_shift"
	 * is always non-zero for !is_zone_device_page(), a
	 * "tk->size_shift == 0" effectively means the page has no mapping
	 * on ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
	 * into a process' address space, it's possible that not all N VMAs
	 * contain mappings for the page, but at least one VMA does.
	 * Only deliver SIGBUS with a payload derived from the VMA that
	 * has a mapping for the page.
	 */
473 if (tk->addr == -EFAULT) {
474 pr_info("Unable to find user space address %lx in %s\n",
475 page_to_pfn(p), tsk->comm);
476 } else if (tk->size_shift == 0) {
477 kfree(tk);
478 return;
479 }
480
481 get_task_struct(tsk);
482 tk->tsk = tsk;
483 list_add_tail(&tk->nd, to_kill);
484}
485
486static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
487 struct vm_area_struct *vma,
488 struct list_head *to_kill)
489{
490 __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
491}
492
493#ifdef CONFIG_KSM
494static bool task_in_to_kill_list(struct list_head *to_kill,
495 struct task_struct *tsk)
496{
497 struct to_kill *tk, *next;
498
499 list_for_each_entry_safe(tk, next, to_kill, nd) {
500 if (tk->tsk == tsk)
501 return true;
502 }
503
504 return false;
505}
506void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
507 struct vm_area_struct *vma, struct list_head *to_kill,
508 unsigned long ksm_addr)
509{
510 if (!task_in_to_kill_list(to_kill, tsk))
511 __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF);
512}
513#endif
/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages, which do not need killing).
 * Also, when FAIL is set, force-kill because something went wrong
 * earlier.
 */
522static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
523 unsigned long pfn, int flags)
524{
525 struct to_kill *tk, *next;
526
527 list_for_each_entry_safe(tk, next, to_kill, nd) {
528 if (forcekill) {
529 /*
530 * In case something went wrong with munmapping
531 * make sure the process doesn't catch the
532 * signal and then access the memory. Just kill it.
533 */
534 if (fail || tk->addr == -EFAULT) {
535 pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
536 pfn, tk->tsk->comm, tk->tsk->pid);
537 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
538 tk->tsk, PIDTYPE_PID);
539 }
540
541 /*
542 * In theory the process could have mapped
543 * something else on the address in-between. We could
544 * check for that, but we need to tell the
545 * process anyways.
546 */
547 else if (kill_proc(tk, pfn, flags) < 0)
548 pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
549 pfn, tk->tsk->comm, tk->tsk->pid);
550 }
551 list_del(&tk->nd);
552 put_task_struct(tk->tsk);
553 kfree(tk);
554 }
555}
556
/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return the task_struct of the first such
 * thread found, or NULL if there is none.
 *
 * We already hold the RCU lock in the caller, so we don't have to call
 * rcu_read_lock/unlock() in this function.
 */
565static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
566{
567 struct task_struct *t;
568
569 for_each_thread(tsk, t) {
570 if (t->flags & PF_MCE_PROCESS) {
571 if (t->flags & PF_MCE_EARLY)
572 return t;
573 } else {
574 if (sysctl_memory_failure_early_kill)
575 return t;
576 }
577 }
578 return NULL;
579}
580
/*
 * Determine whether a given process is an "early kill" process, i.e. one
 * that expects to be signaled when some page under the process is
 * hwpoisoned. Return the task_struct of the dedicated thread (the main
 * thread unless explicitly specified) if the process is "early kill",
 * and NULL otherwise.
 *
 * Note that the above holds for the Action Optional case. In the Action
 * Required case, only the current thread needs to be signaled with
 * SIGBUS; the error is Action Optional for the other, non-current
 * processes sharing the same error page. For those, if the process is
 * "early kill", the task_struct of the dedicated thread is returned as
 * well.
 */
593struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
594{
595 if (!tsk->mm)
596 return NULL;
597 /*
598 * Comparing ->mm here because current task might represent
599 * a subthread, while tsk always points to the main thread.
600 */
601 if (force_early && tsk->mm == current->mm)
602 return current;
603
604 return find_early_kill_thread(tsk);
605}
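
/*
 * For reference (illustrative, describing the userspace side): a
 * process opts into early kill either globally via the
 * vm.memory_failure_early_kill sysctl or per thread with prctl(2):
 *
 *	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *
 * which sets PF_MCE_PROCESS | PF_MCE_EARLY on the calling thread and
 * makes find_early_kill_thread() above select it.
 */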
606
607/*
608 * Collect processes when the error hit an anonymous page.
609 */
610static void collect_procs_anon(struct folio *folio, struct page *page,
611 struct list_head *to_kill, int force_early)
612{
613 struct vm_area_struct *vma;
614 struct task_struct *tsk;
615 struct anon_vma *av;
616 pgoff_t pgoff;
617
618 av = folio_lock_anon_vma_read(folio, NULL);
619 if (av == NULL) /* Not actually mapped anymore */
620 return;
621
622 pgoff = page_to_pgoff(page);
623 rcu_read_lock();
624 for_each_process(tsk) {
625 struct anon_vma_chain *vmac;
626 struct task_struct *t = task_early_kill(tsk, force_early);
627
628 if (!t)
629 continue;
630 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
631 pgoff, pgoff) {
632 vma = vmac->vma;
633 if (vma->vm_mm != t->mm)
634 continue;
635 if (!page_mapped_in_vma(page, vma))
636 continue;
637 add_to_kill_anon_file(t, page, vma, to_kill);
638 }
639 }
640 rcu_read_unlock();
641 anon_vma_unlock_read(av);
642}
643
644/*
645 * Collect processes when the error hit a file mapped page.
646 */
647static void collect_procs_file(struct folio *folio, struct page *page,
648 struct list_head *to_kill, int force_early)
649{
650 struct vm_area_struct *vma;
651 struct task_struct *tsk;
652 struct address_space *mapping = folio->mapping;
653 pgoff_t pgoff;
654
655 i_mmap_lock_read(mapping);
656 rcu_read_lock();
657 pgoff = page_to_pgoff(page);
658 for_each_process(tsk) {
659 struct task_struct *t = task_early_kill(tsk, force_early);
660
661 if (!t)
662 continue;
663 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
664 pgoff) {
665 /*
666 * Send early kill signal to tasks where a vma covers
667 * the page but the corrupted page is not necessarily
668 * mapped in its pte.
669 * Assume applications who requested early kill want
670 * to be informed of all such data corruptions.
671 */
672 if (vma->vm_mm == t->mm)
673 add_to_kill_anon_file(t, page, vma, to_kill);
674 }
675 }
676 rcu_read_unlock();
677 i_mmap_unlock_read(mapping);
678}
679
680#ifdef CONFIG_FS_DAX
681static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
682 struct vm_area_struct *vma,
683 struct list_head *to_kill, pgoff_t pgoff)
684{
685 __add_to_kill(tsk, p, vma, to_kill, 0, pgoff);
686}
687
688/*
689 * Collect processes when the error hit a fsdax page.
690 */
691static void collect_procs_fsdax(struct page *page,
692 struct address_space *mapping, pgoff_t pgoff,
693 struct list_head *to_kill, bool pre_remove)
694{
695 struct vm_area_struct *vma;
696 struct task_struct *tsk;
697
698 i_mmap_lock_read(mapping);
699 rcu_read_lock();
700 for_each_process(tsk) {
701 struct task_struct *t = tsk;
702
		/*
		 * Search all tasks while MF_MEM_PRE_REMOVE is set, because
		 * the current task may not be the one accessing the fsdax
		 * page. Otherwise, search for the current task.
		 */
708 if (!pre_remove)
709 t = task_early_kill(tsk, true);
710 if (!t)
711 continue;
712 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
713 if (vma->vm_mm == t->mm)
714 add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
715 }
716 }
717 rcu_read_unlock();
718 i_mmap_unlock_read(mapping);
719}
720#endif /* CONFIG_FS_DAX */
721
722/*
723 * Collect the processes who have the corrupted page mapped to kill.
724 */
725static void collect_procs(struct folio *folio, struct page *page,
726 struct list_head *tokill, int force_early)
727{
728 if (!folio->mapping)
729 return;
730 if (unlikely(PageKsm(page)))
731 collect_procs_ksm(page, tokill, force_early);
732 else if (PageAnon(page))
733 collect_procs_anon(folio, page, tokill, force_early);
734 else
735 collect_procs_file(folio, page, tokill, force_early);
736}
737
738struct hwpoison_walk {
739 struct to_kill tk;
740 unsigned long pfn;
741 int flags;
742};
743
744static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
745{
746 tk->addr = addr;
747 tk->size_shift = shift;
748}
749
750static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
751 unsigned long poisoned_pfn, struct to_kill *tk)
752{
753 unsigned long pfn = 0;
754
755 if (pte_present(pte)) {
756 pfn = pte_pfn(pte);
757 } else {
758 swp_entry_t swp = pte_to_swp_entry(pte);
759
760 if (is_hwpoison_entry(swp))
761 pfn = swp_offset_pfn(swp);
762 }
763
764 if (!pfn || pfn != poisoned_pfn)
765 return 0;
766
767 set_to_kill(tk, addr, shift);
768 return 1;
769}
770
771#ifdef CONFIG_TRANSPARENT_HUGEPAGE
772static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
773 struct hwpoison_walk *hwp)
774{
775 pmd_t pmd = *pmdp;
776 unsigned long pfn;
777 unsigned long hwpoison_vaddr;
778
779 if (!pmd_present(pmd))
780 return 0;
781 pfn = pmd_pfn(pmd);
782 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
783 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
784 set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
785 return 1;
786 }
787 return 0;
788}
789#else
790static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
791 struct hwpoison_walk *hwp)
792{
793 return 0;
794}
795#endif
796
797static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
798 unsigned long end, struct mm_walk *walk)
799{
800 struct hwpoison_walk *hwp = walk->private;
801 int ret = 0;
802 pte_t *ptep, *mapped_pte;
803 spinlock_t *ptl;
804
805 ptl = pmd_trans_huge_lock(pmdp, walk->vma);
806 if (ptl) {
807 ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
808 spin_unlock(ptl);
809 goto out;
810 }
811
812 mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
813 addr, &ptl);
814 if (!ptep)
815 goto out;
816
817 for (; addr != end; ptep++, addr += PAGE_SIZE) {
818 ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT,
819 hwp->pfn, &hwp->tk);
820 if (ret == 1)
821 break;
822 }
823 pte_unmap_unlock(mapped_pte, ptl);
824out:
825 cond_resched();
826 return ret;
827}
828
829#ifdef CONFIG_HUGETLB_PAGE
830static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
831 unsigned long addr, unsigned long end,
832 struct mm_walk *walk)
833{
834 struct hwpoison_walk *hwp = walk->private;
835 pte_t pte = huge_ptep_get(ptep);
836 struct hstate *h = hstate_vma(walk->vma);
837
838 return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
839 hwp->pfn, &hwp->tk);
840}
841#else
842#define hwpoison_hugetlb_range NULL
843#endif
844
845static const struct mm_walk_ops hwpoison_walk_ops = {
846 .pmd_entry = hwpoison_pte_range,
847 .hugetlb_entry = hwpoison_hugetlb_range,
848 .walk_lock = PGWALK_RDLOCK,
849};
850
851/*
852 * Sends SIGBUS to the current process with error info.
853 *
854 * This function is intended to handle "Action Required" MCEs on already
855 * hardware poisoned pages. They could happen, for example, when
856 * memory_failure() failed to unmap the error page at the first call, or
857 * when multiple local machine checks happened on different CPUs.
858 *
 * The MCE handler currently has no easy access to the error virtual address,
 * so this function walks the page tables to find it. The returned virtual
 * address is correct in most cases, but it could be wrong when the
 * application process has multiple entries mapping the error page.
863 */
864static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
865 int flags)
866{
867 int ret;
868 struct hwpoison_walk priv = {
869 .pfn = pfn,
870 };
871 priv.tk.tsk = p;
872
873 if (!p->mm)
874 return -EFAULT;
875
876 mmap_read_lock(p->mm);
877 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
878 (void *)&priv);
879 if (ret == 1 && priv.tk.addr)
880 kill_proc(&priv.tk, pfn, flags);
881 else
882 ret = 0;
883 mmap_read_unlock(p->mm);
884 return ret > 0 ? -EHWPOISON : -EFAULT;
885}
886
887static const char *action_name[] = {
888 [MF_IGNORED] = "Ignored",
889 [MF_FAILED] = "Failed",
890 [MF_DELAYED] = "Delayed",
891 [MF_RECOVERED] = "Recovered",
892};
893
894static const char * const action_page_types[] = {
895 [MF_MSG_KERNEL] = "reserved kernel page",
896 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
897 [MF_MSG_SLAB] = "kernel slab page",
898 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
899 [MF_MSG_HUGE] = "huge page",
900 [MF_MSG_FREE_HUGE] = "free huge page",
901 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
902 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
903 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
904 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
905 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
906 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
907 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
908 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
909 [MF_MSG_CLEAN_LRU] = "clean LRU page",
910 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
911 [MF_MSG_BUDDY] = "free buddy page",
912 [MF_MSG_DAX] = "dax page",
913 [MF_MSG_UNSPLIT_THP] = "unsplit thp",
914 [MF_MSG_UNKNOWN] = "unknown page",
915};
916
/*
 * XXX: It is possible that a page is isolated from the LRU cache but
 * then kept in the swap cache, or fails to be removed from the page
 * cache. The elevated page count will then stop it from being freed by
 * unpoison. Stress tests should be aware of this memory leak problem.
 */
923static int delete_from_lru_cache(struct folio *folio)
924{
925 if (folio_isolate_lru(folio)) {
		/*
		 * Clear the page flags that the buddy system would
		 * otherwise complain about when the folio is unpoisoned
		 * and freed.
		 */
930 folio_clear_active(folio);
931 folio_clear_unevictable(folio);
932
933 /*
934 * Poisoned page might never drop its ref count to 0 so we have
935 * to uncharge it manually from its memcg.
936 */
937 mem_cgroup_uncharge(folio);
938
939 /*
940 * drop the refcount elevated by folio_isolate_lru()
941 */
942 folio_put(folio);
943 return 0;
944 }
945 return -EIO;
946}
947
948static int truncate_error_folio(struct folio *folio, unsigned long pfn,
949 struct address_space *mapping)
950{
951 int ret = MF_FAILED;
952
953 if (mapping->a_ops->error_remove_folio) {
954 int err = mapping->a_ops->error_remove_folio(mapping, folio);
955
956 if (err != 0)
957 pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
958 else if (!filemap_release_folio(folio, GFP_NOIO))
959 pr_info("%#lx: failed to release buffers\n", pfn);
960 else
961 ret = MF_RECOVERED;
962 } else {
963 /*
964 * If the file system doesn't support it just invalidate
965 * This fails on dirty or anything with private pages
966 */
967 if (mapping_evict_folio(mapping, folio))
968 ret = MF_RECOVERED;
969 else
970 pr_info("%#lx: Failed to invalidate\n", pfn);
971 }
972
973 return ret;
974}
975
976struct page_state {
977 unsigned long mask;
978 unsigned long res;
979 enum mf_action_page_type type;
980
981 /* Callback ->action() has to unlock the relevant page inside it. */
982 int (*action)(struct page_state *ps, struct page *p);
983};
984
/*
 * Return true if the page is still referenced by others, otherwise
 * return false.
 *
 * extra_pins is true when one extra refcount is expected.
 */
991static bool has_extra_refcount(struct page_state *ps, struct page *p,
992 bool extra_pins)
993{
994 int count = page_count(p) - 1;
995
996 if (extra_pins)
997 count -= folio_nr_pages(page_folio(p));
998
999 if (count > 0) {
1000 pr_err("%#lx: %s still referenced by %d users\n",
1001 page_to_pfn(p), action_page_types[ps->type], count);
1002 return true;
1003 }
1004
1005 return false;
1006}
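
/*
 * Example of the arithmetic above (a sketch, not normative): for a
 * single-page shmem folio kept in the page cache after handling, the
 * expected references are the one taken by memory_failure() plus the
 * page cache's own, so with extra_pins true a page_count() of 2 makes
 * count reach 0 and the check passes; any higher count reports the
 * page as still referenced.
 */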
1007
/*
 * Error hit a kernel page.
 * Do nothing: try to be lucky and just not touch it. For a few cases we
 * could be more sophisticated.
 */
1013static int me_kernel(struct page_state *ps, struct page *p)
1014{
1015 unlock_page(p);
1016 return MF_IGNORED;
1017}
1018
1019/*
1020 * Page in unknown state. Do nothing.
1021 */
1022static int me_unknown(struct page_state *ps, struct page *p)
1023{
1024 pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
1025 unlock_page(p);
1026 return MF_FAILED;
1027}
1028
1029/*
1030 * Clean (or cleaned) page cache page.
1031 */
1032static int me_pagecache_clean(struct page_state *ps, struct page *p)
1033{
1034 struct folio *folio = page_folio(p);
1035 int ret;
1036 struct address_space *mapping;
1037 bool extra_pins;
1038
1039 delete_from_lru_cache(folio);
1040
1041 /*
1042 * For anonymous folios the only reference left
1043 * should be the one m_f() holds.
1044 */
1045 if (folio_test_anon(folio)) {
1046 ret = MF_RECOVERED;
1047 goto out;
1048 }
1049
	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
1057 mapping = folio_mapping(folio);
1058 if (!mapping) {
1059 /* Folio has been torn down in the meantime */
1060 ret = MF_FAILED;
1061 goto out;
1062 }
1063
	/*
	 * A shmem page is kept in the page cache instead of being
	 * truncated, so it is expected to have an extra refcount after
	 * error-handling.
	 */
1068 extra_pins = shmem_mapping(mapping);
1069
1070 /*
1071 * Truncation is a bit tricky. Enable it per file system for now.
1072 *
1073 * Open: to take i_rwsem or not for this? Right now we don't.
1074 */
1075 ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
1076 if (has_extra_refcount(ps, p, extra_pins))
1077 ret = MF_FAILED;
1078
1079out:
1080 folio_unlock(folio);
1081
1082 return ret;
1083}
1084
1085/*
1086 * Dirty pagecache page
1087 * Issues: when the error hit a hole page the error is not properly
1088 * propagated.
1089 */
1090static int me_pagecache_dirty(struct page_state *ps, struct page *p)
1091{
1092 struct address_space *mapping = page_mapping(p);
1093
1094 SetPageError(p);
1095 /* TBD: print more information about the file. */
1096 if (mapping) {
1097 /*
1098 * IO error will be reported by write(), fsync(), etc.
1099 * who check the mapping.
1100 * This way the application knows that something went
1101 * wrong with its dirty file data.
1102 *
		 * There's one open issue:
		 *
		 * The EIO will only be reported on the next IO
		 * operation and is then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors:
		 * first through the AS_EIO flag in the address space,
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling, the
		 * only mechanism left to use is AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd first,
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
1121 *
1122 * This can already happen even without hwpoisoned
1123 * pages: first on metadata IO errors (which only
1124 * report through AS_EIO) or when the page is dropped
1125 * at the wrong time.
1126 *
1127 * So right now we assume that the application DTRT on
1128 * the first EIO, but we're not worse than other parts
1129 * of the kernel.
1130 */
1131 mapping_set_error(mapping, -EIO);
1132 }
1133
1134 return me_pagecache_clean(ps, p);
1135}
1136
1137/*
1138 * Clean and dirty swap cache.
1139 *
 * A dirty swap cache page is tricky to handle. The page could live both in the
 * page cache and the swap cache (i.e. the page was freshly swapped in), so it
 * could be referenced concurrently by two types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
1144 * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
1145 * and then
1146 * - clear dirty bit to prevent IO
1147 * - remove from LRU
1148 * - but keep in the swap cache, so that when we return to it on
1149 * a later page fault, we know the application is accessing
1150 * corrupted data and shall be killed (we installed simple
1151 * interception code in do_swap_page to catch it).
1152 *
1153 * Clean swap cache pages can be directly isolated. A later page fault will
1154 * bring in the known good data from disk.
1155 */
1156static int me_swapcache_dirty(struct page_state *ps, struct page *p)
1157{
1158 struct folio *folio = page_folio(p);
1159 int ret;
1160 bool extra_pins = false;
1161
1162 folio_clear_dirty(folio);
1163 /* Trigger EIO in shmem: */
1164 folio_clear_uptodate(folio);
1165
1166 ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
1167 folio_unlock(folio);
1168
1169 if (ret == MF_DELAYED)
1170 extra_pins = true;
1171
1172 if (has_extra_refcount(ps, p, extra_pins))
1173 ret = MF_FAILED;
1174
1175 return ret;
1176}
1177
1178static int me_swapcache_clean(struct page_state *ps, struct page *p)
1179{
1180 struct folio *folio = page_folio(p);
1181 int ret;
1182
1183 delete_from_swap_cache(folio);
1184
1185 ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
1186 folio_unlock(folio);
1187
1188 if (has_extra_refcount(ps, p, false))
1189 ret = MF_FAILED;
1190
1191 return ret;
1192}
1193
/*
 * Huge pages. Needs work.
 * Issues:
 * - An error on a hugepage is contained at hugepage granularity (not at
 *   raw page granularity). To narrow the kill region down to one page,
 *   we would need to break up the PMD.
 */
1200static int me_huge_page(struct page_state *ps, struct page *p)
1201{
1202 struct folio *folio = page_folio(p);
1203 int res;
1204 struct address_space *mapping;
1205 bool extra_pins = false;
1206
1207 mapping = folio_mapping(folio);
1208 if (mapping) {
1209 res = truncate_error_folio(folio, page_to_pfn(p), mapping);
1210 /* The page is kept in page cache. */
1211 extra_pins = true;
1212 folio_unlock(folio);
1213 } else {
1214 folio_unlock(folio);
1215 /*
1216 * migration entry prevents later access on error hugepage,
1217 * so we can free and dissolve it into buddy to save healthy
1218 * subpages.
1219 */
1220 folio_put(folio);
1221 if (__page_handle_poison(p) >= 0) {
1222 page_ref_inc(p);
1223 res = MF_RECOVERED;
1224 } else {
1225 res = MF_FAILED;
1226 }
1227 }
1228
1229 if (has_extra_refcount(ps, p, extra_pins))
1230 res = MF_FAILED;
1231
1232 return res;
1233}
1234
1235/*
1236 * Various page states we can handle.
1237 *
1238 * A page state is defined by its current page->flags bits.
1239 * The table matches them in order and calls the right handler.
1240 *
1241 * This is quite tricky because we can access page at any time
1242 * in its live cycle, so all accesses have to be extremely careful.
1243 *
1244 * This is not complete. More states could be added.
1245 * For any missing state don't attempt recovery.
1246 */
1247
1248#define dirty (1UL << PG_dirty)
1249#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
1250#define unevict (1UL << PG_unevictable)
1251#define mlock (1UL << PG_mlocked)
1252#define lru (1UL << PG_lru)
1253#define head (1UL << PG_head)
1254#define slab (1UL << PG_slab)
1255#define reserved (1UL << PG_reserved)
1256
1257static struct page_state error_states[] = {
1258 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
1259 /*
1260 * free pages are specially detected outside this table:
1261 * PG_buddy pages only make a small fraction of all free pages.
1262 */
1263
1264 /*
1265 * Could in theory check if slab page is free or if we can drop
1266 * currently unused objects without touching them. But just
1267 * treat it as standard kernel for now.
1268 */
1269 { slab, slab, MF_MSG_SLAB, me_kernel },
1270
1271 { head, head, MF_MSG_HUGE, me_huge_page },
1272
1273 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
1274 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
1275
1276 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
1277 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
1278
1279 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
1280 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
1281
1282 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
1283 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
1284
1285 /*
1286 * Catchall entry: must be at end.
1287 */
1288 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
1289};
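
/*
 * Worked example of the table lookup (used by identify_page_state()
 * below): a dirty page-cache page on the LRU (not mlocked, not in the
 * swap cache) has PG_lru and PG_dirty set. It falls through the
 * reserved/slab/huge/swapcache/mlocked/unevictable rows and matches
 * { lru|dirty, lru|dirty }, invoking me_pagecache_dirty(). The first
 * row whose masked flags equal its expected value wins, which is why
 * the catchall { 0, 0 } entry must stay last.
 */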
1290
1291#undef dirty
1292#undef sc
1293#undef unevict
1294#undef mlock
1295#undef lru
1296#undef head
1297#undef slab
1298#undef reserved
1299
1300static void update_per_node_mf_stats(unsigned long pfn,
1301 enum mf_result result)
1302{
1303 int nid = MAX_NUMNODES;
1304 struct memory_failure_stats *mf_stats = NULL;
1305
1306 nid = pfn_to_nid(pfn);
1307 if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) {
1308 WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid);
1309 return;
1310 }
1311
1312 mf_stats = &NODE_DATA(nid)->mf_stats;
1313 switch (result) {
1314 case MF_IGNORED:
1315 ++mf_stats->ignored;
1316 break;
1317 case MF_FAILED:
1318 ++mf_stats->failed;
1319 break;
1320 case MF_DELAYED:
1321 ++mf_stats->delayed;
1322 break;
1323 case MF_RECOVERED:
1324 ++mf_stats->recovered;
1325 break;
1326 default:
1327 WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result);
1328 break;
1329 }
1330 ++mf_stats->total;
1331}
1332
1333/*
1334 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
1335 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1336 */
1337static int action_result(unsigned long pfn, enum mf_action_page_type type,
1338 enum mf_result result)
1339{
1340 trace_memory_failure_event(pfn, type, result);
1341
1342 num_poisoned_pages_inc(pfn);
1343
1344 update_per_node_mf_stats(pfn, result);
1345
1346 pr_err("%#lx: recovery action for %s: %s\n",
1347 pfn, action_page_types[type], action_name[result]);
1348
1349 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
1350}
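
/*
 * For reference, the resulting console line looks like this (assuming
 * the usual "Memory failure: " pr_fmt prefix for this file):
 *
 *	Memory failure: 0x3c8fd: recovery action for dirty LRU page: Recovered
 *
 * The same event is also emitted through the memory_failure_event
 * tracepoint and reflected in the per-node stats updated above.
 */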
1351
1352static int page_action(struct page_state *ps, struct page *p,
1353 unsigned long pfn)
1354{
1355 int result;
1356
1357 /* page p should be unlocked after returning from ps->action(). */
1358 result = ps->action(ps, p);
1359
1360 /* Could do more checks here if page looks ok */
1361 /*
1362 * Could adjust zone counters here to correct for the missing page.
1363 */
1364
1365 return action_result(pfn, ps->type, result);
1366}
1367
1368static inline bool PageHWPoisonTakenOff(struct page *page)
1369{
1370 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
1371}
1372
1373void SetPageHWPoisonTakenOff(struct page *page)
1374{
1375 set_page_private(page, MAGIC_HWPOISON);
1376}
1377
1378void ClearPageHWPoisonTakenOff(struct page *page)
1379{
1380 if (PageHWPoison(page))
1381 set_page_private(page, 0);
1382}
1383
1384/*
1385 * Return true if a page type of a given page is supported by hwpoison
1386 * mechanism (while handling could fail), otherwise false. This function
1387 * does not return true for hugetlb or device memory pages, so it's assumed
1388 * to be called only in the context where we never have such pages.
1389 */
1390static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
1391{
1392 if (PageSlab(page))
1393 return false;
1394
1395 /* Soft offline could migrate non-LRU movable pages */
1396 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
1397 return true;
1398
1399 return PageLRU(page) || is_free_buddy_page(page);
1400}
1401
1402static int __get_hwpoison_page(struct page *page, unsigned long flags)
1403{
1404 struct folio *folio = page_folio(page);
1405 int ret = 0;
1406 bool hugetlb = false;
1407
1408 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false);
1409 if (hugetlb) {
1410 /* Make sure hugetlb demotion did not happen from under us. */
1411 if (folio == page_folio(page))
1412 return ret;
1413 if (ret > 0) {
1414 folio_put(folio);
1415 folio = page_folio(page);
1416 }
1417 }
1418
	/*
	 * This check prevents us from calling folio_try_get() for any
	 * unsupported type of folio, in order to reduce the risk of
	 * unexpected races caused by taking a folio refcount.
	 */
1424 if (!HWPoisonHandlable(&folio->page, flags))
1425 return -EBUSY;
1426
1427 if (folio_try_get(folio)) {
1428 if (folio == page_folio(page))
1429 return 1;
1430
1431 pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
1432 folio_put(folio);
1433 }
1434
1435 return 0;
1436}
1437
1438static int get_any_page(struct page *p, unsigned long flags)
1439{
1440 int ret = 0, pass = 0;
1441 bool count_increased = false;
1442
1443 if (flags & MF_COUNT_INCREASED)
1444 count_increased = true;
1445
1446try_again:
1447 if (!count_increased) {
1448 ret = __get_hwpoison_page(p, flags);
1449 if (!ret) {
1450 if (page_count(p)) {
1451 /* We raced with an allocation, retry. */
1452 if (pass++ < 3)
1453 goto try_again;
1454 ret = -EBUSY;
1455 } else if (!PageHuge(p) && !is_free_buddy_page(p)) {
1456 /* We raced with put_page, retry. */
1457 if (pass++ < 3)
1458 goto try_again;
1459 ret = -EIO;
1460 }
1461 goto out;
1462 } else if (ret == -EBUSY) {
1463 /*
1464 * We raced with (possibly temporary) unhandlable
1465 * page, retry.
1466 */
1467 if (pass++ < 3) {
1468 shake_page(p);
1469 goto try_again;
1470 }
1471 ret = -EIO;
1472 goto out;
1473 }
1474 }
1475
1476 if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
1477 ret = 1;
1478 } else {
1479 /*
1480 * A page we cannot handle. Check whether we can turn
1481 * it into something we can handle.
1482 */
1483 if (pass++ < 3) {
1484 put_page(p);
1485 shake_page(p);
1486 count_increased = false;
1487 goto try_again;
1488 }
1489 put_page(p);
1490 ret = -EIO;
1491 }
1492out:
1493 if (ret == -EIO)
1494 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));
1495
1496 return ret;
1497}
1498
1499static int __get_unpoison_page(struct page *page)
1500{
1501 struct folio *folio = page_folio(page);
1502 int ret = 0;
1503 bool hugetlb = false;
1504
1505 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true);
1506 if (hugetlb) {
1507 /* Make sure hugetlb demotion did not happen from under us. */
1508 if (folio == page_folio(page))
1509 return ret;
1510 if (ret > 0)
1511 folio_put(folio);
1512 }
1513
1514 /*
1515 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
1516 * but also isolated from buddy freelist, so need to identify the
1517 * state and have to cancel both operations to unpoison.
1518 */
1519 if (PageHWPoisonTakenOff(page))
1520 return -EHWPOISON;
1521
1522 return get_page_unless_zero(page) ? 1 : 0;
1523}
1524
1525/**
1526 * get_hwpoison_page() - Get refcount for memory error handling
1527 * @p: Raw error page (hit by memory error)
1528 * @flags: Flags controlling behavior of error handling
1529 *
1530 * get_hwpoison_page() takes a page refcount of an error page to handle memory
1531 * error on it, after checking that the error page is in a well-defined state
 * (defined as a page type on which we can successfully handle the memory
 * error, such as an LRU page or a hugetlb page).
1534 *
1535 * Memory error handling could be triggered at any time on any type of page,
1536 * so it's prone to race with typical memory management lifecycle (like
1537 * allocation and free). So to avoid such races, get_hwpoison_page() takes
1538 * extra care for the error page's state (as done in __get_hwpoison_page()),
1539 * and has some retry logic in get_any_page().
1540 *
1541 * When called from unpoison_memory(), the caller should already ensure that
1542 * the given page has PG_hwpoison. So it's never reused for other page
1543 * allocations, and __get_unpoison_page() never races with them.
1544 *
1545 * Return: 0 on failure,
1546 * 1 on success for in-use pages in a well-defined state,
1547 * -EIO for pages on which we can not handle memory errors,
1548 * -EBUSY when get_hwpoison_page() has raced with page lifecycle
1549 * operations like allocation and free,
1550 * -EHWPOISON when the page is hwpoisoned and taken off from buddy.
1551 */
1552static int get_hwpoison_page(struct page *p, unsigned long flags)
1553{
1554 int ret;
1555
1556 zone_pcp_disable(page_zone(p));
1557 if (flags & MF_UNPOISON)
1558 ret = __get_unpoison_page(p);
1559 else
1560 ret = get_any_page(p, flags);
1561 zone_pcp_enable(page_zone(p));
1562
1563 return ret;
1564}
1565
1566/*
1567 * Do all that is necessary to remove user space mappings. Unmap
1568 * the pages and send SIGBUS to the processes if the data was dirty.
1569 */
1570static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1571 int flags, struct page *hpage)
1572{
1573 struct folio *folio = page_folio(hpage);
1574 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
1575 struct address_space *mapping;
1576 LIST_HEAD(tokill);
1577 bool unmap_success;
1578 int forcekill;
1579 bool mlocked = PageMlocked(hpage);
1580
1581 /*
1582 * Here we are interested only in user-mapped pages, so skip any
1583 * other types of pages.
1584 */
1585 if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
1586 return true;
1587 if (!(PageLRU(hpage) || PageHuge(p)))
1588 return true;
1589
1590 /*
1591 * This check implies we don't kill processes if their pages
1592 * are in the swap cache early. Those are always late kills.
1593 */
1594 if (!page_mapped(p))
1595 return true;
1596
1597 if (PageSwapCache(p)) {
1598 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
1599 ttu &= ~TTU_HWPOISON;
1600 }
1601
1602 /*
1603 * Propagate the dirty bit from PTEs to struct page first, because we
1604 * need this to decide if we should kill or just drop the page.
1605 * XXX: the dirty test could be racy: set_page_dirty() may not always
1606 * be called inside page lock (it's recommended but not enforced).
1607 */
1608 mapping = page_mapping(hpage);
1609 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1610 mapping_can_writeback(mapping)) {
1611 if (page_mkclean(hpage)) {
1612 SetPageDirty(hpage);
1613 } else {
1614 ttu &= ~TTU_HWPOISON;
1615 pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
1616 pfn);
1617 }
1618 }
1619
1620 /*
1621 * First collect all the processes that have the page
1622 * mapped in dirty form. This has to be done before try_to_unmap,
1623 * because ttu takes the rmap data structures down.
1624 */
1625 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
1626
1627 if (PageHuge(hpage) && !PageAnon(hpage)) {
1628 /*
1629 * For hugetlb pages in shared mappings, try_to_unmap
1630 * could potentially call huge_pmd_unshare. Because of
1631 * this, take semaphore in write mode here and set
1632 * TTU_RMAP_LOCKED to indicate we have taken the lock
1633 * at this higher level.
1634 */
1635 mapping = hugetlb_page_mapping_lock_write(hpage);
1636 if (mapping) {
1637 try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
1638 i_mmap_unlock_write(mapping);
1639 } else
1640 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
1641 } else {
1642 try_to_unmap(folio, ttu);
1643 }
1644
1645 unmap_success = !page_mapped(p);
1646 if (!unmap_success)
1647 pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
1648 pfn, page_mapcount(p));
1649
1650 /*
1651 * try_to_unmap() might put mlocked page in lru cache, so call
1652 * shake_page() again to ensure that it's flushed.
1653 */
1654 if (mlocked)
1655 shake_page(hpage);
1656
1657 /*
1658 * Now that the dirty bit has been propagated to the
1659 * struct page and all unmaps done we can decide if
1660 * killing is needed or not. Only kill when the page
1661 * was dirty or the process is not restartable,
1662 * otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier,
	 * use a more forceful, uncatchable kill to prevent
	 * any accesses to the poisoned memory.
1666 */
1667 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
1668 !unmap_success;
1669 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1670
1671 return unmap_success;
1672}
1673
1674static int identify_page_state(unsigned long pfn, struct page *p,
1675 unsigned long page_flags)
1676{
1677 struct page_state *ps;
1678
1679 /*
1680 * The first check uses the current page flags which may not have any
1681 * relevant information. The second check with the saved page flags is
1682 * carried out only if the first check can't determine the page status.
1683 */
1684 for (ps = error_states;; ps++)
1685 if ((p->flags & ps->mask) == ps->res)
1686 break;
1687
1688 page_flags |= (p->flags & (1UL << PG_dirty));
1689
1690 if (!ps->mask)
1691 for (ps = error_states;; ps++)
1692 if ((page_flags & ps->mask) == ps->res)
1693 break;
1694 return page_action(ps, p, pfn);
1695}
1696
1697static int try_to_split_thp_page(struct page *page)
1698{
1699 int ret;
1700
1701 lock_page(page);
1702 ret = split_huge_page(page);
1703 unlock_page(page);
1704
1705 if (unlikely(ret))
1706 put_page(page);
1707
1708 return ret;
1709}
1710
1711static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
1712 struct address_space *mapping, pgoff_t index, int flags)
1713{
1714 struct to_kill *tk;
1715 unsigned long size = 0;
1716
1717 list_for_each_entry(tk, to_kill, nd)
1718 if (tk->size_shift)
1719 size = max(size, 1UL << tk->size_shift);
1720
1721 if (size) {
1722 /*
1723 * Unmap the largest mapping to avoid breaking up device-dax
1724 * mappings which are constant size. The actual size of the
1725 * mapping being torn down is communicated in siginfo, see
1726 * kill_proc()
1727 */
1728 loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
1729
1730 unmap_mapping_range(mapping, start, size, 0);
1731 }
1732
1733 kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
1734}
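
/*
 * Worked example of the alignment above (illustrative): for a 2MB
 * device-dax mapping, tk->size_shift is PMD_SHIFT, so size == 2MB.
 * With a poisoned page at file index 0x303 (byte offset 0x303000),
 * start is rounded down to the 2MB boundary 0x200000 and the whole
 * [0x200000, 0x400000) range is unmapped, keeping the constant-size
 * dax mapping intact; the finer-grained extent is still reported to
 * userspace via si_addr_lsb in kill_proc().
 */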
1735
/*
 * Only dev_pagemap pages get here: fsdax, when the filesystem either
 * does not claim or fails to claim a hwpoison event, or devdax.
 * The fsdax pages are initialized per base page, while the devdax pages
 * could be initialized either as base pages or as compound pages with
 * vmemmap optimization enabled. Devdax is simplistic in its dealing with
 * hwpoison: if a subpage of a compound page is poisoned, simply marking
 * the compound head page is by far sufficient.
 */
1745static int mf_generic_kill_procs(unsigned long long pfn, int flags,
1746 struct dev_pagemap *pgmap)
1747{
1748 struct folio *folio = pfn_folio(pfn);
1749 LIST_HEAD(to_kill);
1750 dax_entry_t cookie;
1751 int rc = 0;
1752
1753 /*
1754 * Prevent the inode from being freed while we are interrogating
1755 * the address_space, typically this would be handled by
1756 * lock_page(), but dax pages do not use the page lock. This
1757 * also prevents changes to the mapping of this pfn until
1758 * poison signaling is complete.
1759 */
1760 cookie = dax_lock_folio(folio);
1761 if (!cookie)
1762 return -EBUSY;
1763
1764 if (hwpoison_filter(&folio->page)) {
1765 rc = -EOPNOTSUPP;
1766 goto unlock;
1767 }
1768
1769 switch (pgmap->type) {
1770 case MEMORY_DEVICE_PRIVATE:
1771 case MEMORY_DEVICE_COHERENT:
1772 /*
1773 * TODO: Handle device pages which may need coordination
1774 * with device-side memory.
1775 */
1776 rc = -ENXIO;
1777 goto unlock;
1778 default:
1779 break;
1780 }
1781
1782 /*
1783 * Use this flag as an indication that the dax page has been
1784 * remapped UC to prevent speculative consumption of poison.
1785 */
1786 SetPageHWPoison(&folio->page);
1787
1788 /*
1789 * Unlike System-RAM there is no possibility to swap in a
1790 * different physical page at a given virtual address, so all
1791 * userspace consumption of ZONE_DEVICE memory necessitates
1792 * SIGBUS (i.e. MF_MUST_KILL)
1793 */
1794 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1795 collect_procs(folio, &folio->page, &to_kill, true);
1796
1797 unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
1798unlock:
1799 dax_unlock_folio(folio, cookie);
1800 return rc;
1801}
1802
1803#ifdef CONFIG_FS_DAX
1804/**
1805 * mf_dax_kill_procs - Collect and kill processes who are using this file range
1806 * @mapping: address_space of the file in use
1807 * @index: start pgoff of the range within the file
1808 * @count: length of the range, in unit of PAGE_SIZE
1809 * @mf_flags: memory failure flags
1810 */
1811int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
1812 unsigned long count, int mf_flags)
1813{
1814 LIST_HEAD(to_kill);
1815 dax_entry_t cookie;
1816 struct page *page;
1817 size_t end = index + count;
1818 bool pre_remove = mf_flags & MF_MEM_PRE_REMOVE;
1819
1820 mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1821
1822 for (; index < end; index++) {
1823 page = NULL;
1824 cookie = dax_lock_mapping_entry(mapping, index, &page);
1825 if (!cookie)
1826 return -EBUSY;
1827 if (!page)
1828 goto unlock;
1829
1830 if (!pre_remove)
1831 SetPageHWPoison(page);
1832
1833 /*
1834 * The pre_remove case is revoking access, the memory is still
1835 * good and could theoretically be put back into service.
1836 */
1837 collect_procs_fsdax(page, mapping, index, &to_kill, pre_remove);
1838 unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
1839 index, mf_flags);
1840unlock:
1841 dax_unlock_mapping_entry(mapping, index, cookie);
1842 }
1843 return 0;
1844}
1845EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
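
/*
 * A minimal sketch of the intended caller (illustrative; the function
 * name is hypothetical, the signature follows dax_holder_operations):
 * a filesystem that claimed the pmem range translates the failed
 * device offset into a file range and calls in here:
 *
 *	static int example_notify_failure(struct dax_device *dax_dev,
 *					  u64 offset, u64 len, int mf_flags)
 *	{
 *		pgoff_t pgoff;
 *		struct address_space *mapping;
 *
 *		...resolve offset/len to mapping and pgoff...
 *		return mf_dax_kill_procs(mapping, pgoff,
 *					 len >> PAGE_SHIFT, mf_flags);
 *	}
 */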
1846#endif /* CONFIG_FS_DAX */
1847
1848#ifdef CONFIG_HUGETLB_PAGE
1849
/*
 * Struct raw_hwp_page represents information about a "raw error page",
 * forming a singly linked list rooted at the folio's ->_hugetlb_hwpoison
 * field.
 */
1854struct raw_hwp_page {
1855 struct llist_node node;
1856 struct page *page;
1857};
1858
1859static inline struct llist_head *raw_hwp_list_head(struct folio *folio)
1860{
1861 return (struct llist_head *)&folio->_hugetlb_hwpoison;
1862}
1863
1864bool is_raw_hwpoison_page_in_hugepage(struct page *page)
1865{
1866 struct llist_head *raw_hwp_head;
1867 struct raw_hwp_page *p;
1868 struct folio *folio = page_folio(page);
1869 bool ret = false;
1870
1871 if (!folio_test_hwpoison(folio))
1872 return false;
1873
1874 if (!folio_test_hugetlb(folio))
1875 return PageHWPoison(page);
1876
	/*
	 * When RawHwpUnreliable is set, the kernel has lost track of which
	 * subpages are HWPOISON. So return as if ALL subpages are
	 * HWPOISONed.
	 */
1881 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1882 return true;
1883
1884 mutex_lock(&mf_mutex);
1885
1886 raw_hwp_head = raw_hwp_list_head(folio);
1887 llist_for_each_entry(p, raw_hwp_head->first, node) {
1888 if (page == p->page) {
1889 ret = true;
1890 break;
1891 }
1892 }
1893
1894 mutex_unlock(&mf_mutex);
1895
1896 return ret;
1897}
1898
1899static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
1900{
1901 struct llist_node *head;
1902 struct raw_hwp_page *p, *next;
1903 unsigned long count = 0;
1904
1905 head = llist_del_all(raw_hwp_list_head(folio));
1906 llist_for_each_entry_safe(p, next, head, node) {
1907 if (move_flag)
1908 SetPageHWPoison(p->page);
1909 else
1910 num_poisoned_pages_sub(page_to_pfn(p->page), 1);
1911 kfree(p);
1912 count++;
1913 }
1914 return count;
1915}
1916
1917static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
1918{
1919 struct llist_head *head;
1920 struct raw_hwp_page *raw_hwp;
1921 struct raw_hwp_page *p, *next;
1922 int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0;
1923
	/*
	 * Once the hwpoison hugepage has lost reliable raw error info,
	 * there is little point in keeping additional error info
	 * precisely, so skip adding any further raw error info.
	 */
1929 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1930 return -EHWPOISON;
1931 head = raw_hwp_list_head(folio);
1932 llist_for_each_entry_safe(p, next, head->first, node) {
1933 if (p->page == page)
1934 return -EHWPOISON;
1935 }
1936
1937 raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
1938 if (raw_hwp) {
1939 raw_hwp->page = page;
1940 llist_add(&raw_hwp->node, head);
1941 /* the first error event will be counted in action_result(). */
1942 if (ret)
1943 num_poisoned_pages_inc(page_to_pfn(page));
1944 } else {
		/*
		 * Failed to save raw error info. We no longer track all
		 * hwpoisoned subpages, so we must refuse to free/dissolve
		 * this hwpoisoned hugepage.
		 */
1950 folio_set_hugetlb_raw_hwp_unreliable(folio);
1951 /*
1952 * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not
1953 * used any more, so free it.
1954 */
1955 __folio_free_raw_hwp(folio, false);
1956 }
1957 return ret;
1958}
1959
1960static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
1961{
1962 /*
1963 * hugetlb_vmemmap_optimized hugepages can't be freed because struct
1964 * pages for tail pages are required but they don't exist.
1965 */
1966 if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
1967 return 0;
1968
1969 /*
1970 * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by
1971 * definition.
1972 */
1973 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1974 return 0;
1975
1976 return __folio_free_raw_hwp(folio, move_flag);
1977}
1978
1979void folio_clear_hugetlb_hwpoison(struct folio *folio)
1980{
1981 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1982 return;
1983 if (folio_test_hugetlb_vmemmap_optimized(folio))
1984 return;
1985 folio_clear_hwpoison(folio);
1986 folio_free_raw_hwp(folio, true);
1987}
1988
1989/*
1990 * Called from hugetlb code with hugetlb_lock held.
1991 *
1992 * Return values:
1993 * 0 - free hugepage
1994 * 1 - in-use hugepage
1995 * 2 - not a hugepage
1996 * -EBUSY - the hugepage is busy (try to retry)
1997 * -EHWPOISON - the hugepage is already hwpoisoned
1998 */
1999int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
2000 bool *migratable_cleared)
2001{
2002 struct page *page = pfn_to_page(pfn);
2003 struct folio *folio = page_folio(page);
2004 int ret = 2; /* fallback to normal page handling */
2005 bool count_increased = false;
2006
2007 if (!folio_test_hugetlb(folio))
2008 goto out;
2009
2010 if (flags & MF_COUNT_INCREASED) {
2011 ret = 1;
2012 count_increased = true;
2013 } else if (folio_test_hugetlb_freed(folio)) {
2014 ret = 0;
2015 } else if (folio_test_hugetlb_migratable(folio)) {
2016 ret = folio_try_get(folio);
2017 if (ret)
2018 count_increased = true;
2019 } else {
2020 ret = -EBUSY;
2021 if (!(flags & MF_NO_RETRY))
2022 goto out;
2023 }
2024
2025 if (folio_set_hugetlb_hwpoison(folio, page)) {
2026 ret = -EHWPOISON;
2027 goto out;
2028 }
2029
2030 /*
2031 * Clearing hugetlb_migratable for hwpoisoned hugepages to prevent them
2032 * from being migrated by memory hotremove.
2033 */
2034 if (count_increased && folio_test_hugetlb_migratable(folio)) {
2035 folio_clear_hugetlb_migratable(folio);
2036 *migratable_cleared = true;
2037 }
2038
2039 return ret;
2040out:
2041 if (count_increased)
2042 folio_put(folio);
2043 return ret;
2044}
2045
/*
 * Taking a refcount on hugetlb pages needs extra care about races
 * with basic operations like hugepage allocation/free/demotion.
 * So some of the prechecks for hwpoison (pinning, and testing/setting
 * PageHWPoison) should be done within a single hugetlb_lock section.
 */
2052static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2053{
2054 int res;
2055 struct page *p = pfn_to_page(pfn);
2056 struct folio *folio;
2057 unsigned long page_flags;
2058 bool migratable_cleared = false;
2059
2060 *hugetlb = 1;
2061retry:
2062 res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
2063 if (res == 2) { /* fallback to normal page handling */
2064 *hugetlb = 0;
2065 return 0;
2066 } else if (res == -EHWPOISON) {
2067 pr_err("%#lx: already hardware poisoned\n", pfn);
2068 if (flags & MF_ACTION_REQUIRED) {
2069 folio = page_folio(p);
2070 res = kill_accessing_process(current, folio_pfn(folio), flags);
2071 }
2072 return res;
2073 } else if (res == -EBUSY) {
2074 if (!(flags & MF_NO_RETRY)) {
2075 flags |= MF_NO_RETRY;
2076 goto retry;
2077 }
2078 return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2079 }
2080
2081 folio = page_folio(p);
2082 folio_lock(folio);
2083
2084 if (hwpoison_filter(p)) {
2085 folio_clear_hugetlb_hwpoison(folio);
2086 if (migratable_cleared)
2087 folio_set_hugetlb_migratable(folio);
2088 folio_unlock(folio);
2089 if (res == 1)
2090 folio_put(folio);
2091 return -EOPNOTSUPP;
2092 }
2093
	/*
	 * Handling a free hugepage. The possible race with hugepage
	 * allocation or demotion is prevented by the PageHWPoison flag.
	 */
2098 if (res == 0) {
2099 folio_unlock(folio);
2100 if (__page_handle_poison(p) >= 0) {
2101 page_ref_inc(p);
2102 res = MF_RECOVERED;
2103 } else {
2104 res = MF_FAILED;
2105 }
2106 return action_result(pfn, MF_MSG_FREE_HUGE, res);
2107 }
2108
2109 page_flags = folio->flags;
2110
2111 if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
2112 folio_unlock(folio);
2113 return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2114 }
2115
2116 return identify_page_state(pfn, p, page_flags);
2117}
2118
2119#else
2120static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2121{
2122 return 0;
2123}
2124
2125static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
2126{
2127 return 0;
2128}
2129#endif /* CONFIG_HUGETLB_PAGE */
2130
2131/* Drop the extra refcount in case we come from madvise() */
2132static void put_ref_page(unsigned long pfn, int flags)
2133{
2134 struct page *page;
2135
2136 if (!(flags & MF_COUNT_INCREASED))
2137 return;
2138
2139 page = pfn_to_page(pfn);
2140 if (page)
2141 put_page(page);
2142}
2143
2144static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
2145 struct dev_pagemap *pgmap)
2146{
2147 int rc = -ENXIO;
2148
2149 /* device metadata space is not recoverable */
2150 if (!pgmap_pfn_valid(pgmap, pfn))
2151 goto out;
2152
2153 /*
2154 * Call driver's implementation to handle the memory failure, otherwise
2155 * fall back to generic handler.
2156 */
2157 if (pgmap_has_memory_failure(pgmap)) {
2158 rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
2159 /*
2160 * Fall back to generic handler too if operation is not
2161 * supported inside the driver/device/filesystem.
2162 */
2163 if (rc != -EOPNOTSUPP)
2164 goto out;
2165 }
2166
2167 rc = mf_generic_kill_procs(pfn, flags, pgmap);
2168out:
2169 /* drop pgmap ref acquired in caller */
2170 put_dev_pagemap(pgmap);
2171 if (rc != -EOPNOTSUPP)
2172 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
2173 return rc;
2174}
2175
2176/**
2177 * memory_failure - Handle memory failure of a page.
2178 * @pfn: Page Number of the corrupted page
2179 * @flags: fine tune action taken
2180 *
2181 * This function is called by the low level machine check code
2182 * of an architecture when it detects hardware memory corruption
2183 * of a page. It tries its best to recover, which includes
2184 * dropping pages, killing processes etc.
2185 *
2186 * The function is primarily of use for corruptions that
2187 * happen outside the current execution context (e.g. when
2188 * detected by a background scrubber)
2189 *
2190 * Must run in process context (e.g. a work queue) with interrupts
2191 * enabled and no spinlocks held.
2192 *
2193 * Return: 0 for successfully handled the memory error,
2194 * -EOPNOTSUPP for hwpoison_filter() filtered the error event,
2195 * < 0(except -EOPNOTSUPP) on failure.
2196 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	int hugetlb = 0;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	mutex_lock(&mf_mutex);

	if (!(flags & MF_SW_SIMULATED))
		hw_memory_failure = true;

	p = pfn_to_online_page(pfn);
	if (!p) {
		res = arch_memory_failure(pfn, flags);
		if (res == 0)
			goto unlock_mutex;

		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			put_ref_page(pfn, flags);
			if (pgmap) {
				res = memory_failure_dev_pagemap(pfn, flags,
								 pgmap);
				goto unlock_mutex;
			}
		}
		pr_err("%#lx: memory outside kernel control\n", pfn);
		res = -ENXIO;
		goto unlock_mutex;
	}

try_again:
	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
	if (hugetlb)
		goto unlock_mutex;

	if (TestSetPageHWPoison(p)) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		if (flags & MF_COUNT_INCREASED)
			put_page(p);
		goto unlock_mutex;
	}

	/*
	 * There is nothing we need to (or can) do about count=0 pages:
	 * 1) it's a free page, and therefore in safe hands:
	 *    check_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: we cannot stop them from
	 *    reading/writing the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up the page count from 0,
	 * as that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				res = action_result(pfn, MF_MSG_BUDDY, res);
			} else {
				res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
			}
			goto unlock_mutex;
		} else if (res < 0) {
			res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			goto unlock_mutex;
		}
	}

	hpage = compound_head(p);
	if (PageTransHuge(hpage)) {
		/*
		 * The flag must be set after the refcount is bumped,
		 * otherwise it may race with THP split.
		 * And the flag can't be set in get_hwpoison_page() since
		 * it is called by soft offline too and it is just called
		 * for !MF_COUNT_INCREASED. So here seems to be the best
		 * place.
		 *
		 * We don't need to care about the error handling paths above
		 * for get_hwpoison_page(), since they handle either a free
		 * page or an unhandlable page. The refcount is bumped iff the
		 * page is a valid handlable page.
		 */
		SetPageHasHWPoisoned(hpage);
		if (try_to_split_thp_page(p) < 0) {
			res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p);

	lock_page(p);

	/*
	 * We only intend to deal with non-compound pages here. However,
	 * the page could have become part of a compound page due to a
	 * race window. If this happens, we may try again in the hope of
	 * handling the page in the next round.
	 */
	if (PageCompound(p)) {
		if (retry) {
			ClearPageHWPoison(p);
			unlock_page(p);
			put_page(p);
			flags &= ~MF_COUNT_INCREASED;
			retry = false;
			goto try_again;
		}
		res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action. One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
	 * status correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	if (hwpoison_filter(p)) {
		ClearPageHWPoison(p);
		unlock_page(p);
		put_page(p);
		res = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	/*
	 * __munlock_folio() may clear a writeback page's LRU flag without
	 * the page lock. We need to wait for writeback completion on this
	 * page, or it may trigger a VFS BUG while evicting the inode.
	 */
	if (!PageLRU(p) && !PageWriteback(p))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on failure: __filemap_remove_folio() assumes an unmapped page.
	 */
	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		goto unlock_page;
	}

identify_page_state:
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	unlock_page(p);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);

#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)
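
/* With an order of 4, each per-CPU FIFO holds 1 << 4 = 16 pending entries. */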

struct memory_failure_entry {
	unsigned long pfn;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn = pfn,
		.flags = flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
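
/*
 * Usage sketch (hypothetical architecture code, not copied from any real
 * handler): memory_failure() itself must run in process context, so an
 * error handler running in IRQ context queues the pfn instead and lets
 * the per-CPU work item above do the heavy lifting later:
 *
 *	void arch_report_patrol_scrub_error(unsigned long paddr)
 *	{
 *		memory_failure_queue(PHYS_PFN(paddr), 0);
 *	}
 */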

static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
}

/*
 * Process memory_failure work queued on the specified CPU.
 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
 */
void memory_failure_queue_kick(int cpu)
{
	struct memory_failure_cpu *mf_cpu;

	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
	cancel_work_sync(&mf_cpu->work);
	memory_failure_work_func(&mf_cpu->work);
}
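
/*
 * Usage sketch (hedged; loosely modeled on the task-work pattern used by
 * firmware-first error handling such as APEI/GHES — names here are
 * illustrative, not a real callback): the queue is drained synchronously
 * before the affected task re-enters user space:
 *
 *	static void arch_error_task_work(struct callback_head *head)
 *	{
 *		memory_failure_queue_kick(smp_processor_id());
 *	}
 */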

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	register_sysctl_init("vm", memory_failure_table);

	return 0;
}
core_initcall(memory_failure_init);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the page to be unpoisoned
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is done only at the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct folio *folio;
	struct page *p;
	int ret = -EBUSY, ghp;
	unsigned long count = 1;
	bool huge = false;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	folio = page_folio(p);

	mutex_lock(&mf_mutex);

	if (hw_memory_failure) {
		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_ref_count(folio) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_test_slab(folio) || PageTable(&folio->page) ||
	    folio_test_reserved(folio) || PageOffline(&folio->page))
		goto unlock_mutex;

	/*
	 * Note that folio->_mapcount is overloaded by SLAB, so the simple
	 * test in folio_mapped() has to come after the folio_test_slab()
	 * check above.
	 */
	if (folio_mapped(folio)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_mapping(folio)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	ghp = get_hwpoison_page(p, MF_UNPOISON);
	if (!ghp) {
		if (PageHuge(p)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0)
				goto unlock_mutex;
		}
		ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
	} else if (ghp < 0) {
		if (ghp == -EHWPOISON) {
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
		} else {
			ret = ghp;
			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
					 pfn, &unpoison_rs);
		}
	} else {
		if (PageHuge(p)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0) {
				folio_put(folio);
				goto unlock_mutex;
			}
		}

		folio_put(folio);
		if (TestClearPageHWPoison(p)) {
			folio_put(folio);
			ret = 0;
		}
	}

unlock_mutex:
	mutex_unlock(&mf_mutex);
	if (!ret) {
		if (!huge)
			num_poisoned_pages_sub(pfn, 1);
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 page_to_pfn(p), &unpoison_rs);
	}
	return ret;
}
EXPORT_SYMBOL(unpoison_memory);
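
/*
 * Usage sketch: unpoison_memory() is reached from software error-injection
 * test tooling. Assuming the hwpoison-inject debugfs module is loaded (an
 * assumption about the test setup; see mm/hwpoison-inject.c), a pfn that
 * was software-poisoned can be cleared again from user space, e.g.:
 *
 *	echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 *	echo $pfn > /sys/kernel/debug/hwpoison/unpoison-pfn
 */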

static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
{
	bool isolated = false;

	if (folio_test_hugetlb(folio)) {
		isolated = isolate_hugetlb(folio, pagelist);
	} else {
		bool lru = !__folio_test_movable(folio);

		if (lru)
			isolated = folio_isolate_lru(folio);
		else
			isolated = isolate_movable_page(&folio->page,
							ISOLATE_UNEVICTABLE);

		if (isolated) {
			list_add(&folio->lru, pagelist);
			if (lru)
				node_stat_add_folio(folio, NR_ISOLATED_ANON +
						    folio_is_file_lru(folio));
		}
	}

	/*
	 * If we succeeded in isolating the folio, isolation grabbed another
	 * refcount on it, so we can safely drop the one we got from
	 * get_any_page(). If we failed to isolate the folio, we cannot go
	 * further and will return an error, so drop the reference we got
	 * from get_any_page() as well.
	 */
	folio_put(folio);
	return isolated;
}

/*
 * soft_offline_in_use_page handles hugetlb pages and non-hugetlb pages.
 * If the page is a non-dirty, unmapped page-cache page, it is simply
 * invalidated. If the page is mapped, its contents are migrated over.
 */
static int soft_offline_in_use_page(struct page *page)
{
	long ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct folio *folio = page_folio(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = folio_test_hugetlb(folio);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	if (!huge && folio_test_large(folio)) {
		if (try_to_split_thp_page(page)) {
			pr_info("soft offline: %#lx: thp split failed\n", pfn);
			return -EBUSY;
		}
		folio = page_folio(page);
	}

	folio_lock(folio);
	if (!huge)
		folio_wait_writeback(folio);
	if (PageHWPoison(page)) {
		folio_unlock(folio);
		folio_put(folio);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
		/*
		 * Try to invalidate first. This should work for
		 * non-dirty, unmapped page-cache pages.
		 */
		ret = mapping_evict_folio(folio_mapping(folio), folio);
	folio_unlock(folio);

	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (mf_isolate_folio(folio, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
				pfn, msg_page[huge], ret, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
			pfn, msg_page[huge], page_count(page), &page->flags);
		ret = -EBUSY;
	}
	return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success,
 * -EOPNOTSUPP if the error event was filtered out by hwpoison_filter(),
 * or a negative errno otherwise.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page;

	if (!pfn_valid(pfn)) {
		WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
		return -ENXIO;
	}

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(pfn, flags);
		return -EIO;
	}

	mutex_lock(&mf_mutex);

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(pfn, flags);
		mutex_unlock(&mf_mutex);
		return 0;
	}

retry:
	get_online_mems();
	ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
	put_online_mems();

	if (hwpoison_filter(page)) {
		if (ret > 0)
			put_page(page);

		mutex_unlock(&mf_mutex);
		return -EOPNOTSUPP;
	}

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (!page_handle_poison(page, true, false)) {
			if (try_again) {
				try_again = false;
				flags &= ~MF_COUNT_INCREASED;
				goto retry;
			}
			ret = -EBUSY;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}
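
/*
 * Usage sketch: the retirement policy lives in user space; a daemon
 * tracking corrected-error counts typically reaches soft_offline_page()
 * through the memory sysfs interface by writing the physical address of
 * the page to retire (the path assumes CONFIG_MEMORY_FAILURE and the
 * sysfs memory interface are available on the running kernel):
 *
 *	echo 0x112345600 > /sys/devices/system/memory/soft_offline_page
 */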