1/*
2 * Copyright (C) 2008, 2009 Intel Corporation
3 * Authors: Andi Kleen, Fengguang Wu
4 *
5 * This software may be redistributed and/or modified under the terms of
6 * the GNU General Public License ("GPL") version 2 only as published by the
7 * Free Software Foundation.
8 *
9 * High level machine check handler. Handles pages reported by the
10 * hardware as being corrupted usually due to a multi-bit ECC memory or cache
11 * failure.
12 *
13 * In addition there is a "soft offline" entry point that allows us to stop
14 * using not-yet-corrupted but suspicious pages without killing anything.
15 *
16 * Handles page cache pages in various states. The tricky part
17 * here is that we can access any page asynchronously with respect to
18 * other VM users, because memory failures could happen anytime and
19 * anywhere. This could violate some of their assumptions. This is why
20 * this code has to be extremely careful. Generally it tries to use
21 * normal locking rules, as in get the standard locks, even if that means
22 * the error handling takes potentially a long time.
23 *
24 * It can be very tempting to add handling for obscure cases here.
25 * In general any code for handling new cases should only be added iff:
26 * - You know how to test it.
27 * - You have a test that can be added to mce-test
28 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
29 * - The case actually shows up as a frequent (top 10) page state in
30 * tools/vm/page-types when running a real workload.
31 *
32 * There are several operations here with exponential complexity because
33 * of unsuitable VM data structures. For example the operation to map back
34 * from RMAP chains to processes has to walk the complete process list and
35 * has non-linear complexity in the number of processes. But since memory corruptions
36 * are rare we hope to get away with this. This avoids impacting the core
37 * VM.
38 */
39#include <linux/kernel.h>
40#include <linux/mm.h>
41#include <linux/page-flags.h>
42#include <linux/kernel-page-flags.h>
43#include <linux/sched/signal.h>
44#include <linux/sched/task.h>
45#include <linux/ksm.h>
46#include <linux/rmap.h>
47#include <linux/export.h>
48#include <linux/pagemap.h>
49#include <linux/swap.h>
50#include <linux/backing-dev.h>
51#include <linux/migrate.h>
52#include <linux/suspend.h>
53#include <linux/slab.h>
54#include <linux/swapops.h>
55#include <linux/hugetlb.h>
56#include <linux/memory_hotplug.h>
57#include <linux/mm_inline.h>
58#include <linux/kfifo.h>
59#include <linux/ratelimit.h>
60#include "internal.h"
61#include "ras/ras_event.h"
62
63int sysctl_memory_failure_early_kill __read_mostly = 0;
64
65int sysctl_memory_failure_recovery __read_mostly = 1;
66
67atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
68
69#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70
71u32 hwpoison_filter_enable = 0;
72u32 hwpoison_filter_dev_major = ~0U;
73u32 hwpoison_filter_dev_minor = ~0U;
74u64 hwpoison_filter_flags_mask;
75u64 hwpoison_filter_flags_value;
76EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
77EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
78EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
79EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
80EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
81
82static int hwpoison_filter_dev(struct page *p)
83{
84 struct address_space *mapping;
85 dev_t dev;
86
87 if (hwpoison_filter_dev_major == ~0U &&
88 hwpoison_filter_dev_minor == ~0U)
89 return 0;
90
91 /*
92 * page_mapping() does not accept slab pages.
93 */
94 if (PageSlab(p))
95 return -EINVAL;
96
97 mapping = page_mapping(p);
98 if (mapping == NULL || mapping->host == NULL)
99 return -EINVAL;
100
101 dev = mapping->host->i_sb->s_dev;
102 if (hwpoison_filter_dev_major != ~0U &&
103 hwpoison_filter_dev_major != MAJOR(dev))
104 return -EINVAL;
105 if (hwpoison_filter_dev_minor != ~0U &&
106 hwpoison_filter_dev_minor != MINOR(dev))
107 return -EINVAL;
108
109 return 0;
110}
111
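/*
 * Filter on page flags: when a mask is configured, only handle the error
 * if the page's stable flags, masked with hwpoison_filter_flags_mask,
 * match hwpoison_filter_flags_value.
 */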
112static int hwpoison_filter_flags(struct page *p)
113{
114 if (!hwpoison_filter_flags_mask)
115 return 0;
116
117 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
118 hwpoison_filter_flags_value)
119 return 0;
120 else
121 return -EINVAL;
122}
123
124/*
125 * This allows stress tests to limit test scope to a collection of tasks
126 * by putting them under some memcg. This prevents killing unrelated/important
127 * processes such as /sbin/init. Note that the target task may share clean
128 * pages with init (eg. libc text), which is harmless. If the target task
129 * shares _dirty_ pages with another task B, the test scheme must make sure B
130 * is also included in the memcg. Lastly, due to race conditions this filter
131 * can only guarantee that the page either belongs to the memcg tasks, or is
132 * a freed page.
133 */
134#ifdef CONFIG_MEMCG
135u64 hwpoison_filter_memcg;
136EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
137static int hwpoison_filter_task(struct page *p)
138{
139 if (!hwpoison_filter_memcg)
140 return 0;
141
142 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
143 return -EINVAL;
144
145 return 0;
146}
147#else
148static int hwpoison_filter_task(struct page *p) { return 0; }
149#endif
150
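/*
 * Top-level filter entry: returns 0 if the error should be handled,
 * -EINVAL if any of the device/flags/memcg filters rejects the page.
 */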
151int hwpoison_filter(struct page *p)
152{
153 if (!hwpoison_filter_enable)
154 return 0;
155
156 if (hwpoison_filter_dev(p))
157 return -EINVAL;
158
159 if (hwpoison_filter_flags(p))
160 return -EINVAL;
161
162 if (hwpoison_filter_task(p))
163 return -EINVAL;
164
165 return 0;
166}
167#else
168int hwpoison_filter(struct page *p)
169{
170 return 0;
171}
172#endif
173
174EXPORT_SYMBOL_GPL(hwpoison_filter);
175
176/*
177 * Send all the processes who have the page mapped a signal.
178 * ``action optional'' if they are not immediately affected by the error
179 * ``action required'' if error happened in current execution context
180 */
181static int kill_proc(struct task_struct *t, unsigned long addr,
182 unsigned long pfn, struct page *page, int flags)
183{
184 short addr_lsb;
185 int ret;
186
187 pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
188 pfn, t->comm, t->pid);
189 addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
190
191 if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
192 ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr,
193 addr_lsb, current);
194 } else {
195 /*
196 * Don't use force here, it's convenient if the signal
197 * can be temporarily blocked.
198 * This could cause a loop when the user sets SIGBUS
199 * to SIG_IGN, but hopefully no one will do that?
200 */
201 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr,
202 addr_lsb, t); /* synchronous? */
203 }
204 if (ret < 0)
205 pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
206 t->comm, t->pid, ret);
207 return ret;
208}
209
210/*
211 * When an unknown page type is encountered, drain as many buffers as possible
212 * in the hope of turning the page into an LRU or free page, which we can handle.
213 */
214void shake_page(struct page *p, int access)
215{
216 if (PageHuge(p))
217 return;
218
219 if (!PageSlab(p)) {
220 lru_add_drain_all();
221 if (PageLRU(p))
222 return;
223 drain_all_pages(page_zone(p));
224 if (PageLRU(p) || is_free_buddy_page(p))
225 return;
226 }
227
228 /*
229 * Only drop slab objects here (via drop_slab_node(), which would
230 * also shrink other caches) if access is not potentially fatal.
231 */
232 if (access)
233 drop_slab_node(page_to_nid(p));
234}
235EXPORT_SYMBOL_GPL(shake_page);
236
237/*
238 * Kill all processes that have a poisoned page mapped and then isolate
239 * the page.
240 *
241 * General strategy:
242 * Find all processes having the page mapped and kill them.
243 * But we keep a page reference around so that the page is not
244 * actually freed yet.
245 * Then stash the page away
246 *
247 * There's no convenient way to get back to mapped processes
248 * from the VMAs. So do a brute-force search over all
249 * running processes.
250 *
251 * Remember that machine checks are not common (or rather
252 * if they are common you have other problems), so this shouldn't
253 * be a performance issue.
254 *
255 * Also there are some races possible while we get from the
256 * error detection to actually handle it.
257 */
258
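/*
 * One entry per process to kill later: the task, the user address of the
 * mapping, and whether the address lookup succeeded.
 */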
259struct to_kill {
260 struct list_head nd;
261 struct task_struct *tsk;
262 unsigned long addr;
263 char addr_valid;
264};
265
266/*
267 * Failure handling: if we can't find or can't kill a process there's
268 * not much we can do. We just print a message and otherwise ignore the error.
269 */
270
271/*
272 * Schedule a process for later kill.
273 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
274 * TBD would GFP_NOIO be enough?
275 */
276static void add_to_kill(struct task_struct *tsk, struct page *p,
277 struct vm_area_struct *vma,
278 struct list_head *to_kill,
279 struct to_kill **tkc)
280{
281 struct to_kill *tk;
282
283 if (*tkc) {
284 tk = *tkc;
285 *tkc = NULL;
286 } else {
287 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
288 if (!tk) {
289 pr_err("Memory failure: Out of memory while machine check handling\n");
290 return;
291 }
292 }
293 tk->addr = page_address_in_vma(p, vma);
294 tk->addr_valid = 1;
295
296 /*
297 * In theory we don't have to kill when the page was
298 * munmapped. But it could also be an mremap. Since that's
299 * likely very rare, kill anyway just out of paranoia, but use
300 * a SIGKILL because the error is not contained anymore.
301 */
302 if (tk->addr == -EFAULT) {
303 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
304 page_to_pfn(p), tsk->comm);
305 tk->addr_valid = 0;
306 }
307 get_task_struct(tsk);
308 tk->tsk = tsk;
309 list_add_tail(&tk->nd, to_kill);
310}
311
312/*
313 * Kill the processes that have been collected earlier.
314 *
315 * Only do anything when FORCEKILL is set, otherwise just free the list
316 * (this is used for clean pages which do not need killing)
317 * Also when FAIL is set do a force kill because something went
318 * wrong earlier.
319 */
320static void kill_procs(struct list_head *to_kill, int forcekill,
321 bool fail, struct page *page, unsigned long pfn,
322 int flags)
323{
324 struct to_kill *tk, *next;
325
326 list_for_each_entry_safe (tk, next, to_kill, nd) {
327 if (forcekill) {
328 /*
329 * In case something went wrong with munmapping
330 * make sure the process doesn't catch the
331 * signal and then access the memory. Just kill it.
332 */
333 if (fail || tk->addr_valid == 0) {
334 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
335 pfn, tk->tsk->comm, tk->tsk->pid);
336 force_sig(SIGKILL, tk->tsk);
337 }
338
339 /*
340 * In theory the process could have mapped
341 * something else on the address in-between. We could
342 * check for that, but we need to tell the
343 * process anyways.
344 */
345 else if (kill_proc(tk->tsk, tk->addr,
346 pfn, page, flags) < 0)
347 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
348 pfn, tk->tsk->comm, tk->tsk->pid);
349 }
350 put_task_struct(tk->tsk);
351 kfree(tk);
352 }
353}
354
355/*
356 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
357 * on behalf of the thread group. Return task_struct of the (first found)
358 * dedicated thread if found, and return NULL otherwise.
359 *
360 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
361 * have to call rcu_read_lock/unlock() in this function.
362 */
363static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
364{
365 struct task_struct *t;
366
367 for_each_thread(tsk, t)
368 if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
369 return t;
370 return NULL;
371}
372
373/*
374 * Determine whether a given process is an "early kill" process which expects
375 * to be signaled when some page under the process is hwpoisoned.
376 * Return task_struct of the dedicated thread (main thread unless explicitly
377 * specified) if the process is "early kill," and otherwise returns NULL.
378 */
379static struct task_struct *task_early_kill(struct task_struct *tsk,
380 int force_early)
381{
382 struct task_struct *t;
383 if (!tsk->mm)
384 return NULL;
385 if (force_early)
386 return tsk;
387 t = find_early_kill_thread(tsk);
388 if (t)
389 return t;
390 if (sysctl_memory_failure_early_kill)
391 return tsk;
392 return NULL;
393}
394
395/*
396 * Collect processes when the error hit an anonymous page.
397 */
398static void collect_procs_anon(struct page *page, struct list_head *to_kill,
399 struct to_kill **tkc, int force_early)
400{
401 struct vm_area_struct *vma;
402 struct task_struct *tsk;
403 struct anon_vma *av;
404 pgoff_t pgoff;
405
406 av = page_lock_anon_vma_read(page);
407 if (av == NULL) /* Not actually mapped anymore */
408 return;
409
410 pgoff = page_to_pgoff(page);
411 read_lock(&tasklist_lock);
412 for_each_process (tsk) {
413 struct anon_vma_chain *vmac;
414 struct task_struct *t = task_early_kill(tsk, force_early);
415
416 if (!t)
417 continue;
418 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
419 pgoff, pgoff) {
420 vma = vmac->vma;
421 if (!page_mapped_in_vma(page, vma))
422 continue;
423 if (vma->vm_mm == t->mm)
424 add_to_kill(t, page, vma, to_kill, tkc);
425 }
426 }
427 read_unlock(&tasklist_lock);
428 page_unlock_anon_vma_read(av);
429}
430
431/*
432 * Collect processes when the error hit a file mapped page.
433 */
434static void collect_procs_file(struct page *page, struct list_head *to_kill,
435 struct to_kill **tkc, int force_early)
436{
437 struct vm_area_struct *vma;
438 struct task_struct *tsk;
439 struct address_space *mapping = page->mapping;
440
441 i_mmap_lock_read(mapping);
442 read_lock(&tasklist_lock);
443 for_each_process(tsk) {
444 pgoff_t pgoff = page_to_pgoff(page);
445 struct task_struct *t = task_early_kill(tsk, force_early);
446
447 if (!t)
448 continue;
449 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
450 pgoff) {
451 /*
452 * Send early kill signal to tasks where a vma covers
453 * the page but the corrupted page is not necessarily
454 * mapped in its pte.
455 * Assume applications who requested early kill want
456 * to be informed of all such data corruptions.
457 */
458 if (vma->vm_mm == t->mm)
459 add_to_kill(t, page, vma, to_kill, tkc);
460 }
461 }
462 read_unlock(&tasklist_lock);
463 i_mmap_unlock_read(mapping);
464}
465
466/*
467 * Collect the processes who have the corrupted page mapped to kill.
468 * This is done in two steps for locking reasons.
469 * First preallocate one tokill structure outside the spin locks,
470 * so that we can kill at least one process reasonably reliably.
471 */
472static void collect_procs(struct page *page, struct list_head *tokill,
473 int force_early)
474{
475 struct to_kill *tk;
476
477 if (!page->mapping)
478 return;
479
480 tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
481 if (!tk)
482 return;
483 if (PageAnon(page))
484 collect_procs_anon(page, tokill, &tk, force_early);
485 else
486 collect_procs_file(page, tokill, &tk, force_early);
487 kfree(tk);
488}
489
490static const char *action_name[] = {
491 [MF_IGNORED] = "Ignored",
492 [MF_FAILED] = "Failed",
493 [MF_DELAYED] = "Delayed",
494 [MF_RECOVERED] = "Recovered",
495};
496
497static const char * const action_page_types[] = {
498 [MF_MSG_KERNEL] = "reserved kernel page",
499 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
500 [MF_MSG_SLAB] = "kernel slab page",
501 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
502 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
503 [MF_MSG_HUGE] = "huge page",
504 [MF_MSG_FREE_HUGE] = "free huge page",
505 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
506 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
507 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
508 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
509 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
510 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
511 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
512 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
513 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
514 [MF_MSG_CLEAN_LRU] = "clean LRU page",
515 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
516 [MF_MSG_BUDDY] = "free buddy page",
517 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
518 [MF_MSG_UNKNOWN] = "unknown page",
519};
520
521/*
522 * XXX: It is possible that a page is isolated from LRU cache,
523 * and then kept in the swap cache, or fails to be removed from the page cache.
524 * The page count will stop it from being freed by unpoison.
525 * Stress tests should be aware of this memory leak problem.
526 */
527static int delete_from_lru_cache(struct page *p)
528{
529 if (!isolate_lru_page(p)) {
530 /*
531 * Clear relevant page flags, so that the buddy system won't
532 * complain when the page is unpoisoned and freed.
533 */
534 ClearPageActive(p);
535 ClearPageUnevictable(p);
536
537 /*
538 * Poisoned page might never drop its ref count to 0 so we have
539 * to uncharge it manually from its memcg.
540 */
541 mem_cgroup_uncharge(p);
542
543 /*
544 * drop the page count elevated by isolate_lru_page()
545 */
546 put_page(p);
547 return 0;
548 }
549 return -EIO;
550}
551
552static int truncate_error_page(struct page *p, unsigned long pfn,
553 struct address_space *mapping)
554{
555 int ret = MF_FAILED;
556
557 if (mapping->a_ops->error_remove_page) {
558 int err = mapping->a_ops->error_remove_page(mapping, p);
559
560 if (err != 0) {
561 pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
562 pfn, err);
563 } else if (page_has_private(p) &&
564 !try_to_release_page(p, GFP_NOIO)) {
565 pr_info("Memory failure: %#lx: failed to release buffers\n",
566 pfn);
567 } else {
568 ret = MF_RECOVERED;
569 }
570 } else {
571 /*
572 * If the file system doesn't support it just invalidate
573 * This fails on dirty or anything with private pages
574 */
575 if (invalidate_inode_page(p))
576 ret = MF_RECOVERED;
577 else
578 pr_info("Memory failure: %#lx: Failed to invalidate\n",
579 pfn);
580 }
581
582 return ret;
583}
584
585/*
586 * Error hit kernel page.
587 * Do nothing, try to be lucky and not touch this instead. For a few cases we
588 * could be more sophisticated.
589 */
590static int me_kernel(struct page *p, unsigned long pfn)
591{
592 return MF_IGNORED;
593}
594
595/*
596 * Page in unknown state. Do nothing.
597 */
598static int me_unknown(struct page *p, unsigned long pfn)
599{
600 pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
601 return MF_FAILED;
602}
603
604/*
605 * Clean (or cleaned) page cache page.
606 */
607static int me_pagecache_clean(struct page *p, unsigned long pfn)
608{
609 struct address_space *mapping;
610
611 delete_from_lru_cache(p);
612
613 /*
614 * For anonymous pages we're done; the only reference left
615 * should be the one m_f() holds.
616 */
617 if (PageAnon(p))
618 return MF_RECOVERED;
619
620 /*
621 * Now truncate the page in the page cache. This is really
622 * more like a "temporary hole punch"
623 * Don't do this for block devices when someone else
624 * has a reference, because it could be file system metadata
625 * and that's not safe to truncate.
626 */
627 mapping = page_mapping(p);
628 if (!mapping) {
629 /*
630 * Page has been torn down in the meantime
631 */
632 return MF_FAILED;
633 }
634
635 /*
636 * Truncation is a bit tricky. Enable it per file system for now.
637 *
638 * Open: to take i_mutex or not for this? Right now we don't.
639 */
640 return truncate_error_page(p, pfn, mapping);
641}
642
643/*
644 * Dirty pagecache page
645 * Issues: when the error hits a hole page the error is not properly
646 * propagated.
647 */
648static int me_pagecache_dirty(struct page *p, unsigned long pfn)
649{
650 struct address_space *mapping = page_mapping(p);
651
652 SetPageError(p);
653 /* TBD: print more information about the file. */
654 if (mapping) {
655 /*
656 * IO error will be reported by write(), fsync(), etc.
657 * who check the mapping.
658 * This way the application knows that something went
659 * wrong with its dirty file data.
660 *
661 * There's one open issue:
662 *
663 * The EIO will be only reported on the next IO
664 * operation and then cleared through the IO map.
665 * Normally Linux has two mechanisms to pass IO errors:
666 * first through the AS_EIO flag in the address space
667 * and then through the PageError flag in the page.
668 * Since we drop pages on memory failure handling the
669 * only mechanism open to use is through AS_EIO.
670 *
671 * This has the disadvantage that it gets cleared on
672 * the first operation that returns an error, while
673 * the PageError bit is more sticky and only cleared
674 * when the page is reread or dropped. If an
675 * application assumes it will always get an error on
676 * fsync, but does other operations on the fd before that,
677 * and the page is dropped in between, then the error
678 * will not be properly reported.
679 *
680 * This can already happen even without hwpoisoned
681 * pages: first on metadata IO errors (which only
682 * report through AS_EIO) or when the page is dropped
683 * at the wrong time.
684 *
685 * So right now we assume that the application DTRT on
686 * the first EIO, but we're not worse than other parts
687 * of the kernel.
688 */
689 mapping_set_error(mapping, -EIO);
690 }
691
692 return me_pagecache_clean(p, pfn);
693}
694
695/*
696 * Clean and dirty swap cache.
697 *
698 * Dirty swap cache page is tricky to handle. The page could live both in page
699 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
700 * referenced concurrently by 2 types of PTEs:
701 * normal PTEs and swap PTEs. We try to handle them consistently by calling
702 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
703 * and then
704 * - clear dirty bit to prevent IO
705 * - remove from LRU
706 * - but keep in the swap cache, so that when we return to it on
707 * a later page fault, we know the application is accessing
708 * corrupted data and shall be killed (we installed simple
709 * interception code in do_swap_page to catch it).
710 *
711 * Clean swap cache pages can be directly isolated. A later page fault will
712 * bring in the known good data from disk.
713 */
714static int me_swapcache_dirty(struct page *p, unsigned long pfn)
715{
716 ClearPageDirty(p);
717 /* Trigger EIO in shmem: */
718 ClearPageUptodate(p);
719
720 if (!delete_from_lru_cache(p))
721 return MF_DELAYED;
722 else
723 return MF_FAILED;
724}
725
726static int me_swapcache_clean(struct page *p, unsigned long pfn)
727{
728 delete_from_swap_cache(p);
729
730 if (!delete_from_lru_cache(p))
731 return MF_RECOVERED;
732 else
733 return MF_FAILED;
734}
735
736/*
737 * Huge pages. Needs work.
738 * Issues:
739 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
740 * To narrow down kill region to one page, we need to break up pmd.
741 */
742static int me_huge_page(struct page *p, unsigned long pfn)
743{
744 int res = 0;
745 struct page *hpage = compound_head(p);
746 struct address_space *mapping;
747
748 if (!PageHuge(hpage))
749 return MF_DELAYED;
750
751 mapping = page_mapping(hpage);
752 if (mapping) {
753 res = truncate_error_page(hpage, pfn, mapping);
754 } else {
755 unlock_page(hpage);
756 /*
757 * migration entry prevents later access on error anonymous
758 * hugepage, so we can free and dissolve it into buddy to
759 * save healthy subpages.
760 */
761 if (PageAnon(hpage))
762 put_page(hpage);
763 dissolve_free_huge_page(p);
764 res = MF_RECOVERED;
765 lock_page(hpage);
766 }
767
768 return res;
769}
770
771/*
772 * Various page states we can handle.
773 *
774 * A page state is defined by its current page->flags bits.
775 * The table matches them in order and calls the right handler.
776 *
777 * This is quite tricky because we can access page at any time
778 * in its life cycle, so all accesses have to be extremely careful.
779 *
780 * This is not complete. More states could be added.
781 * For any missing state don't attempt recovery.
782 */
783
784#define dirty (1UL << PG_dirty)
785#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
786#define unevict (1UL << PG_unevictable)
787#define mlock (1UL << PG_mlocked)
788#define writeback (1UL << PG_writeback)
789#define lru (1UL << PG_lru)
790#define head (1UL << PG_head)
791#define slab (1UL << PG_slab)
792#define reserved (1UL << PG_reserved)
793
794static struct page_state {
795 unsigned long mask;
796 unsigned long res;
797 enum mf_action_page_type type;
798 int (*action)(struct page *p, unsigned long pfn);
799} error_states[] = {
800 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
801 /*
802 * free pages are specially detected outside this table:
803 * PG_buddy pages only make a small fraction of all free pages.
804 */
805
806 /*
807 * Could in theory check if slab page is free or if we can drop
808 * currently unused objects without touching them. But just
809 * treat it as standard kernel for now.
810 */
811 { slab, slab, MF_MSG_SLAB, me_kernel },
812
813 { head, head, MF_MSG_HUGE, me_huge_page },
814
815 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
816 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
817
818 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
819 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
820
821 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
822 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
823
824 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
825 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
826
827 /*
828 * Catchall entry: must be at end.
829 */
830 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
831};
832
833#undef dirty
834#undef sc
835#undef unevict
836#undef mlock
837#undef writeback
838#undef lru
839#undef head
840#undef slab
841#undef reserved
842
843/*
844 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
845 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
846 */
847static void action_result(unsigned long pfn, enum mf_action_page_type type,
848 enum mf_result result)
849{
850 trace_memory_failure_event(pfn, type, result);
851
852 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
853 pfn, action_page_types[type], action_name[result]);
854}
855
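/*
 * Run the handler for the matched page state, verify that the page is no
 * longer referenced by anyone else, and log/trace the outcome.
 */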
856static int page_action(struct page_state *ps, struct page *p,
857 unsigned long pfn)
858{
859 int result;
860 int count;
861
862 result = ps->action(p, pfn);
863
864 count = page_count(p) - 1;
865 if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
866 count--;
867 if (count > 0) {
868 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
869 pfn, action_page_types[ps->type], count);
870 result = MF_FAILED;
871 }
872 action_result(pfn, ps->type, result);
873
874 /* Could do more checks here if page looks ok */
875 /*
876 * Could adjust zone counters here to correct for the missing page.
877 */
878
879 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
880}
881
882/**
883 * get_hwpoison_page() - Get refcount for memory error handling:
884 * @page: raw error page (hit by memory error)
885 *
886 * Return: 0 if we failed to grab the refcount, otherwise true (some
887 * non-zero value).
888 */
889int get_hwpoison_page(struct page *page)
890{
891 struct page *head = compound_head(page);
892
893 if (!PageHuge(head) && PageTransHuge(head)) {
894 /*
895 * Non-anonymous thp exists only at allocation/free time. We
896 * can't handle such a case correctly, so let's give it up.
897 * This should be better than triggering BUG_ON when kernel
898 * tries to touch the "partially handled" page.
899 */
900 if (!PageAnon(head)) {
901 pr_err("Memory failure: %#lx: non anonymous thp\n",
902 page_to_pfn(page));
903 return 0;
904 }
905 }
906
907 if (get_page_unless_zero(head)) {
908 if (head == compound_head(page))
909 return 1;
910
911 pr_info("Memory failure: %#lx cannot catch tail\n",
912 page_to_pfn(page));
913 put_page(head);
914 }
915
916 return 0;
917}
918EXPORT_SYMBOL_GPL(get_hwpoison_page);
919
920/*
921 * Do all that is necessary to remove user space mappings. Unmap
922 * the pages and send SIGBUS to the processes if the data was dirty.
923 */
924static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
925 int flags, struct page **hpagep)
926{
927 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
928 struct address_space *mapping;
929 LIST_HEAD(tokill);
930 bool unmap_success;
931 int kill = 1, forcekill;
932 struct page *hpage = *hpagep;
933 bool mlocked = PageMlocked(hpage);
934
935 /*
936 * Here we are interested only in user-mapped pages, so skip any
937 * other types of pages.
938 */
939 if (PageReserved(p) || PageSlab(p))
940 return true;
941 if (!(PageLRU(hpage) || PageHuge(p)))
942 return true;
943
944 /*
945 * This check implies we don't kill processes if their pages
946 * are in the swap cache early. Those are always late kills.
947 */
948 if (!page_mapped(hpage))
949 return true;
950
951 if (PageKsm(p)) {
952 pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
953 return false;
954 }
955
956 if (PageSwapCache(p)) {
957 pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
958 pfn);
959 ttu |= TTU_IGNORE_HWPOISON;
960 }
961
962 /*
963 * Propagate the dirty bit from PTEs to struct page first, because we
964 * need this to decide if we should kill or just drop the page.
965 * XXX: the dirty test could be racy: set_page_dirty() may not always
966 * be called inside page lock (it's recommended but not enforced).
967 */
968 mapping = page_mapping(hpage);
969 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
970 mapping_cap_writeback_dirty(mapping)) {
971 if (page_mkclean(hpage)) {
972 SetPageDirty(hpage);
973 } else {
974 kill = 0;
975 ttu |= TTU_IGNORE_HWPOISON;
976 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
977 pfn);
978 }
979 }
980
981 /*
982 * First collect all the processes that have the page
983 * mapped in dirty form. This has to be done before try_to_unmap,
984 * because ttu takes the rmap data structures down.
985 *
986 * Error handling: We ignore errors here because
987 * there's nothing that can be done.
988 */
989 if (kill)
990 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
991
992 unmap_success = try_to_unmap(hpage, ttu);
993 if (!unmap_success)
994 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
995 pfn, page_mapcount(hpage));
996
997 /*
998 * try_to_unmap() might put mlocked page in lru cache, so call
999 * shake_page() again to ensure that it's flushed.
1000 */
1001 if (mlocked)
1002 shake_page(hpage, 0);
1003
1004 /*
1005 * Now that the dirty bit has been propagated to the
1006 * struct page and all unmaps done we can decide if
1007 * killing is needed or not. Only kill when the page
1008 * was dirty or the process is not restartable,
1009 * otherwise the tokill list is merely
1010 * freed. When there was a problem unmapping earlier
1011 * use a more forceful, uncatchable kill to prevent
1012 * any accesses to the poisoned memory.
1013 */
1014 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
1015 kill_procs(&tokill, forcekill, !unmap_success, p, pfn, flags);
1016
1017 return unmap_success;
1018}
1019
1020static int identify_page_state(unsigned long pfn, struct page *p,
1021 unsigned long page_flags)
1022{
1023 struct page_state *ps;
1024
1025 /*
1026 * The first check uses the current page flags which may not have any
1027 * relevant information. The second check with the saved page flags is
1028 * carried out only if the first check can't determine the page status.
1029 */
1030 for (ps = error_states;; ps++)
1031 if ((p->flags & ps->mask) == ps->res)
1032 break;
1033
1034 page_flags |= (p->flags & (1UL << PG_dirty));
1035
1036 if (!ps->mask)
1037 for (ps = error_states;; ps++)
1038 if ((page_flags & ps->mask) == ps->res)
1039 break;
1040 return page_action(ps, p, pfn);
1041}
1042
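/*
 * Hugetlb variant of memory_failure(): poison the head page, handle the
 * free hugepage case, unmap user mappings and act on the saved page state.
 */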
1043static int memory_failure_hugetlb(unsigned long pfn, int flags)
1044{
1045 struct page *p = pfn_to_page(pfn);
1046 struct page *head = compound_head(p);
1047 int res;
1048 unsigned long page_flags;
1049
1050 if (TestSetPageHWPoison(head)) {
1051 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1052 pfn);
1053 return 0;
1054 }
1055
1056 num_poisoned_pages_inc();
1057
1058 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1059 /*
1060 * Check "filter hit" and "race with other subpage."
1061 */
1062 lock_page(head);
1063 if (PageHWPoison(head)) {
1064 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1065 || (p != head && TestSetPageHWPoison(head))) {
1066 num_poisoned_pages_dec();
1067 unlock_page(head);
1068 return 0;
1069 }
1070 }
1071 unlock_page(head);
1072 dissolve_free_huge_page(p);
1073 action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED);
1074 return 0;
1075 }
1076
1077 lock_page(head);
1078 page_flags = head->flags;
1079
1080 if (!PageHWPoison(head)) {
1081 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1082 num_poisoned_pages_dec();
1083 unlock_page(head);
1084 put_hwpoison_page(head);
1085 return 0;
1086 }
1087
1088 /*
1089 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
1090 * simply disable it. In order to make it work properly, we need
1091 * make sure that:
1092 * - conversion of a pud that maps an error hugetlb into hwpoison
1093 * entry properly works, and
1094 * - other mm code walking over page table is aware of pud-aligned
1095 * hwpoison entries.
1096 */
1097 if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
1098 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
1099 res = -EBUSY;
1100 goto out;
1101 }
1102
1103 if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
1104 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1105 res = -EBUSY;
1106 goto out;
1107 }
1108
1109 res = identify_page_state(pfn, p, page_flags);
1110out:
1111 unlock_page(head);
1112 return res;
1113}
1114
1115/**
1116 * memory_failure - Handle memory failure of a page.
1117 * @pfn: Page Number of the corrupted page
1118 * @flags: fine tune action taken
1119 *
1120 * This function is called by the low level machine check code
1121 * of an architecture when it detects hardware memory corruption
1122 * of a page. It tries its best to recover, which includes
1123 * dropping pages, killing processes etc.
1124 *
1125 * The function is primarily of use for corruptions that
1126 * happen outside the current execution context (e.g. when
1127 * detected by a background scrubber)
1128 *
1129 * Must run in process context (e.g. a work queue) with interrupts
1130 * enabled and no spinlocks held.
1131 */
1132int memory_failure(unsigned long pfn, int flags)
1133{
1134 struct page *p;
1135 struct page *hpage;
1136 struct page *orig_head;
1137 int res;
1138 unsigned long page_flags;
1139
1140 if (!sysctl_memory_failure_recovery)
1141 panic("Memory failure on page %lx", pfn);
1142
1143 if (!pfn_valid(pfn)) {
1144 pr_err("Memory failure: %#lx: memory outside kernel control\n",
1145 pfn);
1146 return -ENXIO;
1147 }
1148
1149 p = pfn_to_page(pfn);
1150 if (PageHuge(p))
1151 return memory_failure_hugetlb(pfn, flags);
1152 if (TestSetPageHWPoison(p)) {
1153 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1154 pfn);
1155 return 0;
1156 }
1157
1158 orig_head = hpage = compound_head(p);
1159 num_poisoned_pages_inc();
1160
1161 /*
1162 * We need/can do nothing about count=0 pages.
1163 * 1) it's a free page, and therefore in safe hands:
1164 * prep_new_page() will be the gate keeper.
1165 * 2) it's part of a non-compound high order page.
1166 * Implies some kernel user: cannot stop them from
1167 * R/W the page; let's pray that the page has been
1168 * used and will be freed some time later.
1169 * In fact it's dangerous to directly bump up page count from 0,
1170 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
1171 */
1172 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1173 if (is_free_buddy_page(p)) {
1174 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1175 return 0;
1176 } else {
1177 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
1178 return -EBUSY;
1179 }
1180 }
1181
1182 if (PageTransHuge(hpage)) {
1183 lock_page(p);
1184 if (!PageAnon(p) || unlikely(split_huge_page(p))) {
1185 unlock_page(p);
1186 if (!PageAnon(p))
1187 pr_err("Memory failure: %#lx: non anonymous thp\n",
1188 pfn);
1189 else
1190 pr_err("Memory failure: %#lx: thp split failed\n",
1191 pfn);
1192 if (TestClearPageHWPoison(p))
1193 num_poisoned_pages_dec();
1194 put_hwpoison_page(p);
1195 return -EBUSY;
1196 }
1197 unlock_page(p);
1198 VM_BUG_ON_PAGE(!page_count(p), p);
1199 hpage = compound_head(p);
1200 }
1201
1202 /*
1203 * We ignore non-LRU pages for good reasons.
1204 * - PG_locked is only well defined for LRU pages and a few others
1205 * - to avoid races with __SetPageLocked()
1206 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
1207 * The check (unnecessarily) ignores LRU pages being isolated and
1208 * walked by the page reclaim code, however that's not a big loss.
1209 */
1210 shake_page(p, 0);
1211 /* shake_page could have turned it free. */
1212 if (!PageLRU(p) && is_free_buddy_page(p)) {
1213 if (flags & MF_COUNT_INCREASED)
1214 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1215 else
1216 action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
1217 return 0;
1218 }
1219
1220 lock_page(p);
1221
1222 /*
1223 * The page's compound head could have changed while we were taking the lock.
1224 * If this happens just bail out.
1225 */
1226 if (PageCompound(p) && compound_head(p) != orig_head) {
1227 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
1228 res = -EBUSY;
1229 goto out;
1230 }
1231
1232 /*
1233 * We use page flags to determine what action should be taken, but
1234 * the flags can be modified by the error containment action. One
1235 * example is an mlocked page, where PG_mlocked is cleared by
1236 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
1237 * correctly, we save a copy of the page flags at this time.
1238 */
1239 if (PageHuge(p))
1240 page_flags = hpage->flags;
1241 else
1242 page_flags = p->flags;
1243
1244 /*
1245 * unpoison always clears PG_hwpoison inside the page lock
1246 */
1247 if (!PageHWPoison(p)) {
1248 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1249 num_poisoned_pages_dec();
1250 unlock_page(p);
1251 put_hwpoison_page(p);
1252 return 0;
1253 }
1254 if (hwpoison_filter(p)) {
1255 if (TestClearPageHWPoison(p))
1256 num_poisoned_pages_dec();
1257 unlock_page(p);
1258 put_hwpoison_page(p);
1259 return 0;
1260 }
1261
1262 if (!PageTransTail(p) && !PageLRU(p))
1263 goto identify_page_state;
1264
1265 /*
1266 * It's very difficult to mess with pages currently under IO
1267 * and in many cases impossible, so we just avoid it here.
1268 */
1269 wait_on_page_writeback(p);
1270
1271 /*
1272 * Now take care of user space mappings.
1273 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
1274 *
1275 * When the raw error page is thp tail page, hpage points to the raw
1276 * page after thp split.
1277 */
1278 if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
1279 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1280 res = -EBUSY;
1281 goto out;
1282 }
1283
1284 /*
1285 * Torn down by someone else?
1286 */
1287 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
1288 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
1289 res = -EBUSY;
1290 goto out;
1291 }
1292
1293identify_page_state:
1294 res = identify_page_state(pfn, p, page_flags);
1295out:
1296 unlock_page(p);
1297 return res;
1298}
1299EXPORT_SYMBOL_GPL(memory_failure);
1300
1301#define MEMORY_FAILURE_FIFO_ORDER 4
1302#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
1303
1304struct memory_failure_entry {
1305 unsigned long pfn;
1306 int flags;
1307};
1308
1309struct memory_failure_cpu {
1310 DECLARE_KFIFO(fifo, struct memory_failure_entry,
1311 MEMORY_FAILURE_FIFO_SIZE);
1312 spinlock_t lock;
1313 struct work_struct work;
1314};
1315
1316static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
1317
1318/**
1319 * memory_failure_queue - Schedule handling memory failure of a page.
1320 * @pfn: Page Number of the corrupted page
1321 * @flags: Flags for memory failure handling
1322 *
1323 * This function is called by the low level hardware error handler
1324 * when it detects hardware memory corruption of a page. It schedules
1325 * the recovering of error page, including dropping pages, killing
1326 * processes etc.
1327 *
1328 * The function is primarily of use for corruptions that
1329 * happen outside the current execution context (e.g. when
1330 * detected by a background scrubber)
1331 *
1332 * Can run in IRQ context.
1333 */
1334void memory_failure_queue(unsigned long pfn, int flags)
1335{
1336 struct memory_failure_cpu *mf_cpu;
1337 unsigned long proc_flags;
1338 struct memory_failure_entry entry = {
1339 .pfn = pfn,
1340 .flags = flags,
1341 };
1342
1343 mf_cpu = &get_cpu_var(memory_failure_cpu);
1344 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1345 if (kfifo_put(&mf_cpu->fifo, entry))
1346 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1347 else
1348 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
1349 pfn);
1350 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1351 put_cpu_var(memory_failure_cpu);
1352}
1353EXPORT_SYMBOL_GPL(memory_failure_queue);
1354
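/*
 * Work handler: drain the per-CPU kfifo filled by memory_failure_queue()
 * and process each entry in process context.
 */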
1355static void memory_failure_work_func(struct work_struct *work)
1356{
1357 struct memory_failure_cpu *mf_cpu;
1358 struct memory_failure_entry entry = { 0, };
1359 unsigned long proc_flags;
1360 int gotten;
1361
1362 mf_cpu = this_cpu_ptr(&memory_failure_cpu);
1363 for (;;) {
1364 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1365 gotten = kfifo_get(&mf_cpu->fifo, &entry);
1366 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1367 if (!gotten)
1368 break;
1369 if (entry.flags & MF_SOFT_OFFLINE)
1370 soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
1371 else
1372 memory_failure(entry.pfn, entry.flags);
1373 }
1374}
1375
1376static int __init memory_failure_init(void)
1377{
1378 struct memory_failure_cpu *mf_cpu;
1379 int cpu;
1380
1381 for_each_possible_cpu(cpu) {
1382 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1383 spin_lock_init(&mf_cpu->lock);
1384 INIT_KFIFO(mf_cpu->fifo);
1385 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
1386 }
1387
1388 return 0;
1389}
1390core_initcall(memory_failure_init);
1391
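/* Ratelimited pr_info() wrapper used by unpoison_memory(). */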
1392#define unpoison_pr_info(fmt, pfn, rs) \
1393({ \
1394 if (__ratelimit(rs)) \
1395 pr_info(fmt, pfn); \
1396})
1397
1398/**
1399 * unpoison_memory - Unpoison a previously poisoned page
1400 * @pfn: Page number of the to be unpoisoned page
1401 *
1402 * Software-unpoison a page that has been poisoned by
1403 * memory_failure() earlier.
1404 *
1405 * This is only done on the software-level, so it only works
1406 * for linux injected failures, not real hardware failures
1407 *
1408 * Returns 0 for success, otherwise -errno.
1409 */
1410int unpoison_memory(unsigned long pfn)
1411{
1412 struct page *page;
1413 struct page *p;
1414 int freeit = 0;
1415 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1416 DEFAULT_RATELIMIT_BURST);
1417
1418 if (!pfn_valid(pfn))
1419 return -ENXIO;
1420
1421 p = pfn_to_page(pfn);
1422 page = compound_head(p);
1423
1424 if (!PageHWPoison(p)) {
1425 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
1426 pfn, &unpoison_rs);
1427 return 0;
1428 }
1429
1430 if (page_count(page) > 1) {
1431 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
1432 pfn, &unpoison_rs);
1433 return 0;
1434 }
1435
1436 if (page_mapped(page)) {
1437 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
1438 pfn, &unpoison_rs);
1439 return 0;
1440 }
1441
1442 if (page_mapping(page)) {
1443 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
1444 pfn, &unpoison_rs);
1445 return 0;
1446 }
1447
1448 /*
1449 * unpoison_memory() can encounter thp only when the thp is being
1450 * worked on by memory_failure() and the page lock is not held yet.
1451 * In such case, we yield to memory_failure() and make unpoison fail.
1452 */
1453 if (!PageHuge(page) && PageTransHuge(page)) {
1454 unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
1455 pfn, &unpoison_rs);
1456 return 0;
1457 }
1458
1459 if (!get_hwpoison_page(p)) {
1460 if (TestClearPageHWPoison(p))
1461 num_poisoned_pages_dec();
1462 unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
1463 pfn, &unpoison_rs);
1464 return 0;
1465 }
1466
1467 lock_page(page);
1468 /*
1469 * This test is racy because PG_hwpoison is set outside of page lock.
1470 * That's acceptable because that won't trigger kernel panic. Instead,
1471 * the PG_hwpoison page will be caught and isolated on the entrance to
1472 * the free buddy page pool.
1473 */
1474 if (TestClearPageHWPoison(page)) {
1475 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
1476 pfn, &unpoison_rs);
1477 num_poisoned_pages_dec();
1478 freeit = 1;
1479 }
1480 unlock_page(page);
1481
1482 put_hwpoison_page(page);
1483 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
1484 put_hwpoison_page(page);
1485
1486 return 0;
1487}
1488EXPORT_SYMBOL(unpoison_memory);
1489
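/*
 * Allocation callback for migrate_pages(): allocate the target page on the
 * same node as the source page.
 */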
1490static struct page *new_page(struct page *p, unsigned long private)
1491{
1492 int nid = page_to_nid(p);
1493
1494 return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
1495}
1496
1497/*
1498 * Safely get reference count of an arbitrary page.
1499 * Returns 0 for a free page, -EIO for a zero refcount page
1500 * that is not free, and 1 for any other page type.
1501 * For 1 the page is returned with increased page count, otherwise not.
1502 */
1503static int __get_any_page(struct page *p, unsigned long pfn, int flags)
1504{
1505 int ret;
1506
1507 if (flags & MF_COUNT_INCREASED)
1508 return 1;
1509
1510 /*
1511 * When the target page is a free hugepage, just remove it
1512 * from free hugepage list.
1513 */
1514 if (!get_hwpoison_page(p)) {
1515 if (PageHuge(p)) {
1516 pr_info("%s: %#lx free huge page\n", __func__, pfn);
1517 ret = 0;
1518 } else if (is_free_buddy_page(p)) {
1519 pr_info("%s: %#lx free buddy page\n", __func__, pfn);
1520 ret = 0;
1521 } else {
1522 pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
1523 __func__, pfn, p->flags);
1524 ret = -EIO;
1525 }
1526 } else {
1527 /* Not a free page */
1528 ret = 1;
1529 }
1530 return ret;
1531}
1532
1533static int get_any_page(struct page *page, unsigned long pfn, int flags)
1534{
1535 int ret = __get_any_page(page, pfn, flags);
1536
1537 if (ret == 1 && !PageHuge(page) &&
1538 !PageLRU(page) && !__PageMovable(page)) {
1539 /*
1540 * Try to free it.
1541 */
1542 put_hwpoison_page(page);
1543 shake_page(page, 1);
1544
1545 /*
1546 * Did it turn free?
1547 */
1548 ret = __get_any_page(page, pfn, 0);
1549 if (ret == 1 && !PageLRU(page)) {
1550 /* Drop page reference which is from __get_any_page() */
1551 put_hwpoison_page(page);
1552 pr_info("soft_offline: %#lx: unknown non LRU page type %lx (%pGp)\n",
1553 pfn, page->flags, &page->flags);
1554 return -EIO;
1555 }
1556 }
1557 return ret;
1558}
1559
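/*
 * Soft offline a hugetlb page: re-check PageHWPoison under the page lock,
 * isolate and migrate the hugepage, then dissolve the freed hugepage.
 */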
1560static int soft_offline_huge_page(struct page *page, int flags)
1561{
1562 int ret;
1563 unsigned long pfn = page_to_pfn(page);
1564 struct page *hpage = compound_head(page);
1565 LIST_HEAD(pagelist);
1566
1567 /*
1568 * This double-check of PageHWPoison is to avoid the race with
1569 * memory_failure(). See also comment in __soft_offline_page().
1570 */
1571 lock_page(hpage);
1572 if (PageHWPoison(hpage)) {
1573 unlock_page(hpage);
1574 put_hwpoison_page(hpage);
1575 pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
1576 return -EBUSY;
1577 }
1578 unlock_page(hpage);
1579
1580 ret = isolate_huge_page(hpage, &pagelist);
1581 /*
1582 * get_any_page() and isolate_huge_page() take a refcount each,
1583 * so need to drop one here.
1584 */
1585 put_hwpoison_page(hpage);
1586 if (!ret) {
1587 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1588 return -EBUSY;
1589 }
1590
1591 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1592 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1593 if (ret) {
1594 pr_info("soft offline: %#lx: hugepage migration failed %d, type %lx (%pGp)\n",
1595 pfn, ret, page->flags, &page->flags);
1596 if (!list_empty(&pagelist))
1597 putback_movable_pages(&pagelist);
1598 if (ret > 0)
1599 ret = -EIO;
1600 } else {
1601 if (PageHuge(page))
1602 dissolve_free_huge_page(page);
1603 }
1604 return ret;
1605}
1606
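/*
 * Soft offline a base page: try to invalidate it first; if that fails,
 * isolate the page and migrate its contents to a new page.
 */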
1607static int __soft_offline_page(struct page *page, int flags)
1608{
1609 int ret;
1610 unsigned long pfn = page_to_pfn(page);
1611
1612 /*
1613 * Check PageHWPoison again inside page lock because PageHWPoison
1614 * is set by memory_failure() outside page lock. Note that
1615 * memory_failure() also double-checks PageHWPoison inside page lock,
1616 * so there's no race between soft_offline_page() and memory_failure().
1617 */
1618 lock_page(page);
1619 wait_on_page_writeback(page);
1620 if (PageHWPoison(page)) {
1621 unlock_page(page);
1622 put_hwpoison_page(page);
1623 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1624 return -EBUSY;
1625 }
1626 /*
1627 * Try to invalidate first. This should work for
1628 * non dirty unmapped page cache pages.
1629 */
1630 ret = invalidate_inode_page(page);
1631 unlock_page(page);
1632 /*
1633 * RED-PEN would be better to keep it isolated here, but we
1634 * would need to fix isolation locking first.
1635 */
1636 if (ret == 1) {
1637 put_hwpoison_page(page);
1638 pr_info("soft_offline: %#lx: invalidated\n", pfn);
1639 SetPageHWPoison(page);
1640 num_poisoned_pages_inc();
1641 return 0;
1642 }
1643
1644 /*
1645 * Simple invalidation didn't work.
1646 * Try to migrate to a new page instead. migrate.c
1647 * handles a large number of cases for us.
1648 */
1649 if (PageLRU(page))
1650 ret = isolate_lru_page(page);
1651 else
1652 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1653 /*
1654 * Drop the page reference that came from get_any_page();
1655 * a successful isolate_lru_page() already took another one.
1656 */
1657 put_hwpoison_page(page);
1658 if (!ret) {
1659 LIST_HEAD(pagelist);
1660 /*
1661 * After the lru page is isolated, PageLRU is cleared,
1662 * so use !__PageMovable instead, since an LRU page's mapping
1663 * cannot have PAGE_MAPPING_MOVABLE.
1664 */
1665 if (!__PageMovable(page))
1666 inc_node_page_state(page, NR_ISOLATED_ANON +
1667 page_is_file_cache(page));
1668 list_add(&page->lru, &pagelist);
1669 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1670 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1671 if (ret) {
1672 if (!list_empty(&pagelist))
1673 putback_movable_pages(&pagelist);
1674
1675 pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
1676 pfn, ret, page->flags, &page->flags);
1677 if (ret > 0)
1678 ret = -EIO;
1679 }
1680 } else {
1681 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx (%pGp)\n",
1682 pfn, ret, page_count(page), page->flags, &page->flags);
1683 }
1684 return ret;
1685}
1686
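/*
 * For an in-use page: split a THP first (anonymous only), then dispatch to
 * the hugetlb or base page soft-offline path.
 */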
1687static int soft_offline_in_use_page(struct page *page, int flags)
1688{
1689 int ret;
1690 struct page *hpage = compound_head(page);
1691
1692 if (!PageHuge(page) && PageTransHuge(hpage)) {
1693 lock_page(hpage);
1694 if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
1695 unlock_page(hpage);
1696 if (!PageAnon(hpage))
1697 pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
1698 else
1699 pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
1700 put_hwpoison_page(hpage);
1701 return -EBUSY;
1702 }
1703 unlock_page(hpage);
1704 get_hwpoison_page(page);
1705 put_hwpoison_page(hpage);
1706 }
1707
1708 if (PageHuge(page))
1709 ret = soft_offline_huge_page(page, flags);
1710 else
1711 ret = __soft_offline_page(page, flags);
1712
1713 return ret;
1714}
1715
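/*
 * An already-free page only needs PG_hwpoison set so it is never handed out
 * again; a free hugepage is additionally dissolved into buddy pages.
 */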
1716static void soft_offline_free_page(struct page *page)
1717{
1718 struct page *head = compound_head(page);
1719
1720 if (!TestSetPageHWPoison(head)) {
1721 num_poisoned_pages_inc();
1722 if (PageHuge(head))
1723 dissolve_free_huge_page(page);
1724 }
1725}
1726
1727/**
1728 * soft_offline_page - Soft offline a page.
1729 * @page: page to offline
1730 * @flags: flags. Same as memory_failure().
1731 *
1732 * Returns 0 on success, otherwise negated errno.
1733 *
1734 * Soft offline a page, by migration or invalidation,
1735 * without killing anything. This is for the case when
1736 * a page is not corrupted yet (so it's still valid to access),
1737 * but has had a number of corrected errors and is better taken
1738 * out.
1739 *
1740 * The actual policy on when to do that is maintained by
1741 * user space.
1742 *
1743 * This should never impact any application or cause data loss,
1744 * however it might take some time.
1745 *
1746 * This is not a 100% solution for all memory, but tries to be
1747 * ``good enough'' for the majority of memory.
1748 */
1749int soft_offline_page(struct page *page, int flags)
1750{
1751 int ret;
1752 unsigned long pfn = page_to_pfn(page);
1753
1754 if (PageHWPoison(page)) {
1755 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1756 if (flags & MF_COUNT_INCREASED)
1757 put_hwpoison_page(page);
1758 return -EBUSY;
1759 }
1760
1761 get_online_mems();
1762 ret = get_any_page(page, pfn, flags);
1763 put_online_mems();
1764
1765 if (ret > 0)
1766 ret = soft_offline_in_use_page(page, flags);
1767 else if (ret == 0)
1768 soft_offline_free_page(page);
1769
1770 return ret;
1771}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008, 2009 Intel Corporation
4 * Authors: Andi Kleen, Fengguang Wu
5 *
6 * High level machine check handler. Handles pages reported by the
7 * hardware as being corrupted usually due to a multi-bit ECC memory or cache
8 * failure.
9 *
10 * In addition there is a "soft offline" entry point that allows stop using
11 * not-yet-corrupted-by-suspicious pages without killing anything.
12 *
13 * Handles page cache pages in various states. The tricky part
14 * here is that we can access any page asynchronously in respect to
15 * other VM users, because memory failures could happen anytime and
16 * anywhere. This could violate some of their assumptions. This is why
17 * this code has to be extremely careful. Generally it tries to use
18 * normal locking rules, as in get the standard locks, even if that means
19 * the error handling takes potentially a long time.
20 *
21 * It can be very tempting to add handling for obscure cases here.
22 * In general any code for handling new cases should only be added iff:
23 * - You know how to test it.
24 * - You have a test that can be added to mce-test
25 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
26 * - The case actually shows up as a frequent (top 10) page state in
27 * tools/vm/page-types when running a real workload.
28 *
29 * There are several operations here with exponential complexity because
30 * of unsuitable VM data structures. For example the operation to map back
31 * from RMAP chains to processes has to walk the complete process list and
32 * has non linear complexity with the number. But since memory corruptions
33 * are rare we hope to get away with this. This avoids impacting the core
34 * VM.
35 */
36
37#define pr_fmt(fmt) "Memory failure: " fmt
38
39#include <linux/kernel.h>
40#include <linux/mm.h>
41#include <linux/page-flags.h>
42#include <linux/kernel-page-flags.h>
43#include <linux/sched/signal.h>
44#include <linux/sched/task.h>
45#include <linux/dax.h>
46#include <linux/ksm.h>
47#include <linux/rmap.h>
48#include <linux/export.h>
49#include <linux/pagemap.h>
50#include <linux/swap.h>
51#include <linux/backing-dev.h>
52#include <linux/migrate.h>
53#include <linux/suspend.h>
54#include <linux/slab.h>
55#include <linux/swapops.h>
56#include <linux/hugetlb.h>
57#include <linux/memory_hotplug.h>
58#include <linux/mm_inline.h>
59#include <linux/memremap.h>
60#include <linux/kfifo.h>
61#include <linux/ratelimit.h>
62#include <linux/page-isolation.h>
63#include <linux/pagewalk.h>
64#include <linux/shmem_fs.h>
65#include "swap.h"
66#include "internal.h"
67#include "ras/ras_event.h"
68
69int sysctl_memory_failure_early_kill __read_mostly = 0;
70
71int sysctl_memory_failure_recovery __read_mostly = 1;
72
73atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
74
75static bool hw_memory_failure __read_mostly = false;
76
77inline void num_poisoned_pages_inc(unsigned long pfn)
78{
79 atomic_long_inc(&num_poisoned_pages);
80 memblk_nr_poison_inc(pfn);
81}
82
83inline void num_poisoned_pages_sub(unsigned long pfn, long i)
84{
85 atomic_long_sub(i, &num_poisoned_pages);
86 if (pfn != -1UL)
87 memblk_nr_poison_sub(pfn, i);
88}
89
90/*
91 * Return values:
92 * 1: the page is dissolved (if needed) and taken off from buddy,
93 * 0: the page is dissolved (if needed) and not taken off from buddy,
94 * < 0: failed to dissolve.
95 */
96static int __page_handle_poison(struct page *page)
97{
98 int ret;
99
100 zone_pcp_disable(page_zone(page));
101 ret = dissolve_free_huge_page(page);
102 if (!ret)
103 ret = take_page_off_buddy(page);
104 zone_pcp_enable(page_zone(page));
105
106 return ret;
107}
108
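/*
 * Mark a page handled by soft offline as hwpoisoned and pin it with a
 * refcount held on behalf of hwpoison so it cannot be allocated again.
 * Returns false when a free or hugetlb page could not be dissolved or taken
 * off the buddy list.
 */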
109static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
110{
111 if (hugepage_or_freepage) {
112 /*
113 * Doing this check for free pages is also fine since dissolve_free_huge_page
114 * returns 0 for non-hugetlb pages as well.
115 */
116 if (__page_handle_poison(page) <= 0)
117 /*
118 * We could fail to take off the target page from buddy
119 * for example due to racy page allocation, but that's
120 * acceptable because soft-offlined page is not broken
121 * and if someone really want to use it, they should
122 * take it.
123 */
124 return false;
125 }
126
127 SetPageHWPoison(page);
128 if (release)
129 put_page(page);
130 page_ref_inc(page);
131 num_poisoned_pages_inc(page_to_pfn(page));
132
133 return true;
134}
135
136#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
137
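/*
 * Error injection filter knobs. They are normally set through the debugfs
 * interface provided by mm/hwpoison-inject.c (CONFIG_HWPOISON_INJECT) so that
 * injected errors can be restricted to a given device, page-flag set or
 * memcg; hwpoison_filter() below applies them.
 */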
138u32 hwpoison_filter_enable = 0;
139u32 hwpoison_filter_dev_major = ~0U;
140u32 hwpoison_filter_dev_minor = ~0U;
141u64 hwpoison_filter_flags_mask;
142u64 hwpoison_filter_flags_value;
143EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
144EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
145EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
146EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
147EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
148
149static int hwpoison_filter_dev(struct page *p)
150{
151 struct address_space *mapping;
152 dev_t dev;
153
154 if (hwpoison_filter_dev_major == ~0U &&
155 hwpoison_filter_dev_minor == ~0U)
156 return 0;
157
158 mapping = page_mapping(p);
159 if (mapping == NULL || mapping->host == NULL)
160 return -EINVAL;
161
162 dev = mapping->host->i_sb->s_dev;
163 if (hwpoison_filter_dev_major != ~0U &&
164 hwpoison_filter_dev_major != MAJOR(dev))
165 return -EINVAL;
166 if (hwpoison_filter_dev_minor != ~0U &&
167 hwpoison_filter_dev_minor != MINOR(dev))
168 return -EINVAL;
169
170 return 0;
171}
172
173static int hwpoison_filter_flags(struct page *p)
174{
175 if (!hwpoison_filter_flags_mask)
176 return 0;
177
178 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
179 hwpoison_filter_flags_value)
180 return 0;
181 else
182 return -EINVAL;
183}
184
185/*
186 * This allows stress tests to limit test scope to a collection of tasks
187 * by putting them under some memcg. This prevents killing unrelated/important
188 * processes such as /sbin/init. Note that the target task may share clean
189 * pages with init (eg. libc text), which is harmless. If the target task
190 * shares _dirty_ pages with another task B, the test scheme must make sure B
191 * is also included in the memcg. Finally, due to race conditions this filter
192 * can only guarantee that the page either belongs to the memcg tasks, or is
193 * a freed page.
194 */
195#ifdef CONFIG_MEMCG
196u64 hwpoison_filter_memcg;
197EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
198static int hwpoison_filter_task(struct page *p)
199{
200 if (!hwpoison_filter_memcg)
201 return 0;
202
203 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
204 return -EINVAL;
205
206 return 0;
207}
208#else
209static int hwpoison_filter_task(struct page *p) { return 0; }
210#endif
211
212int hwpoison_filter(struct page *p)
213{
214 if (!hwpoison_filter_enable)
215 return 0;
216
217 if (hwpoison_filter_dev(p))
218 return -EINVAL;
219
220 if (hwpoison_filter_flags(p))
221 return -EINVAL;
222
223 if (hwpoison_filter_task(p))
224 return -EINVAL;
225
226 return 0;
227}
228#else
229int hwpoison_filter(struct page *p)
230{
231 return 0;
232}
233#endif
234
235EXPORT_SYMBOL_GPL(hwpoison_filter);
236
237/*
238 * Kill all processes that have a poisoned page mapped and then isolate
239 * the page.
240 *
241 * General strategy:
242 * Find all processes having the page mapped and kill them.
243 * But we keep a page reference around so that the page is not
244 * actually freed yet.
245 * Then stash the page away
246 *
247 * There's no convenient way to get back to mapped processes
248 * from the VMAs. So do a brute-force search over all
249 * running processes.
250 *
251 * Remember that machine checks are not common (or rather
252 * if they are common you have other problems), so this shouldn't
253 * be a performance issue.
254 *
255 * Also there are some races possible while we get from the
256 * error detection to actually handle it.
257 */
258
259struct to_kill {
260 struct list_head nd;
261 struct task_struct *tsk;
262 unsigned long addr;
263 short size_shift;
264};
265
266/*
267 * Send all the processes who have the page mapped a signal.
268 * ``action optional'' if they are not immediately affected by the error
269 * ``action required'' if error happened in current execution context
270 */
271static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
272{
273 struct task_struct *t = tk->tsk;
274 short addr_lsb = tk->size_shift;
275 int ret = 0;
276
277 pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
278 pfn, t->comm, t->pid);
279
280 if ((flags & MF_ACTION_REQUIRED) && (t == current))
281 ret = force_sig_mceerr(BUS_MCEERR_AR,
282 (void __user *)tk->addr, addr_lsb);
283 else
284 /*
285 * Signal other processes sharing the page if they have
286 * PF_MCE_EARLY set.
287 * Don't use force here, it's convenient if the signal
288 * can be temporarily blocked.
289 * This could cause a loop when the user sets SIGBUS
290 * to SIG_IGN, but hopefully no one will do that?
291 */
292 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
293 addr_lsb, t);
294 if (ret < 0)
295 pr_info("Error sending signal to %s:%d: %d\n",
296 t->comm, t->pid, ret);
297 return ret;
298}
299
300/*
301 * Unknown page type encountered. Try to check whether it can become PageLRU
302 * by calling lru_add_drain_all().
303 */
304void shake_page(struct page *p)
305{
306 if (PageHuge(p))
307 return;
308
309 if (!PageSlab(p)) {
310 lru_add_drain_all();
311 if (PageLRU(p) || is_free_buddy_page(p))
312 return;
313 }
314
315 /*
316 * TODO: Could shrink slab caches here if a lightweight range-based
317 * shrinker will be available.
318 */
319}
320EXPORT_SYMBOL_GPL(shake_page);
321
322static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
323 unsigned long address)
324{
325 unsigned long ret = 0;
326 pgd_t *pgd;
327 p4d_t *p4d;
328 pud_t *pud;
329 pmd_t *pmd;
330 pte_t *pte;
331
332 VM_BUG_ON_VMA(address == -EFAULT, vma);
333 pgd = pgd_offset(vma->vm_mm, address);
334 if (!pgd_present(*pgd))
335 return 0;
336 p4d = p4d_offset(pgd, address);
337 if (!p4d_present(*p4d))
338 return 0;
339 pud = pud_offset(p4d, address);
340 if (!pud_present(*pud))
341 return 0;
342 if (pud_devmap(*pud))
343 return PUD_SHIFT;
344 pmd = pmd_offset(pud, address);
345 if (!pmd_present(*pmd))
346 return 0;
347 if (pmd_devmap(*pmd))
348 return PMD_SHIFT;
349 pte = pte_offset_map(pmd, address);
350 if (pte_present(*pte) && pte_devmap(*pte))
351 ret = PAGE_SHIFT;
352 pte_unmap(pte);
353 return ret;
354}
355
356/*
357 * Failure handling: if we can't find or can't kill a process there's
358 * not much we can do. We just print a message and ignore otherwise.
359 */
360
361#define FSDAX_INVALID_PGOFF ULONG_MAX
362
363/*
364 * Schedule a process for later kill.
365 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
366 *
367 * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
368 * filesystem with a memory failure handler has claimed the
369 * memory_failure event. In all other cases, page->index and
370 * page->mapping are sufficient for mapping the page back to its
371 * corresponding user virtual address.
372 */
373static void add_to_kill(struct task_struct *tsk, struct page *p,
374 pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
375 struct list_head *to_kill)
376{
377 struct to_kill *tk;
378
379 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
380 if (!tk) {
381 pr_err("Out of memory while machine check handling\n");
382 return;
383 }
384
385 tk->addr = page_address_in_vma(p, vma);
386 if (is_zone_device_page(p)) {
387 if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
388 tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
389 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
390 } else
391 tk->size_shift = page_shift(compound_head(p));
392
393 /*
394 * Send SIGKILL if "tk->addr == -EFAULT". Also, as
395 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
396 * so "tk->size_shift == 0" effectively checks no mapping on
397 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
398 * to a process' address space, it's possible not all N VMAs
399 * contain mappings for the page, but at least one VMA does.
400 * Only deliver SIGBUS with payload derived from the VMA that
401 * has a mapping for the page.
402 */
403 if (tk->addr == -EFAULT) {
404 pr_info("Unable to find user space address %lx in %s\n",
405 page_to_pfn(p), tsk->comm);
406 } else if (tk->size_shift == 0) {
407 kfree(tk);
408 return;
409 }
410
411 get_task_struct(tsk);
412 tk->tsk = tsk;
413 list_add_tail(&tk->nd, to_kill);
414}
415
416/*
417 * Kill the processes that have been collected earlier.
418 *
419 * Only do anything when FORCEKILL is set, otherwise just free the
420 * list (this is used for clean pages which do not need killing)
421 * Also when FAIL is set do a force kill because something went
422 * wrong earlier.
423 */
424static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
425 unsigned long pfn, int flags)
426{
427 struct to_kill *tk, *next;
428
429 list_for_each_entry_safe(tk, next, to_kill, nd) {
430 if (forcekill) {
431 /*
432 * In case something went wrong with munmapping
433 * make sure the process doesn't catch the
434 * signal and then access the memory. Just kill it.
435 */
436 if (fail || tk->addr == -EFAULT) {
437 pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
438 pfn, tk->tsk->comm, tk->tsk->pid);
439 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
440 tk->tsk, PIDTYPE_PID);
441 }
442
443 /*
444 * In theory the process could have mapped
445 * something else on the address in-between. We could
446 * check for that, but we need to tell the
447 * process anyways.
448 */
449 else if (kill_proc(tk, pfn, flags) < 0)
450 pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
451 pfn, tk->tsk->comm, tk->tsk->pid);
452 }
453 list_del(&tk->nd);
454 put_task_struct(tk->tsk);
455 kfree(tk);
456 }
457}
458
459/*
460 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
461 * on behalf of the thread group. Return task_struct of the (first found)
462 * dedicated thread if found, and return NULL otherwise.
463 *
464 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
465 * have to call rcu_read_lock/unlock() in this function.
466 */
467static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
468{
469 struct task_struct *t;
470
471 for_each_thread(tsk, t) {
472 if (t->flags & PF_MCE_PROCESS) {
473 if (t->flags & PF_MCE_EARLY)
474 return t;
475 } else {
476 if (sysctl_memory_failure_early_kill)
477 return t;
478 }
479 }
480 return NULL;
481}
482
483/*
484 * Determine whether a given process is "early kill" process which expects
485 * to be signaled when some page under the process is hwpoisoned.
486 * Return task_struct of the dedicated thread (main thread unless explicitly
487 * specified) if the process is "early kill" and otherwise returns NULL.
488 *
489 * Note that the above is true for Action Optional case. For Action Required
490 * case, it's only meaningful to the current thread, which needs to be signaled
491 * with SIGBUS. The error is Action Optional for other non-current
492 * processes sharing the same error page; if such a process is "early kill", the
493 * task_struct of its dedicated thread will also be returned.
494 */
495static struct task_struct *task_early_kill(struct task_struct *tsk,
496 int force_early)
497{
498 if (!tsk->mm)
499 return NULL;
500 /*
501 * Comparing ->mm here because current task might represent
502 * a subthread, while tsk always points to the main thread.
503 */
504 if (force_early && tsk->mm == current->mm)
505 return current;
506
507 return find_early_kill_thread(tsk);
508}
509
510/*
511 * Collect processes when the error hit an anonymous page.
512 */
513static void collect_procs_anon(struct page *page, struct list_head *to_kill,
514 int force_early)
515{
516 struct folio *folio = page_folio(page);
517 struct vm_area_struct *vma;
518 struct task_struct *tsk;
519 struct anon_vma *av;
520 pgoff_t pgoff;
521
522 av = folio_lock_anon_vma_read(folio, NULL);
523 if (av == NULL) /* Not actually mapped anymore */
524 return;
525
526 pgoff = page_to_pgoff(page);
527 read_lock(&tasklist_lock);
528 for_each_process (tsk) {
529 struct anon_vma_chain *vmac;
530 struct task_struct *t = task_early_kill(tsk, force_early);
531
532 if (!t)
533 continue;
534 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
535 pgoff, pgoff) {
536 vma = vmac->vma;
537 if (vma->vm_mm != t->mm)
538 continue;
539 if (!page_mapped_in_vma(page, vma))
540 continue;
541 add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, to_kill);
542 }
543 }
544 read_unlock(&tasklist_lock);
545 anon_vma_unlock_read(av);
546}
547
548/*
549 * Collect processes when the error hit a file mapped page.
550 */
551static void collect_procs_file(struct page *page, struct list_head *to_kill,
552 int force_early)
553{
554 struct vm_area_struct *vma;
555 struct task_struct *tsk;
556 struct address_space *mapping = page->mapping;
557 pgoff_t pgoff;
558
559 i_mmap_lock_read(mapping);
560 read_lock(&tasklist_lock);
561 pgoff = page_to_pgoff(page);
562 for_each_process(tsk) {
563 struct task_struct *t = task_early_kill(tsk, force_early);
564
565 if (!t)
566 continue;
567 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
568 pgoff) {
569 /*
570 * Send early kill signal to tasks where a vma covers
571 * the page but the corrupted page is not necessarily
572			 * mapped in its pte.
573			 * Assume applications that requested early kill want
574 * to be informed of all such data corruptions.
575 */
576 if (vma->vm_mm == t->mm)
577 add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
578 to_kill);
579 }
580 }
581 read_unlock(&tasklist_lock);
582 i_mmap_unlock_read(mapping);
583}
584
585#ifdef CONFIG_FS_DAX
586/*
587 * Collect processes when the error hit a fsdax page.
588 */
589static void collect_procs_fsdax(struct page *page,
590 struct address_space *mapping, pgoff_t pgoff,
591 struct list_head *to_kill)
592{
593 struct vm_area_struct *vma;
594 struct task_struct *tsk;
595
596 i_mmap_lock_read(mapping);
597 read_lock(&tasklist_lock);
598 for_each_process(tsk) {
599 struct task_struct *t = task_early_kill(tsk, true);
600
601 if (!t)
602 continue;
603 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
604 if (vma->vm_mm == t->mm)
605 add_to_kill(t, page, pgoff, vma, to_kill);
606 }
607 }
608 read_unlock(&tasklist_lock);
609 i_mmap_unlock_read(mapping);
610}
611#endif /* CONFIG_FS_DAX */
612
613/*
614 * Collect the processes who have the corrupted page mapped to kill.
615 */
616static void collect_procs(struct page *page, struct list_head *tokill,
617 int force_early)
618{
619 if (!page->mapping)
620 return;
621
622 if (PageAnon(page))
623 collect_procs_anon(page, tokill, force_early);
624 else
625 collect_procs_file(page, tokill, force_early);
626}
627
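/*
 * Per-walk state used by kill_accessing_process() when scanning a task's
 * page tables for a mapping of the poisoned pfn.
 */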
628struct hwp_walk {
629 struct to_kill tk;
630 unsigned long pfn;
631 int flags;
632};
633
634static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
635{
636 tk->addr = addr;
637 tk->size_shift = shift;
638}
639
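/*
 * Return 1 and record the address/size in @tk if the pte (either present or
 * a hwpoison swap entry) maps the poisoned pfn, 0 otherwise.
 */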
640static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
641 unsigned long poisoned_pfn, struct to_kill *tk)
642{
643 unsigned long pfn = 0;
644
645 if (pte_present(pte)) {
646 pfn = pte_pfn(pte);
647 } else {
648 swp_entry_t swp = pte_to_swp_entry(pte);
649
650 if (is_hwpoison_entry(swp))
651 pfn = swp_offset_pfn(swp);
652 }
653
654 if (!pfn || pfn != poisoned_pfn)
655 return 0;
656
657 set_to_kill(tk, addr, shift);
658 return 1;
659}
660
661#ifdef CONFIG_TRANSPARENT_HUGEPAGE
662static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
663 struct hwp_walk *hwp)
664{
665 pmd_t pmd = *pmdp;
666 unsigned long pfn;
667 unsigned long hwpoison_vaddr;
668
669 if (!pmd_present(pmd))
670 return 0;
671 pfn = pmd_pfn(pmd);
672 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
673 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
674 set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
675 return 1;
676 }
677 return 0;
678}
679#else
680static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
681 struct hwp_walk *hwp)
682{
683 return 0;
684}
685#endif
686
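/*
 * pmd_entry callback of hwp_walk_ops: scan one pmd range, either as a huge
 * mapping or pte by pte, and stop the walk once the poisoned pfn is found.
 */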
687static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
688 unsigned long end, struct mm_walk *walk)
689{
690 struct hwp_walk *hwp = walk->private;
691 int ret = 0;
692 pte_t *ptep, *mapped_pte;
693 spinlock_t *ptl;
694
695 ptl = pmd_trans_huge_lock(pmdp, walk->vma);
696 if (ptl) {
697 ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
698 spin_unlock(ptl);
699 goto out;
700 }
701
702 if (pmd_trans_unstable(pmdp))
703 goto out;
704
705 mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
706 addr, &ptl);
707 for (; addr != end; ptep++, addr += PAGE_SIZE) {
708 ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
709 hwp->pfn, &hwp->tk);
710 if (ret == 1)
711 break;
712 }
713 pte_unmap_unlock(mapped_pte, ptl);
714out:
715 cond_resched();
716 return ret;
717}
718
719#ifdef CONFIG_HUGETLB_PAGE
720static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
721 unsigned long addr, unsigned long end,
722 struct mm_walk *walk)
723{
724 struct hwp_walk *hwp = walk->private;
725 pte_t pte = huge_ptep_get(ptep);
726 struct hstate *h = hstate_vma(walk->vma);
727
728 return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
729 hwp->pfn, &hwp->tk);
730}
731#else
732#define hwpoison_hugetlb_range NULL
733#endif
734
735static const struct mm_walk_ops hwp_walk_ops = {
736 .pmd_entry = hwpoison_pte_range,
737 .hugetlb_entry = hwpoison_hugetlb_range,
738};
739
740/*
741 * Sends SIGBUS to the current process with error info.
742 *
743 * This function is intended to handle "Action Required" MCEs on already
744 * hardware poisoned pages. They could happen, for example, when
745 * memory_failure() failed to unmap the error page at the first call, or
746 * when multiple local machine checks happened on different CPUs.
747 *
748 * MCE handler currently has no easy access to the error virtual address,
749 * so this function walks page table to find it. The returned virtual address
750 * is proper in most cases, but it could be wrong when the application
751 * process has multiple entries mapping the error page.
752 */
753static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
754 int flags)
755{
756 int ret;
757 struct hwp_walk priv = {
758 .pfn = pfn,
759 };
760 priv.tk.tsk = p;
761
762 if (!p->mm)
763 return -EFAULT;
764
765 mmap_read_lock(p->mm);
766 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
767 (void *)&priv);
768 if (ret == 1 && priv.tk.addr)
769 kill_proc(&priv.tk, pfn, flags);
770 else
771 ret = 0;
772 mmap_read_unlock(p->mm);
773 return ret > 0 ? -EHWPOISON : -EFAULT;
774}
775
776static const char *action_name[] = {
777 [MF_IGNORED] = "Ignored",
778 [MF_FAILED] = "Failed",
779 [MF_DELAYED] = "Delayed",
780 [MF_RECOVERED] = "Recovered",
781};
782
783static const char * const action_page_types[] = {
784 [MF_MSG_KERNEL] = "reserved kernel page",
785 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
786 [MF_MSG_SLAB] = "kernel slab page",
787 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
788 [MF_MSG_HUGE] = "huge page",
789 [MF_MSG_FREE_HUGE] = "free huge page",
790 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
791 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
792 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
793 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
794 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
795 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
796 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
797 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
798 [MF_MSG_CLEAN_LRU] = "clean LRU page",
799 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
800 [MF_MSG_BUDDY] = "free buddy page",
801 [MF_MSG_DAX] = "dax page",
802 [MF_MSG_UNSPLIT_THP] = "unsplit thp",
803 [MF_MSG_UNKNOWN] = "unknown page",
804};
805
806/*
807 * XXX: It is possible that a page is isolated from LRU cache,
808 * and then kept in swap cache or failed to remove from page cache.
809 * The page count will stop it from being freed by unpoison.
810 * Stress tests should be aware of this memory leak problem.
811 */
812static int delete_from_lru_cache(struct page *p)
813{
814 if (!isolate_lru_page(p)) {
815 /*
816		 * Clear sensitive page flags, so that the buddy system won't
817 * complain when the page is unpoison-and-freed.
818 */
819 ClearPageActive(p);
820 ClearPageUnevictable(p);
821
822 /*
823 * Poisoned page might never drop its ref count to 0 so we have
824 * to uncharge it manually from its memcg.
825 */
826 mem_cgroup_uncharge(page_folio(p));
827
828 /*
829 * drop the page count elevated by isolate_lru_page()
830 */
831 put_page(p);
832 return 0;
833 }
834 return -EIO;
835}
836
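/*
 * Punch the poisoned page out of the page cache, preferring the filesystem's
 * ->error_remove_page() and falling back to plain invalidation. Returns
 * MF_RECOVERED on success, MF_FAILED otherwise.
 */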
837static int truncate_error_page(struct page *p, unsigned long pfn,
838 struct address_space *mapping)
839{
840 int ret = MF_FAILED;
841
842 if (mapping->a_ops->error_remove_page) {
843 struct folio *folio = page_folio(p);
844 int err = mapping->a_ops->error_remove_page(mapping, p);
845
846 if (err != 0) {
847 pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
848 } else if (folio_has_private(folio) &&
849 !filemap_release_folio(folio, GFP_NOIO)) {
850 pr_info("%#lx: failed to release buffers\n", pfn);
851 } else {
852 ret = MF_RECOVERED;
853 }
854 } else {
855 /*
856 * If the file system doesn't support it just invalidate
857 * This fails on dirty or anything with private pages
858 */
859 if (invalidate_inode_page(p))
860 ret = MF_RECOVERED;
861 else
862 pr_info("%#lx: Failed to invalidate\n", pfn);
863 }
864
865 return ret;
866}
867
868struct page_state {
869 unsigned long mask;
870 unsigned long res;
871 enum mf_action_page_type type;
872
873 /* Callback ->action() has to unlock the relevant page inside it. */
874 int (*action)(struct page_state *ps, struct page *p);
875};
876
877/*
878 * Return true if page is still referenced by others, otherwise return
879 * false.
880 *
881 * The extra_pins is true when one extra refcount is expected.
882 */
883static bool has_extra_refcount(struct page_state *ps, struct page *p,
884 bool extra_pins)
885{
886 int count = page_count(p) - 1;
887
888 if (extra_pins)
889 count -= 1;
890
891 if (count > 0) {
892 pr_err("%#lx: %s still referenced by %d users\n",
893 page_to_pfn(p), action_page_types[ps->type], count);
894 return true;
895 }
896
897 return false;
898}
899
900/*
901 * Error hit kernel page.
902 * Do nothing, try to be lucky and not touch this instead. For a few cases we
903 * could be more sophisticated.
904 */
905static int me_kernel(struct page_state *ps, struct page *p)
906{
907 unlock_page(p);
908 return MF_IGNORED;
909}
910
911/*
912 * Page in unknown state. Do nothing.
913 */
914static int me_unknown(struct page_state *ps, struct page *p)
915{
916 pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
917 unlock_page(p);
918 return MF_FAILED;
919}
920
921/*
922 * Clean (or cleaned) page cache page.
923 */
924static int me_pagecache_clean(struct page_state *ps, struct page *p)
925{
926 int ret;
927 struct address_space *mapping;
928 bool extra_pins;
929
930 delete_from_lru_cache(p);
931
932 /*
933	 * For anonymous pages we're done; the only reference left
934 * should be the one m_f() holds.
935 */
936 if (PageAnon(p)) {
937 ret = MF_RECOVERED;
938 goto out;
939 }
940
941 /*
942 * Now truncate the page in the page cache. This is really
943 * more like a "temporary hole punch"
944 * Don't do this for block devices when someone else
945 * has a reference, because it could be file system metadata
946 * and that's not safe to truncate.
947 */
948 mapping = page_mapping(p);
949 if (!mapping) {
950 /*
951		 * Page has been torn down in the meantime
952 */
953 ret = MF_FAILED;
954 goto out;
955 }
956
957 /*
958 * The shmem page is kept in page cache instead of truncating
959 * so is expected to have an extra refcount after error-handling.
960 */
961 extra_pins = shmem_mapping(mapping);
962
963 /*
964 * Truncation is a bit tricky. Enable it per file system for now.
965 *
966 * Open: to take i_rwsem or not for this? Right now we don't.
967 */
968 ret = truncate_error_page(p, page_to_pfn(p), mapping);
969 if (has_extra_refcount(ps, p, extra_pins))
970 ret = MF_FAILED;
971
972out:
973 unlock_page(p);
974
975 return ret;
976}
977
978/*
979 * Dirty pagecache page
980 * Issues: when the error hit a hole page the error is not properly
981 * propagated.
982 */
983static int me_pagecache_dirty(struct page_state *ps, struct page *p)
984{
985 struct address_space *mapping = page_mapping(p);
986
987 SetPageError(p);
988 /* TBD: print more information about the file. */
989 if (mapping) {
990 /*
991 * IO error will be reported by write(), fsync(), etc.
992 * who check the mapping.
993 * This way the application knows that something went
994 * wrong with its dirty file data.
995 *
996 * There's one open issue:
997 *
998 * The EIO will be only reported on the next IO
999 * operation and then cleared through the IO map.
1000 * Normally Linux has two mechanisms to pass IO error
1001 * first through the AS_EIO flag in the address space
1002 * and then through the PageError flag in the page.
1003 * Since we drop pages on memory failure handling the
1004		 * only mechanism open to use is through AS_EIO.
1005 *
1006 * This has the disadvantage that it gets cleared on
1007 * the first operation that returns an error, while
1008 * the PageError bit is more sticky and only cleared
1009 * when the page is reread or dropped. If an
1010 * application assumes it will always get error on
1011 * fsync, but does other operations on the fd before
1012 * and the page is dropped between then the error
1013 * will not be properly reported.
1014 *
1015 * This can already happen even without hwpoisoned
1016 * pages: first on metadata IO errors (which only
1017 * report through AS_EIO) or when the page is dropped
1018 * at the wrong time.
1019 *
1020 * So right now we assume that the application DTRT on
1021 * the first EIO, but we're not worse than other parts
1022 * of the kernel.
1023 */
1024 mapping_set_error(mapping, -EIO);
1025 }
1026
1027 return me_pagecache_clean(ps, p);
1028}
1029
1030/*
1031 * Clean and dirty swap cache.
1032 *
1033 * Dirty swap cache page is tricky to handle. The page could live both in page
1034 * cache and swap cache(ie. page is freshly swapped in). So it could be
1035 * referenced concurrently by 2 types of PTEs:
1036 * normal PTEs and swap PTEs. We try to handle them consistently by calling
1037 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
1038 * and then
1039 * - clear dirty bit to prevent IO
1040 * - remove from LRU
1041 * - but keep in the swap cache, so that when we return to it on
1042 * a later page fault, we know the application is accessing
1043 * corrupted data and shall be killed (we installed simple
1044 * interception code in do_swap_page to catch it).
1045 *
1046 * Clean swap cache pages can be directly isolated. A later page fault will
1047 * bring in the known good data from disk.
1048 */
1049static int me_swapcache_dirty(struct page_state *ps, struct page *p)
1050{
1051 int ret;
1052 bool extra_pins = false;
1053
1054 ClearPageDirty(p);
1055 /* Trigger EIO in shmem: */
1056 ClearPageUptodate(p);
1057
1058 ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
1059 unlock_page(p);
1060
1061 if (ret == MF_DELAYED)
1062 extra_pins = true;
1063
1064 if (has_extra_refcount(ps, p, extra_pins))
1065 ret = MF_FAILED;
1066
1067 return ret;
1068}
1069
1070static int me_swapcache_clean(struct page_state *ps, struct page *p)
1071{
1072 struct folio *folio = page_folio(p);
1073 int ret;
1074
1075 delete_from_swap_cache(folio);
1076
1077 ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
1078 folio_unlock(folio);
1079
1080 if (has_extra_refcount(ps, p, false))
1081 ret = MF_FAILED;
1082
1083 return ret;
1084}
1085
1086/*
1087 * Huge pages. Needs work.
1088 * Issues:
1089 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
1090 * To narrow down kill region to one page, we need to break up pmd.
1091 */
1092static int me_huge_page(struct page_state *ps, struct page *p)
1093{
1094 int res;
1095 struct page *hpage = compound_head(p);
1096 struct address_space *mapping;
1097 bool extra_pins = false;
1098
1099 if (!PageHuge(hpage))
1100 return MF_DELAYED;
1101
1102 mapping = page_mapping(hpage);
1103 if (mapping) {
1104 res = truncate_error_page(hpage, page_to_pfn(p), mapping);
1105 /* The page is kept in page cache. */
1106 extra_pins = true;
1107 unlock_page(hpage);
1108 } else {
1109 unlock_page(hpage);
1110 /*
1111 * migration entry prevents later access on error hugepage,
1112 * so we can free and dissolve it into buddy to save healthy
1113 * subpages.
1114 */
1115 put_page(hpage);
1116 if (__page_handle_poison(p) >= 0) {
1117 page_ref_inc(p);
1118 res = MF_RECOVERED;
1119 } else {
1120 res = MF_FAILED;
1121 }
1122 }
1123
1124 if (has_extra_refcount(ps, p, extra_pins))
1125 res = MF_FAILED;
1126
1127 return res;
1128}
1129
1130/*
1131 * Various page states we can handle.
1132 *
1133 * A page state is defined by its current page->flags bits.
1134 * The table matches them in order and calls the right handler.
1135 *
1136 * This is quite tricky because we can access page at any time
1137 * in its life cycle, so all accesses have to be extremely careful.
1138 *
1139 * This is not complete. More states could be added.
1140 * For any missing state don't attempt recovery.
1141 */
1142
1143#define dirty (1UL << PG_dirty)
1144#define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
1145#define unevict (1UL << PG_unevictable)
1146#define mlock (1UL << PG_mlocked)
1147#define lru (1UL << PG_lru)
1148#define head (1UL << PG_head)
1149#define slab (1UL << PG_slab)
1150#define reserved (1UL << PG_reserved)
1151
1152static struct page_state error_states[] = {
1153 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
1154 /*
1155 * free pages are specially detected outside this table:
1156 * PG_buddy pages only make a small fraction of all free pages.
1157 */
1158
1159 /*
1160 * Could in theory check if slab page is free or if we can drop
1161 * currently unused objects without touching them. But just
1162 * treat it as standard kernel for now.
1163 */
1164 { slab, slab, MF_MSG_SLAB, me_kernel },
1165
1166 { head, head, MF_MSG_HUGE, me_huge_page },
1167
1168 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
1169 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
1170
1171 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
1172 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
1173
1174 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
1175 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
1176
1177 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
1178 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
1179
1180 /*
1181 * Catchall entry: must be at end.
1182 */
1183 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
1184};
1185
1186#undef dirty
1187#undef sc
1188#undef unevict
1189#undef mlock
1190#undef lru
1191#undef head
1192#undef slab
1193#undef reserved
1194
1195/*
1196 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
1197 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1198 */
1199static int action_result(unsigned long pfn, enum mf_action_page_type type,
1200 enum mf_result result)
1201{
1202 trace_memory_failure_event(pfn, type, result);
1203
1204 num_poisoned_pages_inc(pfn);
1205 pr_err("%#lx: recovery action for %s: %s\n",
1206 pfn, action_page_types[type], action_name[result]);
1207
1208 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
1209}
1210
1211static int page_action(struct page_state *ps, struct page *p,
1212 unsigned long pfn)
1213{
1214 int result;
1215
1216 /* page p should be unlocked after returning from ps->action(). */
1217 result = ps->action(ps, p);
1218
1219 /* Could do more checks here if page looks ok */
1220 /*
1221 * Could adjust zone counters here to correct for the missing page.
1222 */
1223
1224 return action_result(pfn, ps->type, result);
1225}
1226
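/*
 * A hwpoisoned page that was also taken off the buddy freelist is tagged by
 * stashing MAGIC_HWPOISON in page_private(), so that unpoison_memory() can
 * recognize it and undo both operations.
 */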
1227static inline bool PageHWPoisonTakenOff(struct page *page)
1228{
1229 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
1230}
1231
1232void SetPageHWPoisonTakenOff(struct page *page)
1233{
1234 set_page_private(page, MAGIC_HWPOISON);
1235}
1236
1237void ClearPageHWPoisonTakenOff(struct page *page)
1238{
1239 if (PageHWPoison(page))
1240 set_page_private(page, 0);
1241}
1242
1243/*
1244 * Return true if a page type of a given page is supported by hwpoison
1245 * mechanism (while handling could fail), otherwise false. This function
1246 * does not return true for hugetlb or device memory pages, so it's assumed
1247 * to be called only in the context where we never have such pages.
1248 */
1249static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
1250{
1251 /* Soft offline could migrate non-LRU movable pages */
1252 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
1253 return true;
1254
1255 return PageLRU(page) || is_free_buddy_page(page);
1256}
1257
1258static int __get_hwpoison_page(struct page *page, unsigned long flags)
1259{
1260 struct page *head = compound_head(page);
1261 int ret = 0;
1262 bool hugetlb = false;
1263
1264 ret = get_hwpoison_huge_page(head, &hugetlb, false);
1265 if (hugetlb)
1266 return ret;
1267
1268 /*
1269	 * This check prevents calling get_page_unless_zero() for any
1270 * unsupported type of page in order to reduce the risk of unexpected
1271 * races caused by taking a page refcount.
1272 */
1273 if (!HWPoisonHandlable(head, flags))
1274 return -EBUSY;
1275
1276 if (get_page_unless_zero(head)) {
1277 if (head == compound_head(page))
1278 return 1;
1279
1280 pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
1281 put_page(head);
1282 }
1283
1284 return 0;
1285}
1286
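/*
 * Grab a refcount on the error page, retrying a few times with shake_page()
 * when it races with allocation, freeing or page type transitions. See the
 * kerneldoc of get_hwpoison_page() below for the return values.
 */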
1287static int get_any_page(struct page *p, unsigned long flags)
1288{
1289 int ret = 0, pass = 0;
1290 bool count_increased = false;
1291
1292 if (flags & MF_COUNT_INCREASED)
1293 count_increased = true;
1294
1295try_again:
1296 if (!count_increased) {
1297 ret = __get_hwpoison_page(p, flags);
1298 if (!ret) {
1299 if (page_count(p)) {
1300 /* We raced with an allocation, retry. */
1301 if (pass++ < 3)
1302 goto try_again;
1303 ret = -EBUSY;
1304 } else if (!PageHuge(p) && !is_free_buddy_page(p)) {
1305 /* We raced with put_page, retry. */
1306 if (pass++ < 3)
1307 goto try_again;
1308 ret = -EIO;
1309 }
1310 goto out;
1311 } else if (ret == -EBUSY) {
1312 /*
1313 * We raced with (possibly temporary) unhandlable
1314 * page, retry.
1315 */
1316 if (pass++ < 3) {
1317 shake_page(p);
1318 goto try_again;
1319 }
1320 ret = -EIO;
1321 goto out;
1322 }
1323 }
1324
1325 if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
1326 ret = 1;
1327 } else {
1328 /*
1329 * A page we cannot handle. Check whether we can turn
1330 * it into something we can handle.
1331 */
1332 if (pass++ < 3) {
1333 put_page(p);
1334 shake_page(p);
1335 count_increased = false;
1336 goto try_again;
1337 }
1338 put_page(p);
1339 ret = -EIO;
1340 }
1341out:
1342 if (ret == -EIO)
1343 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));
1344
1345 return ret;
1346}
1347
1348static int __get_unpoison_page(struct page *page)
1349{
1350 struct page *head = compound_head(page);
1351 int ret = 0;
1352 bool hugetlb = false;
1353
1354 ret = get_hwpoison_huge_page(head, &hugetlb, true);
1355 if (hugetlb)
1356 return ret;
1357
1358 /*
1359 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
1360	 * but also isolated from the buddy freelist, so we need to identify that
1361	 * state and cancel both operations to unpoison.
1362 */
1363 if (PageHWPoisonTakenOff(page))
1364 return -EHWPOISON;
1365
1366 return get_page_unless_zero(page) ? 1 : 0;
1367}
1368
1369/**
1370 * get_hwpoison_page() - Get refcount for memory error handling
1371 * @p: Raw error page (hit by memory error)
1372 * @flags: Flags controlling behavior of error handling
1373 *
1374 * get_hwpoison_page() takes a page refcount of an error page to handle memory
1375 * error on it, after checking that the error page is in a well-defined state
1376 * (defined as a page-type we can successfully handle the memory error on it,
1377 * such as LRU page and hugetlb page).
1378 *
1379 * Memory error handling could be triggered at any time on any type of page,
1380 * so it's prone to race with typical memory management lifecycle (like
1381 * allocation and free). So to avoid such races, get_hwpoison_page() takes
1382 * extra care for the error page's state (as done in __get_hwpoison_page()),
1383 * and has some retry logic in get_any_page().
1384 *
1385 * When called from unpoison_memory(), the caller should already ensure that
1386 * the given page has PG_hwpoison. So it's never reused for other page
1387 * allocations, and __get_unpoison_page() never races with them.
1388 *
1389 * Return: 0 on failure,
1390 * 1 on success for in-use pages in a well-defined state,
1391 * -EIO for pages on which we can not handle memory errors,
1392 * -EBUSY when get_hwpoison_page() has raced with page lifecycle
1393 * operations like allocation and free,
1394 * -EHWPOISON when the page is hwpoisoned and taken off from buddy.
1395 */
1396static int get_hwpoison_page(struct page *p, unsigned long flags)
1397{
1398 int ret;
1399
1400 zone_pcp_disable(page_zone(p));
1401 if (flags & MF_UNPOISON)
1402 ret = __get_unpoison_page(p);
1403 else
1404 ret = get_any_page(p, flags);
1405 zone_pcp_enable(page_zone(p));
1406
1407 return ret;
1408}
1409
1410/*
1411 * Do all that is necessary to remove user space mappings. Unmap
1412 * the pages and send SIGBUS to the processes if the data was dirty.
1413 */
1414static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1415 int flags, struct page *hpage)
1416{
1417 struct folio *folio = page_folio(hpage);
1418 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
1419 struct address_space *mapping;
1420 LIST_HEAD(tokill);
1421 bool unmap_success;
1422 int forcekill;
1423 bool mlocked = PageMlocked(hpage);
1424
1425 /*
1426 * Here we are interested only in user-mapped pages, so skip any
1427 * other types of pages.
1428 */
1429 if (PageReserved(p) || PageSlab(p) || PageTable(p))
1430 return true;
1431 if (!(PageLRU(hpage) || PageHuge(p)))
1432 return true;
1433
1434 /*
1435 * This check implies we don't kill processes if their pages
1436 * are in the swap cache early. Those are always late kills.
1437 */
1438 if (!page_mapped(hpage))
1439 return true;
1440
1441 if (PageKsm(p)) {
1442 pr_err("%#lx: can't handle KSM pages.\n", pfn);
1443 return false;
1444 }
1445
1446 if (PageSwapCache(p)) {
1447 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
1448 ttu |= TTU_IGNORE_HWPOISON;
1449 }
1450
1451 /*
1452 * Propagate the dirty bit from PTEs to struct page first, because we
1453 * need this to decide if we should kill or just drop the page.
1454 * XXX: the dirty test could be racy: set_page_dirty() may not always
1455 * be called inside page lock (it's recommended but not enforced).
1456 */
1457 mapping = page_mapping(hpage);
1458 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1459 mapping_can_writeback(mapping)) {
1460 if (page_mkclean(hpage)) {
1461 SetPageDirty(hpage);
1462 } else {
1463 ttu |= TTU_IGNORE_HWPOISON;
1464 pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
1465 pfn);
1466 }
1467 }
1468
1469 /*
1470 * First collect all the processes that have the page
1471 * mapped in dirty form. This has to be done before try_to_unmap,
1472 * because ttu takes the rmap data structures down.
1473 */
1474 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
1475
1476 if (PageHuge(hpage) && !PageAnon(hpage)) {
1477 /*
1478 * For hugetlb pages in shared mappings, try_to_unmap
1479 * could potentially call huge_pmd_unshare. Because of
1480 * this, take semaphore in write mode here and set
1481 * TTU_RMAP_LOCKED to indicate we have taken the lock
1482 * at this higher level.
1483 */
1484 mapping = hugetlb_page_mapping_lock_write(hpage);
1485 if (mapping) {
1486 try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
1487 i_mmap_unlock_write(mapping);
1488 } else
1489 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
1490 } else {
1491 try_to_unmap(folio, ttu);
1492 }
1493
1494 unmap_success = !page_mapped(hpage);
1495 if (!unmap_success)
1496 pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
1497 pfn, page_mapcount(hpage));
1498
1499 /*
1500 * try_to_unmap() might put mlocked page in lru cache, so call
1501 * shake_page() again to ensure that it's flushed.
1502 */
1503 if (mlocked)
1504 shake_page(hpage);
1505
1506 /*
1507 * Now that the dirty bit has been propagated to the
1508 * struct page and all unmaps done we can decide if
1509 * killing is needed or not. Only kill when the page
1510 * was dirty or the process is not restartable,
1511 * otherwise the tokill list is merely
1512 * freed. When there was a problem unmapping earlier
1513	 * use a more forceful uncatchable kill to prevent
1514 * any accesses to the poisoned memory.
1515 */
1516 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
1517 !unmap_success;
1518 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1519
1520 return unmap_success;
1521}
1522
1523static int identify_page_state(unsigned long pfn, struct page *p,
1524 unsigned long page_flags)
1525{
1526 struct page_state *ps;
1527
1528 /*
1529 * The first check uses the current page flags which may not have any
1530 * relevant information. The second check with the saved page flags is
1531 * carried out only if the first check can't determine the page status.
1532 */
1533 for (ps = error_states;; ps++)
1534 if ((p->flags & ps->mask) == ps->res)
1535 break;
1536
1537 page_flags |= (p->flags & (1UL << PG_dirty));
1538
1539 if (!ps->mask)
1540 for (ps = error_states;; ps++)
1541 if ((page_flags & ps->mask) == ps->res)
1542 break;
1543 return page_action(ps, p, pfn);
1544}
1545
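/*
 * Try to split a THP so the error can be contained in a single base page.
 * On failure the refcount taken by the caller is dropped here.
 */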
1546static int try_to_split_thp_page(struct page *page)
1547{
1548 int ret;
1549
1550 lock_page(page);
1551 ret = split_huge_page(page);
1552 unlock_page(page);
1553
1554 if (unlikely(ret))
1555 put_page(page);
1556
1557 return ret;
1558}
1559
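/*
 * Zap the user mappings of the poisoned DAX range and signal every task on
 * @to_kill; used by both the generic and the fsdax failure paths.
 */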
1560static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
1561 struct address_space *mapping, pgoff_t index, int flags)
1562{
1563 struct to_kill *tk;
1564 unsigned long size = 0;
1565
1566 list_for_each_entry(tk, to_kill, nd)
1567 if (tk->size_shift)
1568 size = max(size, 1UL << tk->size_shift);
1569
1570 if (size) {
1571 /*
1572 * Unmap the largest mapping to avoid breaking up device-dax
1573 * mappings which are constant size. The actual size of the
1574 * mapping being torn down is communicated in siginfo, see
1575 * kill_proc()
1576 */
1577 loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
1578
1579 unmap_mapping_range(mapping, start, size, 0);
1580 }
1581
1582 kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
1583}
1584
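/*
 * Generic handling of memory failure on ZONE_DEVICE (dax) pages, used when no
 * driver or filesystem ->memory_failure() handler claims the event.
 */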
1585static int mf_generic_kill_procs(unsigned long long pfn, int flags,
1586 struct dev_pagemap *pgmap)
1587{
1588 struct page *page = pfn_to_page(pfn);
1589 LIST_HEAD(to_kill);
1590 dax_entry_t cookie;
1591 int rc = 0;
1592
1593 /*
1594 * Pages instantiated by device-dax (not filesystem-dax)
1595 * may be compound pages.
1596 */
1597 page = compound_head(page);
1598
1599 /*
1600 * Prevent the inode from being freed while we are interrogating
1601 * the address_space, typically this would be handled by
1602 * lock_page(), but dax pages do not use the page lock. This
1603 * also prevents changes to the mapping of this pfn until
1604 * poison signaling is complete.
1605 */
1606 cookie = dax_lock_page(page);
1607 if (!cookie)
1608 return -EBUSY;
1609
1610 if (hwpoison_filter(page)) {
1611 rc = -EOPNOTSUPP;
1612 goto unlock;
1613 }
1614
1615 switch (pgmap->type) {
1616 case MEMORY_DEVICE_PRIVATE:
1617 case MEMORY_DEVICE_COHERENT:
1618 /*
1619 * TODO: Handle device pages which may need coordination
1620 * with device-side memory.
1621 */
1622 rc = -ENXIO;
1623 goto unlock;
1624 default:
1625 break;
1626 }
1627
1628 /*
1629 * Use this flag as an indication that the dax page has been
1630 * remapped UC to prevent speculative consumption of poison.
1631 */
1632 SetPageHWPoison(page);
1633
1634 /*
1635 * Unlike System-RAM there is no possibility to swap in a
1636 * different physical page at a given virtual address, so all
1637 * userspace consumption of ZONE_DEVICE memory necessitates
1638 * SIGBUS (i.e. MF_MUST_KILL)
1639 */
1640 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1641 collect_procs(page, &to_kill, true);
1642
1643 unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
1644unlock:
1645 dax_unlock_page(page, cookie);
1646 return rc;
1647}
1648
1649#ifdef CONFIG_FS_DAX
1650/**
1651 * mf_dax_kill_procs - Collect and kill processes who are using this file range
1652 * @mapping: address_space of the file in use
1653 * @index: start pgoff of the range within the file
1654 * @count: length of the range, in unit of PAGE_SIZE
1655 * @mf_flags: memory failure flags
1656 */
1657int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
1658 unsigned long count, int mf_flags)
1659{
1660 LIST_HEAD(to_kill);
1661 dax_entry_t cookie;
1662 struct page *page;
1663 size_t end = index + count;
1664
1665 mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1666
1667 for (; index < end; index++) {
1668 page = NULL;
1669 cookie = dax_lock_mapping_entry(mapping, index, &page);
1670 if (!cookie)
1671 return -EBUSY;
1672 if (!page)
1673 goto unlock;
1674
1675 SetPageHWPoison(page);
1676
1677 collect_procs_fsdax(page, mapping, index, &to_kill);
1678 unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
1679 index, mf_flags);
1680unlock:
1681 dax_unlock_mapping_entry(mapping, index, cookie);
1682 }
1683 return 0;
1684}
1685EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
1686#endif /* CONFIG_FS_DAX */
1687
1688#ifdef CONFIG_HUGETLB_PAGE
1689/*
1690 * Struct raw_hwp_page represents information about "raw error page",
1691 * forming a singly linked list off the ->_hugetlb_hwpoison field of the folio.
1692 */
1693struct raw_hwp_page {
1694 struct llist_node node;
1695 struct page *page;
1696};
1697
1698static inline struct llist_head *raw_hwp_list_head(struct page *hpage)
1699{
1700 return (struct llist_head *)&page_folio(hpage)->_hugetlb_hwpoison;
1701}
1702
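/*
 * Drain the raw error list of a hugetlb page. When @move_flag is true the
 * hwpoison marker is moved to each recorded subpage (used when the hugepage
 * is dissolved), otherwise only the poison accounting is dropped.
 * Returns the number of entries freed.
 */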
1703static unsigned long __free_raw_hwp_pages(struct page *hpage, bool move_flag)
1704{
1705 struct llist_head *head;
1706 struct llist_node *t, *tnode;
1707 unsigned long count = 0;
1708
1709 head = raw_hwp_list_head(hpage);
1710 llist_for_each_safe(tnode, t, head->first) {
1711 struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
1712
1713 if (move_flag)
1714 SetPageHWPoison(p->page);
1715 else
1716 num_poisoned_pages_sub(page_to_pfn(p->page), 1);
1717 kfree(p);
1718 count++;
1719 }
1720 llist_del_all(head);
1721 return count;
1722}
1723
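/*
 * Record @page as a raw error subpage of hugetlb page @hpage. Returns 0 if
 * this is the first error on @hpage, -EHWPOISON if @hpage or this subpage was
 * already poisoned or the raw error list is no longer reliable.
 */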
1724static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
1725{
1726 struct llist_head *head;
1727 struct raw_hwp_page *raw_hwp;
1728 struct llist_node *t, *tnode;
1729 int ret = TestSetPageHWPoison(hpage) ? -EHWPOISON : 0;
1730
1731 /*
1732 * Once the hwpoison hugepage has lost reliable raw error info,
1733	 * there is little point in keeping additional error info precisely,
1734	 * so skip recording additional raw error info.
1735 */
1736 if (HPageRawHwpUnreliable(hpage))
1737 return -EHWPOISON;
1738 head = raw_hwp_list_head(hpage);
1739 llist_for_each_safe(tnode, t, head->first) {
1740 struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
1741
1742 if (p->page == page)
1743 return -EHWPOISON;
1744 }
1745
1746 raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
1747 if (raw_hwp) {
1748 raw_hwp->page = page;
1749 llist_add(&raw_hwp->node, head);
1750 /* the first error event will be counted in action_result(). */
1751 if (ret)
1752 num_poisoned_pages_inc(page_to_pfn(page));
1753 } else {
1754 /*
1755 * Failed to save raw error info. We no longer trace all
1756		 * hwpoisoned subpages, and we must refuse to free/dissolve
1757 * this hwpoisoned hugepage.
1758 */
1759 SetHPageRawHwpUnreliable(hpage);
1760 /*
1761 * Once HPageRawHwpUnreliable is set, raw_hwp_page is not
1762 * used any more, so free it.
1763 */
1764 __free_raw_hwp_pages(hpage, false);
1765 }
1766 return ret;
1767}
1768
1769static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
1770{
1771 /*
1772 * HPageVmemmapOptimized hugepages can't be freed because struct
1773 * pages for tail pages are required but they don't exist.
1774 */
1775 if (move_flag && HPageVmemmapOptimized(hpage))
1776 return 0;
1777
1778 /*
1779 * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by
1780 * definition.
1781 */
1782 if (HPageRawHwpUnreliable(hpage))
1783 return 0;
1784
1785 return __free_raw_hwp_pages(hpage, move_flag);
1786}
1787
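/*
 * Clear the hwpoison marker on a hugetlb page (unless its raw error list has
 * become unreliable) and move the markers back onto the raw subpages.
 */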
1788void hugetlb_clear_page_hwpoison(struct page *hpage)
1789{
1790 if (HPageRawHwpUnreliable(hpage))
1791 return;
1792 ClearPageHWPoison(hpage);
1793 free_raw_hwp_pages(hpage, true);
1794}
1795
1796/*
1797 * Called from hugetlb code with hugetlb_lock held.
1798 *
1799 * Return values:
1800 * 0 - free hugepage
1801 * 1 - in-use hugepage
1802 * 2 - not a hugepage
1803 * -EBUSY - the hugepage is busy (try to retry)
1804 * -EHWPOISON - the hugepage is already hwpoisoned
1805 */
1806int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
1807 bool *migratable_cleared)
1808{
1809 struct page *page = pfn_to_page(pfn);
1810 struct page *head = compound_head(page);
1811 int ret = 2; /* fallback to normal page handling */
1812 bool count_increased = false;
1813
1814 if (!PageHeadHuge(head))
1815 goto out;
1816
1817 if (flags & MF_COUNT_INCREASED) {
1818 ret = 1;
1819 count_increased = true;
1820 } else if (HPageFreed(head)) {
1821 ret = 0;
1822 } else if (HPageMigratable(head)) {
1823 ret = get_page_unless_zero(head);
1824 if (ret)
1825 count_increased = true;
1826 } else {
1827 ret = -EBUSY;
1828 if (!(flags & MF_NO_RETRY))
1829 goto out;
1830 }
1831
1832 if (hugetlb_set_page_hwpoison(head, page)) {
1833 ret = -EHWPOISON;
1834 goto out;
1835 }
1836
1837 /*
1838 * Clearing HPageMigratable for hwpoisoned hugepages to prevent them
1839 * from being migrated by memory hotremove.
1840 */
1841 if (count_increased && HPageMigratable(head)) {
1842 ClearHPageMigratable(head);
1843 *migratable_cleared = true;
1844 }
1845
1846 return ret;
1847out:
1848 if (count_increased)
1849 put_page(head);
1850 return ret;
1851}
1852
1853/*
1854 * Taking refcount of hugetlb pages needs extra care about race conditions
1855 * with basic operations like hugepage allocation/free/demotion.
1856 * So some of prechecks for hwpoison (pinning, and testing/setting
1857 * PageHWPoison) should be done in single hugetlb_lock range.
1858 */
1859static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
1860{
1861 int res;
1862 struct page *p = pfn_to_page(pfn);
1863 struct page *head;
1864 unsigned long page_flags;
1865 bool migratable_cleared = false;
1866
1867 *hugetlb = 1;
1868retry:
1869 res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
1870 if (res == 2) { /* fallback to normal page handling */
1871 *hugetlb = 0;
1872 return 0;
1873 } else if (res == -EHWPOISON) {
1874 pr_err("%#lx: already hardware poisoned\n", pfn);
1875 if (flags & MF_ACTION_REQUIRED) {
1876 head = compound_head(p);
1877 res = kill_accessing_process(current, page_to_pfn(head), flags);
1878 }
1879 return res;
1880 } else if (res == -EBUSY) {
1881 if (!(flags & MF_NO_RETRY)) {
1882 flags |= MF_NO_RETRY;
1883 goto retry;
1884 }
1885 return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
1886 }
1887
1888 head = compound_head(p);
1889 lock_page(head);
1890
1891 if (hwpoison_filter(p)) {
1892 hugetlb_clear_page_hwpoison(head);
1893 if (migratable_cleared)
1894 SetHPageMigratable(head);
1895 unlock_page(head);
1896 if (res == 1)
1897 put_page(head);
1898 return -EOPNOTSUPP;
1899 }
1900
1901 /*
1902 * Handling free hugepage. The possible race with hugepage allocation
1903 * or demotion can be prevented by PageHWPoison flag.
1904 */
1905 if (res == 0) {
1906 unlock_page(head);
1907 if (__page_handle_poison(p) >= 0) {
1908 page_ref_inc(p);
1909 res = MF_RECOVERED;
1910 } else {
1911 res = MF_FAILED;
1912 }
1913 return action_result(pfn, MF_MSG_FREE_HUGE, res);
1914 }
1915
1916 page_flags = head->flags;
1917
1918 if (!hwpoison_user_mappings(p, pfn, flags, head)) {
1919 unlock_page(head);
1920 return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1921 }
1922
1923 return identify_page_state(pfn, p, page_flags);
1924}
1925
1926#else
1927static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
1928{
1929 return 0;
1930}
1931
1932static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
1933{
1934 return 0;
1935}
1936#endif /* CONFIG_HUGETLB_PAGE */
1937
1938/* Drop the extra refcount in case we come from madvise() */
1939static void put_ref_page(unsigned long pfn, int flags)
1940{
1941 struct page *page;
1942
1943 if (!(flags & MF_COUNT_INCREASED))
1944 return;
1945
1946 page = pfn_to_page(pfn);
1947 if (page)
1948 put_page(page);
1949}
1950
1951static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
1952 struct dev_pagemap *pgmap)
1953{
1954 int rc = -ENXIO;
1955
1956 put_ref_page(pfn, flags);
1957
1958 /* device metadata space is not recoverable */
1959 if (!pgmap_pfn_valid(pgmap, pfn))
1960 goto out;
1961
1962 /*
1963 * Call driver's implementation to handle the memory failure, otherwise
1964 * fall back to generic handler.
1965 */
1966 if (pgmap_has_memory_failure(pgmap)) {
1967 rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
1968 /*
1969 * Fall back to generic handler too if operation is not
1970 * supported inside the driver/device/filesystem.
1971 */
1972 if (rc != -EOPNOTSUPP)
1973 goto out;
1974 }
1975
1976 rc = mf_generic_kill_procs(pfn, flags, pgmap);
1977out:
1978 /* drop pgmap ref acquired in caller */
1979 put_dev_pagemap(pgmap);
1980 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
1981 return rc;
1982}
1983
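/* Serializes memory_failure(), unpoison and soft offline handling. */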
1984static DEFINE_MUTEX(mf_mutex);
1985
1986/**
1987 * memory_failure - Handle memory failure of a page.
1988 * @pfn: Page Number of the corrupted page
1989 * @flags: fine tune action taken
1990 *
1991 * This function is called by the low level machine check code
1992 * of an architecture when it detects hardware memory corruption
1993 * of a page. It tries its best to recover, which includes
1994 * dropping pages, killing processes etc.
1995 *
1996 * The function is primarily of use for corruptions that
1997 * happen outside the current execution context (e.g. when
1998 * detected by a background scrubber)
1999 *
2000 * Must run in process context (e.g. a work queue) with interrupts
2001 * enabled and no spinlocks held.
2002 *
2003 * Return: 0 for successfully handled the memory error,
2004 * -EOPNOTSUPP for hwpoison_filter() filtered the error event,
2005 * < 0(except -EOPNOTSUPP) on failure.
2006 */
2007int memory_failure(unsigned long pfn, int flags)
2008{
2009 struct page *p;
2010 struct page *hpage;
2011 struct dev_pagemap *pgmap;
2012 int res = 0;
2013 unsigned long page_flags;
2014 bool retry = true;
2015 int hugetlb = 0;
2016
2017 if (!sysctl_memory_failure_recovery)
2018 panic("Memory failure on page %lx", pfn);
2019
2020 mutex_lock(&mf_mutex);
2021
2022 if (!(flags & MF_SW_SIMULATED))
2023 hw_memory_failure = true;
2024
2025 p = pfn_to_online_page(pfn);
2026 if (!p) {
2027 res = arch_memory_failure(pfn, flags);
2028 if (res == 0)
2029 goto unlock_mutex;
2030
2031 if (pfn_valid(pfn)) {
2032 pgmap = get_dev_pagemap(pfn, NULL);
2033 if (pgmap) {
2034 res = memory_failure_dev_pagemap(pfn, flags,
2035 pgmap);
2036 goto unlock_mutex;
2037 }
2038 }
2039 pr_err("%#lx: memory outside kernel control\n", pfn);
2040 res = -ENXIO;
2041 goto unlock_mutex;
2042 }
2043
2044try_again:
2045 res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
2046 if (hugetlb)
2047 goto unlock_mutex;
2048
2049 if (TestSetPageHWPoison(p)) {
2050 pr_err("%#lx: already hardware poisoned\n", pfn);
2051 res = -EHWPOISON;
2052 if (flags & MF_ACTION_REQUIRED)
2053 res = kill_accessing_process(current, pfn, flags);
2054 if (flags & MF_COUNT_INCREASED)
2055 put_page(p);
2056 goto unlock_mutex;
2057 }
2058
2059 hpage = compound_head(p);
2060
2061 /*
2062 * There is nothing we need or can do about count=0 pages:
2063 * 1) it's a free page, and therefore in safe hands:
2064 * check_new_page() will be the gatekeeper.
2065 * 2) it's part of a non-compound high order page.
2066 * This implies some kernel user: we cannot stop them from
2067 * reading/writing the page; let's pray that the page has been
2068 * used and will be freed some time later.
2069 * In fact it's dangerous to directly bump up the page count from 0,
2070 * as that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
2071 */
2072 if (!(flags & MF_COUNT_INCREASED)) {
2073 res = get_hwpoison_page(p, flags);
2074 if (!res) {
2075 if (is_free_buddy_page(p)) {
2076 if (take_page_off_buddy(p)) {
2077 page_ref_inc(p);
2078 res = MF_RECOVERED;
2079 } else {
2080 /* We lost the race, try again */
2081 if (retry) {
2082 ClearPageHWPoison(p);
2083 retry = false;
2084 goto try_again;
2085 }
2086 res = MF_FAILED;
2087 }
2088 res = action_result(pfn, MF_MSG_BUDDY, res);
2089 } else {
2090 res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
2091 }
2092 goto unlock_mutex;
2093 } else if (res < 0) {
2094 res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2095 goto unlock_mutex;
2096 }
2097 }
2098
2099 if (PageTransHuge(hpage)) {
2100 /*
2101 * The flag must be set after the refcount is bumped,
2102 * otherwise it may race with THP split.
2103 * And the flag can't be set in get_hwpoison_page() since
2104 * it is also called by soft offline and it is only called
2105 * for !MF_COUNT_INCREASED. So here seems to be the best
2106 * place.
2107 *
2108 * We don't need to care about the above error handling paths for
2109 * get_hwpoison_page() since they handle either a free page
2110 * or an unhandlable page. The refcount is bumped iff the
2111 * page is a valid handlable page.
2112 */
2113 SetPageHasHWPoisoned(hpage);
2114 if (try_to_split_thp_page(p) < 0) {
2115 res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
2116 goto unlock_mutex;
2117 }
2118 VM_BUG_ON_PAGE(!page_count(p), p);
2119 }
2120
2121 /*
2122 * We ignore non-LRU pages for good reasons.
2123 * - PG_locked is only well defined for LRU pages and a few others
2124 * - to avoid races with __SetPageLocked()
2125 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
2126 * The check (unnecessarily) ignores LRU pages being isolated and
2127 * walked by the page reclaim code, however that's not a big loss.
2128 */
2129 shake_page(p);
2130
2131 lock_page(p);
2132
2133 /*
2134 * We only intend to deal with non-compound pages here.
2135 * However, the page could have become part of a compound page due to
2136 * a race window. If this happens, we can try again to hopefully
2137 * handle the page in the next round.
2138 */
2139 if (PageCompound(p)) {
2140 if (retry) {
2141 ClearPageHWPoison(p);
2142 unlock_page(p);
2143 put_page(p);
2144 flags &= ~MF_COUNT_INCREASED;
2145 retry = false;
2146 goto try_again;
2147 }
2148 res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
2149 goto unlock_page;
2150 }
2151
2152 /*
2153 * We use page flags to determine what action should be taken, but
2154 * the flags can be modified by the error containment action. One
2155 * example is an mlocked page, where PG_mlocked is cleared by
2156 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
2157 * correctly, we save a copy of the page flags at this time.
2158 */
2159 page_flags = p->flags;
2160
2161 if (hwpoison_filter(p)) {
2162 ClearPageHWPoison(p);
2163 unlock_page(p);
2164 put_page(p);
2165 res = -EOPNOTSUPP;
2166 goto unlock_mutex;
2167 }
2168
2169 /*
2170 * __munlock_pagevec may clear a writeback page's LRU flag without
2171 * the page lock. We need to wait for writeback completion on this
2172 * page or it may trigger a vfs BUG while evicting the inode.
2173 */
2174 if (!PageLRU(p) && !PageWriteback(p))
2175 goto identify_page_state;
2176
2177 /*
2178 * It's very difficult to mess with pages currently under IO
2179 * and in many cases impossible, so we just avoid it here.
2180 */
2181 wait_on_page_writeback(p);
2182
2183 /*
2184 * Now take care of user space mappings.
2185 * Abort on failure: __filemap_remove_folio() assumes an unmapped page.
2186 */
2187 if (!hwpoison_user_mappings(p, pfn, flags, p)) {
2188 res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2189 goto unlock_page;
2190 }
2191
2192 /*
2193 * Torn down by someone else?
2194 */
2195 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
2196 res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
2197 goto unlock_page;
2198 }
2199
2200identify_page_state:
2201 res = identify_page_state(pfn, p, page_flags);
2202 mutex_unlock(&mf_mutex);
2203 return res;
2204unlock_page:
2205 unlock_page(p);
2206unlock_mutex:
2207 mutex_unlock(&mf_mutex);
2208 return res;
2209}
2210EXPORT_SYMBOL_GPL(memory_failure);
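
/*
 * Example (illustrative sketch only, not part of the original source): a
 * RAS driver or machine check handler running in process context, e.g.
 * from a work queue, could report a corrupted pfn roughly like this.
 * The helper name and the way the pfn is obtained are hypothetical:
 *
 *	static void example_handle_corrupted_pfn(unsigned long pfn)
 *	{
 *		int rc = memory_failure(pfn, 0);
 *
 *		if (rc && rc != -EOPNOTSUPP)
 *			pr_err("recovery failed for pfn %#lx: %d\n", pfn, rc);
 *	}
 */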
2211
2212#define MEMORY_FAILURE_FIFO_ORDER 4
2213#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
2214
2215struct memory_failure_entry {
2216 unsigned long pfn;
2217 int flags;
2218};
2219
2220struct memory_failure_cpu {
2221 DECLARE_KFIFO(fifo, struct memory_failure_entry,
2222 MEMORY_FAILURE_FIFO_SIZE);
2223 spinlock_t lock;
2224 struct work_struct work;
2225};
2226
2227static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
2228
2229/**
2230 * memory_failure_queue - Schedule handling memory failure of a page.
2231 * @pfn: Page Number of the corrupted page
2232 * @flags: Flags for memory failure handling
2233 *
2234 * This function is called by the low level hardware error handler
2235 * when it detects hardware memory corruption of a page. It schedules
2236 * the recovering of error page, including dropping pages, killing
2237 * processes etc.
2238 *
2239 * The function is primarily of use for corruptions that
2240 * happen outside the current execution context (e.g. when
2241 * detected by a background scrubber)
2242 *
2243 * Can run in IRQ context.
2244 */
2245void memory_failure_queue(unsigned long pfn, int flags)
2246{
2247 struct memory_failure_cpu *mf_cpu;
2248 unsigned long proc_flags;
2249 struct memory_failure_entry entry = {
2250 .pfn = pfn,
2251 .flags = flags,
2252 };
2253
2254 mf_cpu = &get_cpu_var(memory_failure_cpu);
2255 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2256 if (kfifo_put(&mf_cpu->fifo, entry))
2257 schedule_work_on(smp_processor_id(), &mf_cpu->work);
2258 else
2259 pr_err("buffer overflow when queuing memory failure at %#lx\n",
2260 pfn);
2261 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2262 put_cpu_var(memory_failure_cpu);
2263}
2264EXPORT_SYMBOL_GPL(memory_failure_queue);
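
/*
 * Example (illustrative sketch only): because memory_failure_queue() may
 * run in IRQ context, an error interrupt handler can defer the heavy
 * lifting to process context. The handler and the way the bad pfn is
 * obtained are hypothetical:
 *
 *	static irqreturn_t example_ecc_error_isr(int irq, void *dev_id)
 *	{
 *		unsigned long bad_pfn = example_read_bad_pfn(dev_id);
 *
 *		memory_failure_queue(bad_pfn, 0);
 *		return IRQ_HANDLED;
 *	}
 */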
2265
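/*
 * Drain this CPU's memory failure FIFO and handle each queued entry,
 * either as a soft offline request or as a full memory_failure() event,
 * depending on the queued flags.
 */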
2266static void memory_failure_work_func(struct work_struct *work)
2267{
2268 struct memory_failure_cpu *mf_cpu;
2269 struct memory_failure_entry entry = { 0, };
2270 unsigned long proc_flags;
2271 int gotten;
2272
2273 mf_cpu = container_of(work, struct memory_failure_cpu, work);
2274 for (;;) {
2275 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2276 gotten = kfifo_get(&mf_cpu->fifo, &entry);
2277 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2278 if (!gotten)
2279 break;
2280 if (entry.flags & MF_SOFT_OFFLINE)
2281 soft_offline_page(entry.pfn, entry.flags);
2282 else
2283 memory_failure(entry.pfn, entry.flags);
2284 }
2285}
2286
2287/*
2288 * Process memory_failure work queued on the specified CPU.
2289 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
2290 */
2291void memory_failure_queue_kick(int cpu)
2292{
2293 struct memory_failure_cpu *mf_cpu;
2294
2295 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2296 cancel_work_sync(&mf_cpu->work);
2297 memory_failure_work_func(&mf_cpu->work);
2298}
2299
2300static int __init memory_failure_init(void)
2301{
2302 struct memory_failure_cpu *mf_cpu;
2303 int cpu;
2304
2305 for_each_possible_cpu(cpu) {
2306 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2307 spin_lock_init(&mf_cpu->lock);
2308 INIT_KFIFO(mf_cpu->fifo);
2309 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
2310 }
2311
2312 return 0;
2313}
2314core_initcall(memory_failure_init);
2315
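/*
 * Drop any previously defined pr_fmt() prefix for the messages below;
 * the unpoison messages carry their own "Unpoison:" tag.
 */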
2316#undef pr_fmt
2317#define pr_fmt(fmt) "" fmt
2318#define unpoison_pr_info(fmt, pfn, rs) \
2319({ \
2320 if (__ratelimit(rs)) \
2321 pr_info(fmt, pfn); \
2322})
2323
2324/**
2325 * unpoison_memory - Unpoison a previously poisoned page
2326 * @pfn: Page number of the page to be unpoisoned
2327 *
2328 * Software-unpoison a page that has been poisoned by
2329 * memory_failure() earlier.
2330 *
2331 * This is only done on the software level, so it only works
2332 * for Linux-injected failures, not real hardware failures.
2333 *
2334 * Returns 0 for success, otherwise -errno.
2335 */
2336int unpoison_memory(unsigned long pfn)
2337{
2338 struct page *page;
2339 struct page *p;
2340 int ret = -EBUSY;
2341 unsigned long count = 1;
2342 bool huge = false;
2343 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
2344 DEFAULT_RATELIMIT_BURST);
2345
2346 if (!pfn_valid(pfn))
2347 return -ENXIO;
2348
2349 p = pfn_to_page(pfn);
2350 page = compound_head(p);
2351
2352 mutex_lock(&mf_mutex);
2353
2354 if (hw_memory_failure) {
2355 unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
2356 pfn, &unpoison_rs);
2357 ret = -EOPNOTSUPP;
2358 goto unlock_mutex;
2359 }
2360
2361 if (!PageHWPoison(p)) {
2362 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
2363 pfn, &unpoison_rs);
2364 goto unlock_mutex;
2365 }
2366
2367 if (page_count(page) > 1) {
2368 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
2369 pfn, &unpoison_rs);
2370 goto unlock_mutex;
2371 }
2372
2373 if (page_mapped(page)) {
2374 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
2375 pfn, &unpoison_rs);
2376 goto unlock_mutex;
2377 }
2378
2379 if (page_mapping(page)) {
2380 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
2381 pfn, &unpoison_rs);
2382 goto unlock_mutex;
2383 }
2384
2385 if (PageSlab(page) || PageTable(page) || PageReserved(page))
2386 goto unlock_mutex;
2387
2388 ret = get_hwpoison_page(p, MF_UNPOISON);
2389 if (!ret) {
2390 if (PageHuge(p)) {
2391 huge = true;
2392 count = free_raw_hwp_pages(page, false);
2393 if (count == 0) {
2394 ret = -EBUSY;
2395 goto unlock_mutex;
2396 }
2397 }
2398 ret = TestClearPageHWPoison(page) ? 0 : -EBUSY;
2399 } else if (ret < 0) {
2400 if (ret == -EHWPOISON) {
2401 ret = put_page_back_buddy(p) ? 0 : -EBUSY;
2402 } else
2403 unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
2404 pfn, &unpoison_rs);
2405 } else {
2406 if (PageHuge(p)) {
2407 huge = true;
2408 count = free_raw_hwp_pages(page, false);
2409 if (count == 0) {
2410 ret = -EBUSY;
2411 put_page(page);
2412 goto unlock_mutex;
2413 }
2414 }
2415
2416 put_page(page);
2417 if (TestClearPageHWPoison(p)) {
2418 put_page(page);
2419 ret = 0;
2420 }
2421 }
2422
2423unlock_mutex:
2424 mutex_unlock(&mf_mutex);
2425 if (!ret) {
2426 if (!huge)
2427 num_poisoned_pages_sub(pfn, 1);
2428 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
2429 page_to_pfn(p), &unpoison_rs);
2430 }
2431 return ret;
2432}
2433EXPORT_SYMBOL(unpoison_memory);
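
/*
 * Example (illustrative sketch only): unpoison_memory() is meant for test
 * harnesses that injected the poison in software in the first place, for
 * instance a debugfs knob in a hwpoison injection module. A write handler
 * for such a knob could look roughly like this; the function name and the
 * debugfs plumbing are assumptions, not part of this file:
 *
 *	static int example_unpoison_pfn_set(void *data, u64 val)
 *	{
 *		return unpoison_memory((unsigned long)val);
 *	}
 */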
2434
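/*
 * Isolate @page in preparation for migration and add it to @pagelist on
 * success. Handles hugetlb pages, LRU pages and non-LRU movable pages.
 * The reference obtained by the caller is always dropped here.
 */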
2435static bool isolate_page(struct page *page, struct list_head *pagelist)
2436{
2437 bool isolated = false;
2438
2439 if (PageHuge(page)) {
2440 isolated = !isolate_hugetlb(page, pagelist);
2441 } else {
2442 bool lru = !__PageMovable(page);
2443
2444 if (lru)
2445 isolated = !isolate_lru_page(page);
2446 else
2447 isolated = !isolate_movable_page(page,
2448 ISOLATE_UNEVICTABLE);
2449
2450 if (isolated) {
2451 list_add(&page->lru, pagelist);
2452 if (lru)
2453 inc_node_page_state(page, NR_ISOLATED_ANON +
2454 page_is_file_lru(page));
2455 }
2456 }
2457
2458 /*
2459 * If we succeed in isolating the page, we grabbed another refcount on
2460 * the page, so we can safely drop the one we got from get_any_pages().
2461 * If we failed to isolate the page, it means that we cannot go further
2462 * and we will return an error, so drop the reference we got from
2463 * get_any_pages() as well.
2464 */
2465 put_page(page);
2466 return isolated;
2467}
2468
2469/*
2470 * soft_offline_in_use_page handles hugetlb pages and non-hugetlb pages.
2471 * If the page is a non-dirty unmapped page-cache page, it is simply invalidated.
2472 * If the page is mapped, it migrates the contents over.
2473 */
2474static int soft_offline_in_use_page(struct page *page)
2475{
2476 long ret = 0;
2477 unsigned long pfn = page_to_pfn(page);
2478 struct page *hpage = compound_head(page);
2479 char const *msg_page[] = {"page", "hugepage"};
2480 bool huge = PageHuge(page);
2481 LIST_HEAD(pagelist);
2482 struct migration_target_control mtc = {
2483 .nid = NUMA_NO_NODE,
2484 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
2485 };
2486
2487 if (!huge && PageTransHuge(hpage)) {
2488 if (try_to_split_thp_page(page)) {
2489 pr_info("soft offline: %#lx: thp split failed\n", pfn);
2490 return -EBUSY;
2491 }
2492 hpage = page;
2493 }
2494
2495 lock_page(page);
2496 if (!PageHuge(page))
2497 wait_on_page_writeback(page);
2498 if (PageHWPoison(page)) {
2499 unlock_page(page);
2500 put_page(page);
2501 pr_info("soft offline: %#lx page already poisoned\n", pfn);
2502 return 0;
2503 }
2504
2505 if (!PageHuge(page) && PageLRU(page) && !PageSwapCache(page))
2506 /*
2507 * Try to invalidate first. This should work for
2508 * non dirty unmapped page cache pages.
2509 */
2510 ret = invalidate_inode_page(page);
2511 unlock_page(page);
2512
2513 if (ret) {
2514 pr_info("soft_offline: %#lx: invalidated\n", pfn);
2515 page_handle_poison(page, false, true);
2516 return 0;
2517 }
2518
2519 if (isolate_page(hpage, &pagelist)) {
2520 ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
2521 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
2522 if (!ret) {
2523 bool release = !huge;
2524
2525 if (!page_handle_poison(page, huge, release))
2526 ret = -EBUSY;
2527 } else {
2528 if (!list_empty(&pagelist))
2529 putback_movable_pages(&pagelist);
2530
2531 pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
2532 pfn, msg_page[huge], ret, &page->flags);
2533 if (ret > 0)
2534 ret = -EBUSY;
2535 }
2536 } else {
2537 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
2538 pfn, msg_page[huge], page_count(page), &page->flags);
2539 ret = -EBUSY;
2540 }
2541 return ret;
2542}
2543
2544/**
2545 * soft_offline_page - Soft offline a page.
2546 * @pfn: pfn to soft-offline
2547 * @flags: flags. Same as memory_failure().
2548 *
2549 * Returns 0 on success,
2550 * -EOPNOTSUPP if the error event was filtered out by hwpoison_filter(),
2551 * otherwise a negative errno on failure.
2552 *
2553 * Soft offline a page, by migration or invalidation,
2554 * without killing anything. This is for the case when
2555 * a page is not corrupted yet (so it's still valid to access),
2556 * but has had a number of corrected errors and is better taken
2557 * out.
2558 *
2559 * The actual policy on when to do that is maintained by
2560 * user space.
2561 *
2562 * This should never impact any application or cause data loss,
2563 * however it might take some time.
2564 *
2565 * This is not a 100% solution for all memory, but tries to be
2566 * ``good enough'' for the majority of memory.
2567 */
2568int soft_offline_page(unsigned long pfn, int flags)
2569{
2570 int ret;
2571 bool try_again = true;
2572 struct page *page;
2573
2574 if (!pfn_valid(pfn)) {
2575 WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
2576 return -ENXIO;
2577 }
2578
2579 /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
2580 page = pfn_to_online_page(pfn);
2581 if (!page) {
2582 put_ref_page(pfn, flags);
2583 return -EIO;
2584 }
2585
2586 mutex_lock(&mf_mutex);
2587
2588 if (PageHWPoison(page)) {
2589 pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
2590 put_ref_page(pfn, flags);
2591 mutex_unlock(&mf_mutex);
2592 return 0;
2593 }
2594
2595retry:
2596 get_online_mems();
2597 ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
2598 put_online_mems();
2599
2600 if (hwpoison_filter(page)) {
2601 if (ret > 0)
2602 put_page(page);
2603
2604 mutex_unlock(&mf_mutex);
2605 return -EOPNOTSUPP;
2606 }
2607
2608 if (ret > 0) {
2609 ret = soft_offline_in_use_page(page);
2610 } else if (ret == 0) {
2611 if (!page_handle_poison(page, true, false) && try_again) {
2612 try_again = false;
2613 flags &= ~MF_COUNT_INCREASED;
2614 goto retry;
2615 }
2616 }
2617
2618 mutex_unlock(&mf_mutex);
2619
2620 return ret;
2621}
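
/*
 * Example (illustrative sketch only): soft offlining is normally driven
 * from user space, e.g. via madvise(MADV_SOFT_OFFLINE) or the memory
 * sysfs soft_offline_page attribute, rather than by calling this function
 * directly. A corrected-error threshold policy in kernel code could
 * invoke it roughly like this; the policy helper and pfn are hypothetical:
 *
 *	if (corrected_error_count(pfn) > threshold)
 *		soft_offline_page(pfn, 0);
 */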