// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cputime.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/xxhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>
#include "internal.h"
#include "mm_slot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ksm.h>

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

typedef u8 rmap_age_t;

/**
 * DOC: Overview
 *
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents. Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * The stable tree node includes information required for reverse
 * mapping from a KSM page to virtual addresses that map this page.
 *
 * In order to avoid large latencies of the rmap walks on KSM pages,
 * KSM maintains two types of nodes in the stable tree:
 *
 * * the regular nodes that keep the reverse mapping structures in a
 *   linked list
 * * the "chains" that link nodes ("dups") that represent the same
 *   write protected memory content, but each "dup" corresponds to a
 *   different KSM page copy of that content
 *
 * Internally, the regular nodes, "dups" and "chains" are represented
 * using the same struct ksm_stable_node structure.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time". The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree. (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
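
/*
 * For reference: an application typically opts a range into KSM with
 * madvise(addr, len, MADV_MERGEABLE) (or flags the whole mm via
 * prctl(PR_SET_MEMORY_MERGE, 1)), and ksmd only scans once "1" has been
 * written to /sys/kernel/mm/ksm/run; the structures below describe how
 * ksmd tracks and merges those opted-in pages.
 */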

/**
 * struct ksm_mm_slot - ksm information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 */
struct ksm_mm_slot {
	struct mm_slot slot;
	struct ksm_rmap_item *rmap_list;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct ksm_mm_slot *mm_slot;
	unsigned long address;
	struct ksm_rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct ksm_stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @chain_prune_time: time of the last full garbage collection
 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct ksm_stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct {
				struct hlist_node hlist_dup;
				struct list_head list;
			};
		};
	};
	struct hlist_head hlist;
	union {
		unsigned long kpfn;
		unsigned long chain_prune_time;
	};
	/*
	 * STABLE_NODE_CHAIN can be any negative number in
	 * rmap_hlist_len negative range, but better not -1 to be able
	 * to reliably detect underflows.
	 */
#define STABLE_NODE_CHAIN -1024
	int rmap_hlist_len;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct ksm_rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 * @age: number of scan iterations since creation
 * @remaining_skips: how many scans to skip
 */
struct ksm_rmap_item {
	struct ksm_rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	rmap_age_t age;
	rmap_age_t remaining_skips;
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct ksm_stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
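
/*
 * STABLE_NODE_DUP_HEAD is a sentinel stored in a dup's ->head to mark it as
 * hanging off a chain's hlist: it points at the second pointer inside
 * migrate_nodes, so it can never equal a real list head or &migrate_nodes
 * itself (see is_stable_node_dup() and the BUILD_BUG_ON()s in
 * remove_node_from_stable_tree()).
 */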

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct ksm_mm_slot ksm_mm_head = {
	.slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* Default number of pages to scan per batch */
#define DEFAULT_PAGES_TO_SCAN 100

/* The number of pages scanned */
static unsigned long ksm_pages_scanned;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* The number of stable_node chains */
static unsigned long ksm_stable_node_chains;

/* The number of stable_node dups linked to the stable_node chains */
static unsigned long ksm_stable_node_dups;

/* Delay in pruning stale stable_node_dups in the stable_node_chains */
static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;

/* Maximum number of page slots sharing a stable node */
static int ksm_max_page_sharing = 256;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

/* Skip pages that couldn't be de-duplicated previously */
/* Default to true at least temporarily, for testing */
static bool ksm_smart_scan = true;

/* The number of zero pages which is placed by KSM */
unsigned long ksm_zero_pages;

/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;

/* Don't scan more than max pages per batch. */
static unsigned long ksm_advisor_max_pages_to_scan = 30000;

/* Min CPU for scanning pages per scan */
#define KSM_ADVISOR_MIN_CPU 10

/* Max CPU for scanning pages per scan */
static unsigned int ksm_advisor_max_cpu = 70;

/* Target scan time in seconds to analyze all KSM candidate pages. */
static unsigned long ksm_advisor_target_scan_time = 200;

/* Exponentially weighted moving average. */
#define EWMA_WEIGHT 30

/**
 * struct advisor_ctx - metadata for KSM advisor
 * @start_scan: start time of the current scan
 * @scan_time: scan time of previous scan
 * @change: change in percent to pages_to_scan parameter
 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
 */
struct advisor_ctx {
	ktime_t start_scan;
	unsigned long scan_time;
	unsigned long change;
	unsigned long long cpu_time;
};
static struct advisor_ctx advisor_ctx;
/* Define the different advisors */
enum ksm_advisor_type {
	KSM_ADVISOR_NONE,
	KSM_ADVISOR_SCAN_TIME,
};
static enum ksm_advisor_type ksm_advisor;

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */

/* At least scan this many pages per batch. */
static unsigned long ksm_advisor_min_pages_to_scan = 500;

static void set_advisor_defaults(void)
{
	if (ksm_advisor == KSM_ADVISOR_NONE) {
		ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
	} else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
		advisor_ctx = (const struct advisor_ctx){ 0 };
		ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
	}
}
#endif /* CONFIG_SYSFS */

static inline void advisor_start_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		advisor_ctx.start_scan = ktime_get();
}

/*
 * Use previous scan time if available, otherwise use current scan time as an
 * approximation for the previous scan time.
 */
static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
					   unsigned long scan_time)
{
	return ctx->scan_time ? ctx->scan_time : scan_time;
}

/* Calculate exponential weighted moving average */
static unsigned long ewma(unsigned long prev, unsigned long curr)
{
	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
}
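
/*
 * For example, with EWMA_WEIGHT of 30, ewma(100, 200) is
 * (70 * 100 + 30 * 200) / 100 = 130: each new sample moves the average
 * 30% of the way towards the current value.
 */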

/*
 * The scan time advisor is based on the current scan rate and the target
 * scan rate.
 *
 *	new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
 *
 * To avoid perturbations it calculates a change factor of previous changes.
 * A new change factor is calculated for each iteration and it uses an
 * exponentially weighted moving average. The new pages_to_scan value is
 * multiplied with that change factor:
 *
 *	new_pages_to_scan *= change factor
 *
 * The new_pages_to_scan value is limited by the cpu min and max values. It
 * calculates the cpu percent for the last scan and the new estimated cpu
 * percent cost for the next scan. That value is capped by the cpu min and
 * max setting.
 *
 * In addition the new pages_to_scan value is capped by the max and min
 * limits.
 */
static void scan_time_advisor(void)
{
	unsigned int cpu_percent;
	unsigned long cpu_time;
	unsigned long cpu_time_diff;
	unsigned long cpu_time_diff_ms;
	unsigned long pages;
	unsigned long per_page_cost;
	unsigned long factor;
	unsigned long change;
	unsigned long last_scan_time;
	unsigned long scan_time;

	/* Convert scan time to seconds */
	scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
			    MSEC_PER_SEC);
	scan_time = scan_time ? scan_time : 1;

	/* Calculate CPU consumption of ksmd background thread */
	cpu_time = task_sched_runtime(current);
	cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
	cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;

	cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
	cpu_percent = cpu_percent ? cpu_percent : 1;
	last_scan_time = prev_scan_time(&advisor_ctx, scan_time);

	/* Calculate scan time as percentage of target scan time */
	factor = ksm_advisor_target_scan_time * 100 / scan_time;
	factor = factor ? factor : 1;

	/*
	 * Calculate scan time as percentage of last scan time and use
	 * exponentially weighted average to smooth it
	 */
	change = scan_time * 100 / last_scan_time;
	change = change ? change : 1;
	change = ewma(advisor_ctx.change, change);

	/* Calculate new scan rate based on target scan rate. */
	pages = ksm_thread_pages_to_scan * 100 / factor;
	/* Update pages_to_scan by weighted change percentage. */
	pages = pages * change / 100;

	/* Cap new pages_to_scan value */
	per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
	per_page_cost = per_page_cost ? per_page_cost : 1;

	pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
	pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
	pages = min(pages, ksm_advisor_max_pages_to_scan);

	/* Update advisor context */
	advisor_ctx.change = change;
	advisor_ctx.scan_time = scan_time;
	advisor_ctx.cpu_time = cpu_time;

	ksm_thread_pages_to_scan = pages;
	trace_ksm_advisor(scan_time, pages, cpu_percent);
}
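
/*
 * Rough example of the maths above, leaving aside the EWMA smoothing and
 * the CPU/min/max caps: with the default 200s target, a scan that took
 * 100s gives factor = 200 * 100 / 100 = 200, so the next batch becomes
 * pages_to_scan * 100 / 200, i.e. half the pages per batch.
 */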

static void advisor_stop_scan(void)
{
	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		scan_time_advisor();
}

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
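
/*
 * KSM_KMEM_CACHE(ksm_rmap_item, 0), for instance, expands to a
 * kmem_cache_create("ksm_rmap_item", ...) call sized and aligned for that
 * structure; the three slab caches below are all created this way.
 */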

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}

static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
	return dup->head == STABLE_NODE_DUP_HEAD;
}

static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
					     struct ksm_stable_node *chain)
{
	VM_BUG_ON(is_stable_node_dup(dup));
	dup->head = STABLE_NODE_DUP_HEAD;
	VM_BUG_ON(!is_stable_node_chain(chain));
	hlist_add_head(&dup->hlist_dup, &chain->hlist);
	ksm_stable_node_dups++;
}

static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(!is_stable_node_dup(dup));
	hlist_del(&dup->hlist_dup);
	ksm_stable_node_dups--;
}

static inline void stable_node_dup_del(struct ksm_stable_node *dup)
{
	VM_BUG_ON(is_stable_node_chain(dup));
	if (is_stable_node_dup(dup))
		__stable_node_dup_del(dup);
	else
		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
#ifdef CONFIG_DEBUG_VM
	dup->head = NULL;
#endif
}

static inline struct ksm_rmap_item *alloc_rmap_item(void)
{
	struct ksm_rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm->ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct ksm_stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct ksm_stable_node *stable_node)
{
	VM_BUG_ON(stable_node->rmap_hlist_len &&
		  !is_stable_node_chain(stable_node));
	kmem_cache_free(stable_node_cache, stable_node);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte;
	pte_t ptent;
	int ret;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent)) {
		page = vm_normal_page(walk->vma, addr, ptent);
	} else if (!pte_none(ptent)) {
		swp_entry_t entry = pte_to_swp_entry(ptent);

		/*
		 * As KSM pages remain KSM pages until freed, no need to wait
		 * here for migration to end.
		 */
		if (is_migration_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	/* return 1 if the page is a normal ksm page or KSM-placed zero page */
	ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
	pte_unmap_unlock(pte, ptl);
	return ret;
}
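
/*
 * A positive return from this pmd_entry callback stops the walk and is
 * passed back by walk_page_range_vma(), so break_ksm() below sees 1 as soon
 * as a KSM page (or KSM-placed zero page) is mapped at the address.
 */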

static const struct mm_walk_ops break_ksm_ops = {
	.pmd_entry = break_ksm_pmd_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static const struct mm_walk_ops break_ksm_lock_vma_ops = {
	.pmd_entry = break_ksm_pmd_entry,
	.walk_lock = PGWALK_WRLOCK,
};

/*
 * We use break_ksm to break COW on a ksm page by triggering unsharing,
 * such that the ksm page will get replaced by an exclusive anonymous page.
 *
 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem, where we would not want to touch it.
 *
 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'. We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
	vm_fault_t ret = 0;
	const struct mm_walk_ops *ops = lock_vma ?
				&break_ksm_lock_vma_ops : &break_ksm_ops;

	do {
		int ksm_page;

		cond_resched();
		ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
		if (WARN_ON_ONCE(ksm_page < 0))
			return ksm_page;
		if (!ksm_page)
			return 0;
		ret = handle_mm_fault(vma, addr,
				      FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
				      NULL);
	} while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop until we no longer find a KSM page because
	 * handle_mm_fault() may back out if there's any difficulty e.g. if
	 * pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course. The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static bool vma_ksm_compatible(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
			     VM_IO | VM_DONTEXPAND | VM_HUGETLB |
			     VM_MIXEDMAP))
		return false;		/* just ignore the advice */

	if (vma_is_dax(vma))
		return false;

#ifdef VM_SAO
	if (vma->vm_flags & VM_SAO)
		return false;
#endif
#ifdef VM_SPARC_ADI
	if (vma->vm_flags & VM_SPARC_ADI)
		return false;
#endif

	return true;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = vma_lookup(mm, addr);
	if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr, false);
	mmap_read_unlock(mm);
}

static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (is_zone_device_page(page))
		goto out_putpage;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
out_putpage:
		put_page(page);
out:
		page = NULL;
	}
	mmap_read_unlock(mm);
	return page;
}

/*
 * This helper is used for getting right index into array of tree roots.
 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
 * every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
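
/*
 * E.g. with merge_across_nodes enabled every kpfn maps to index 0, while
 * with it disabled a KSM page resident on NUMA node 2 is looked up in
 * root_stable_tree[2].
 */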

static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
						   struct rb_root *root)
{
	struct ksm_stable_node *chain = alloc_stable_node();
	VM_BUG_ON(is_stable_node_chain(dup));
	if (likely(chain)) {
		INIT_HLIST_HEAD(&chain->hlist);
		chain->chain_prune_time = jiffies;
		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
		chain->nid = NUMA_NO_NODE; /* debug */
#endif
		ksm_stable_node_chains++;

		/*
		 * Put the stable node chain in the first dimension of
		 * the stable tree and at the same time remove the old
		 * stable node.
		 */
		rb_replace_node(&dup->node, &chain->node, root);

		/*
		 * Move the old stable node to the second dimension
		 * queued in the hlist_dup. The invariant is that all
		 * dup stable_nodes in the chain->hlist point to pages
		 * that are write protected and have the exact same
		 * content.
		 */
		stable_node_chain_add_dup(dup, chain);
	}
	return chain;
}

static inline void free_stable_node_chain(struct ksm_stable_node *chain,
					  struct rb_root *root)
{
	rb_erase(&chain->node, root);
	free_stable_node(chain);
	ksm_stable_node_chains--;
}

static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
{
	struct ksm_rmap_item *rmap_item;

	/* check it's not STABLE_NODE_CHAIN or negative */
	BUG_ON(stable_node->rmap_hlist_len < 0);

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next) {
			ksm_pages_sharing--;
			trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
		} else {
			ksm_pages_shared--;
		}

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	/*
	 * We need the second aligned pointer of the migrate_nodes
	 * list_head to stay clear from the rb_parent_color union
	 * (aligned and different than any node) and also different
	 * from &migrate_nodes. This will verify that future list.h changes
	 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
	 */
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);

	trace_ksm_remove_ksm_page(stable_node->kpfn);
	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		stable_node_dup_del(stable_node);
	free_stable_node(stable_node);
}

enum get_ksm_page_flags {
	GET_KSM_PAGE_NOLOCK,
	GET_KSM_PAGE_LOCK,
	GET_KSM_PAGE_TRYLOCK
};

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive. So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node. This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * A page may still show the old key while it
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
				 enum get_ksm_page_flags flags)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
	page = pfn_to_page(kpfn);
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_ref_freeze().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * the same is in reuse_ksm_page() case; but if page is swapcache
	 * in folio_migrate_mapping(), it might still be our page,
	 * in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too. We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the ref_freeze section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (flags == GET_KSM_PAGE_TRYLOCK) {
		if (!trylock_page(page)) {
			put_page(page);
			return ERR_PTR(-EBUSY);
		}
	} else if (flags == GET_KSM_PAGE_LOCK)
		lock_page(page);

	if (flags != GET_KSM_PAGE_NOLOCK) {
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct ksm_stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		rmap_item->mm->ksm_merging_pages--;

		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
		stable_node->rmap_hlist_len--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->head = NULL;
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct ksm_rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end, bool lock_vma)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr, lock_vma);
	}
	return err;
}

static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
{
	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}

static inline struct ksm_stable_node *page_stable_node(struct page *page)
{
	return folio_stable_node(page_folio(page));
}

static inline void set_page_stable_node(struct page *page,
					struct ksm_stable_node *stable_node)
{
	VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}
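
/*
 * Pages tagged this way carry the stable_node pointer in ->mapping with the
 * PAGE_MAPPING_KSM bits set, which is what PageKsm()/folio_test_ksm() and
 * the folio_raw_mapping() call in folio_stable_node() above rely on.
 */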

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct ksm_stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	/*
	 * Page could be still mapped if this races with __mmput() running in
	 * between ksm_exit() and exit_mmap(). Just refuse to let
	 * merge_across_nodes/max_page_sharing be switched.
	 */
	err = -EBUSY;
	if (!page_mapped(page)) {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in an LRU cache waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
				    struct rb_root *root)
{
	struct ksm_stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		if (remove_stable_node(stable_node))
			return true;
		else
			return false;
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		if (remove_stable_node(dup))
			return true;
	}
	BUG_ON(!hlist_empty(&stable_node->hlist));
	free_stable_node_chain(stable_node, root);
	return false;
}

static int remove_all_stable_nodes(void)
{
	struct ksm_stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct ksm_stable_node, node);
			if (remove_stable_node_chain(stable_node,
						     root_stable_tree + nid)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct ksm_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	slot = list_entry(ksm_mm_head.slot.mm_node.next,
			  struct mm_slot, mm_node);
	ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
	     mm_slot = ksm_scan.mm_slot) {
		VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);

		mm = mm_slot->slot.mm;
		mmap_read_lock(mm);

		/*
		 * Exit right away if mm is exiting to avoid lockdep issue in
		 * the maple tree
		 */
		if (ksm_test_exit(mm))
			goto mm_exiting;

		for_each_vma(vmi, vma) {
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end, false);
			if (err)
				goto error;
		}

mm_exiting:
		remove_trailing_rmap_items(&mm_slot->rmap_list);
		mmap_read_unlock(mm);

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(mm_slot->slot.mm_node.next,
				  struct mm_slot, mm_node);
		ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->slot.hash);
			list_del(&mm_slot->slot.mm_node);
			spin_unlock(&ksm_mmlist_lock);

			mm_slot_free(mm_slot_cache, mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	mmap_read_unlock(mm);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_local_page(page);
	checksum = xxhash(addr, PAGE_SIZE, 0);
	kunmap_local(addr);
	return checksum;
}
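
/*
 * The checksum is only used to notice whether a page changed between scans
 * (compare rmap_item->oldchecksum); placement in the stable and unstable
 * trees is always decided by comparing full page contents, never by hash.
 */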

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
	int swapped;
	int err = -EFAULT;
	struct mmu_notifier_range range;
	bool anon_exclusive;
	pte_t entry;

	pvmw.address = page_address_in_vma(page, vma);
	if (pvmw.address == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
				pvmw.address + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	if (!page_vma_mapped_walk(&pvmw))
		goto out_mn;
	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
		goto out_unlock;

	anon_exclusive = PageAnonExclusive(page);
	entry = ptep_get(pvmw.pte);
	if (pte_write(entry) || pte_dirty(entry) ||
	    anon_exclusive || mm_tlb_flush_pending(mm)) {
		swapped = PageSwapCache(page);
		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
		/*
		 * Ok this is tricky: when get_user_pages_fast() runs it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the page count against the map count is racy and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 *
		 * No need to notify as we are downgrading page table to read
		 * only not changing it to point to a new page.
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}

		/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
		if (anon_exclusive &&
		    folio_try_share_anon_rmap_pte(page_folio(page), page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}

		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(entry);

		if (pte_write(entry))
			entry = pte_wrprotect(entry);

		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
	}
	*orig_pte = entry;
	err = 0;

out_unlock:
	page_vma_mapped_walk_done(&pvmw);
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct folio *kfolio = page_folio(kpage);
	struct mm_struct *mm = vma->vm_mm;
	struct folio *folio;
	pmd_t *pmd;
	pmd_t pmde;
	pte_t *ptep;
	pte_t newpte;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	struct mmu_notifier_range range;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write. So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = pmdp_get_lockless(pmd);
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		goto out;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		goto out_mn;
	if (!pte_same(ptep_get(ptep), orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}
	VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
	VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
			kfolio);

	/*
	 * No need to check ksm_use_zero_pages here: we can only have a
	 * zero_page here if ksm_use_zero_pages was enabled already.
	 */
	if (!is_zero_pfn(page_to_pfn(kpage))) {
		folio_get(kfolio);
		folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
		newpte = mk_pte(kpage, vma->vm_page_prot);
	} else {
		/*
		 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
		 * we can easily track all KSM-placed zero pages by checking if
		 * the dirty bit in zero page's PTE is set.
		 */
		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
		ksm_zero_pages++;
		mm->ksm_zero_pages++;
		/*
		 * We're replacing an anonymous page with a zero page, which is
		 * not anonymous. We need to do proper accounting otherwise we
		 * will get wrong values in /proc, and a BUG message in dmesg
		 * when tearing down the mm.
		 */
		dec_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
	/*
	 * No need to notify as we are replacing a read only page with another
	 * read only page with the same content.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, newpte);

	folio = page_folio(page);
	folio_remove_rmap_pte(folio, page, vma);
	if (!folio_mapped(folio))
		folio_free_swap(folio);
	folio_put(folio);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(&range);
out:
	return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page(). We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;

	if (PageTransCompound(page)) {
		if (split_huge_page(page))
			goto out_unlock;
	}

	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected. If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected. But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			/*
			 * Page reclaim just frees a clean page with no dirty
			 * ptes: make sure that the ksm page would be swapped.
			 */
			if (!PageDirty(page))
				SetPageDirty(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

out_unlock:
	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	mmap_read_lock(mm);
	vma = find_mergeable_vma(mm, rmap_item->address);
	if (!vma)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Unstable nid is in union with stable anon_vma: remove first */
	remove_rmap_item_from_tree(rmap_item);

	/* Must get reference to anon_vma while still holding mmap_lock */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	mmap_read_unlock(mm);
	trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
				rmap_item, mm, err);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
					   struct page *page,
					   struct ksm_rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}

static __always_inline
bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
{
	VM_BUG_ON(stable_node->rmap_hlist_len < 0);
	/*
	 * Check that at least one mapping still exists, otherwise
	 * there's not much point in merging and sharing with this
	 * stable_node, as the underlying tree_page of the other
	 * sharer is going to be freed soon.
	 */
	return stable_node->rmap_hlist_len &&
		stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
}

static __always_inline
bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
{
	return __is_page_sharing_candidate(stable_node, 0);
}
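
/*
 * For example, with the default ksm_max_page_sharing of 256, a dup whose
 * rmap_hlist_len is 255 is still a candidate at offset 0 (255 + 0 < 256)
 * but not at offset 1; the offset-1 check in stable_node_dup() below asks
 * whether a dup can take one more mapping beyond the merge already underway.
 */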
1619
1620static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
1621 struct ksm_stable_node **_stable_node,
1622 struct rb_root *root,
1623 bool prune_stale_stable_nodes)
1624{
1625 struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1626 struct hlist_node *hlist_safe;
1627 struct page *_tree_page, *tree_page = NULL;
1628 int nr = 0;
1629 int found_rmap_hlist_len;
1630
1631 if (!prune_stale_stable_nodes ||
1632 time_before(jiffies, stable_node->chain_prune_time +
1633 msecs_to_jiffies(
1634 ksm_stable_node_chains_prune_millisecs)))
1635 prune_stale_stable_nodes = false;
1636 else
1637 stable_node->chain_prune_time = jiffies;
1638
1639 hlist_for_each_entry_safe(dup, hlist_safe,
1640 &stable_node->hlist, hlist_dup) {
1641 cond_resched();
1642 /*
1643 * We must walk all stable_node_dup to prune the stale
1644 * stable nodes during lookup.
1645 *
1646 * get_ksm_page can drop the nodes from the
1647 * stable_node->hlist if they point to freed pages
1648 * (that's why we do a _safe walk). The "dup"
1649 * stable_node parameter itself will be freed from
1650 * under us if it returns NULL.
1651 */
1652 _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
1653 if (!_tree_page)
1654 continue;
1655 nr += 1;
1656 if (is_page_sharing_candidate(dup)) {
1657 if (!found ||
1658 dup->rmap_hlist_len > found_rmap_hlist_len) {
1659 if (found)
1660 put_page(tree_page);
1661 found = dup;
1662 found_rmap_hlist_len = found->rmap_hlist_len;
1663 tree_page = _tree_page;
1664
1665 /* skip put_page for found dup */
1666 if (!prune_stale_stable_nodes)
1667 break;
1668 continue;
1669 }
1670 }
1671 put_page(_tree_page);
1672 }
1673
1674 if (found) {
1675 /*
1676 * nr is counting all dups in the chain only if
1677 * prune_stale_stable_nodes is true, otherwise we may
1678 * break the loop at nr == 1 even if there are
1679 * multiple entries.
1680 */
1681 if (prune_stale_stable_nodes && nr == 1) {
1682 /*
1683 * If there's not just one entry it would
1684 * corrupt memory, better BUG_ON. In KSM
1685 * context with no lock held it's not even
1686 * fatal.
1687 */
1688 BUG_ON(stable_node->hlist.first->next);
1689
1690 /*
1691 * There's just one entry and it is below the
1692 * deduplication limit so drop the chain.
1693 */
1694 rb_replace_node(&stable_node->node, &found->node,
1695 root);
1696 free_stable_node(stable_node);
1697 ksm_stable_node_chains--;
1698 ksm_stable_node_dups--;
1699 /*
1700 * NOTE: the caller depends on the stable_node
1701 * to be equal to stable_node_dup if the chain
1702 * was collapsed.
1703 */
1704 *_stable_node = found;
1705 /*
1706 * Just for robustness, as stable_node is
1707 * otherwise left as a stable pointer, the
1708 * compiler shall optimize it away at build
1709 * time.
1710 */
1711 stable_node = NULL;
1712 } else if (stable_node->hlist.first != &found->hlist_dup &&
1713 __is_page_sharing_candidate(found, 1)) {
1714 /*
1715 * If the found stable_node dup can accept one
1716 * more future merge (in addition to the one
1717 * that is underway) and is not at the head of
1718 * the chain, put it there so next search will
1719 * be quicker in the !prune_stale_stable_nodes
1720 * case.
1721 *
1722 * NOTE: it would be inaccurate to use nr > 1
1723 * instead of checking the hlist.first pointer
1724 * directly, because in the
1725 * prune_stale_stable_nodes case "nr" isn't
1726 * the position of the found dup in the chain,
1727 * but the total number of dups in the chain.
1728 */
1729 hlist_del(&found->hlist_dup);
1730 hlist_add_head(&found->hlist_dup,
1731 &stable_node->hlist);
1732 }
1733 }
1734
1735 *_stable_node_dup = found;
1736 return tree_page;
1737}
1738
1739static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
1740 struct rb_root *root)
1741{
1742 if (!is_stable_node_chain(stable_node))
1743 return stable_node;
1744 if (hlist_empty(&stable_node->hlist)) {
1745 free_stable_node_chain(stable_node, root);
1746 return NULL;
1747 }
1748 return hlist_entry(stable_node->hlist.first,
1749 typeof(*stable_node), hlist_dup);
1750}
1751
1752/*
1753 * Like for get_ksm_page, this function can free the *_stable_node and
1754 * *_stable_node_dup if the returned tree_page is NULL.
1755 *
1756 * It can also free and overwrite *_stable_node with the found
1757 * stable_node_dup if the chain is collapsed (in which case
1758 * *_stable_node will be equal to *_stable_node_dup, as if the chain
1759 * never existed). It's up to the caller to verify tree_page is not
1760 * NULL before dereferencing *_stable_node or *_stable_node_dup.
1761 *
1762 * *_stable_node_dup is really a second output parameter of this
1763 * function and will be overwritten in all cases, the caller doesn't
1764 * need to initialize it.
1765 */
1766static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
1767 struct ksm_stable_node **_stable_node,
1768 struct rb_root *root,
1769 bool prune_stale_stable_nodes)
1770{
1771 struct ksm_stable_node *stable_node = *_stable_node;
1772 if (!is_stable_node_chain(stable_node)) {
1773 if (is_page_sharing_candidate(stable_node)) {
1774 *_stable_node_dup = stable_node;
1775 return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
1776 }
1777 /*
1778 * _stable_node_dup set to NULL means the stable_node
1779 * reached the ksm_max_page_sharing limit.
1780 */
1781 *_stable_node_dup = NULL;
1782 return NULL;
1783 }
1784 return stable_node_dup(_stable_node_dup, _stable_node, root,
1785 prune_stale_stable_nodes);
1786}
1787
1788static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
1789 struct ksm_stable_node **s_n,
1790 struct rb_root *root)
1791{
1792 return __stable_node_chain(s_n_d, s_n, root, true);
1793}
1794
1795static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
1796 struct ksm_stable_node *s_n,
1797 struct rb_root *root)
1798{
1799 struct ksm_stable_node *old_stable_node = s_n;
1800 struct page *tree_page;
1801
1802 tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
1803 /* not pruning dups so s_n cannot have changed */
1804 VM_BUG_ON(s_n != old_stable_node);
1805 return tree_page;
1806}
1807
1808/*
1809 * stable_tree_search - search for page inside the stable tree
1810 *
1811 * This function checks if there is a page inside the stable tree
1812 * with identical content to the page that we are scanning right now.
1813 *
1814 * This function returns the ksm page holding identical content if one is
1815 * found, NULL otherwise.
1816 */
1817static struct page *stable_tree_search(struct page *page)
1818{
1819 int nid;
1820 struct rb_root *root;
1821 struct rb_node **new;
1822 struct rb_node *parent;
1823 struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
1824 struct ksm_stable_node *page_node;
1825
1826 page_node = page_stable_node(page);
1827 if (page_node && page_node->head != &migrate_nodes) {
1828 /* ksm page forked */
1829 get_page(page);
1830 return page;
1831 }
1832
1833 nid = get_kpfn_nid(page_to_pfn(page));
1834 root = root_stable_tree + nid;
1835again:
1836 new = &root->rb_node;
1837 parent = NULL;
1838
1839 while (*new) {
1840 struct page *tree_page;
1841 int ret;
1842
1843 cond_resched();
1844 stable_node = rb_entry(*new, struct ksm_stable_node, node);
1845 stable_node_any = NULL;
1846 tree_page = chain_prune(&stable_node_dup, &stable_node, root);
1847 /*
1848 * NOTE: stable_node may have been freed by
1849 * chain_prune() if the returned stable_node_dup is
1850 * not NULL. stable_node_dup may have been inserted in
1851 * the rbtree instead as a regular stable_node (in
1852 * order to collapse the stable_node chain if a single
1853 * stable_node dup was found in it). In such case the
1854 * stable_node is overwritten by the callee to point
1855 * to the stable_node_dup that was collapsed in the
1856 * stable rbtree and stable_node will be equal to
1857		 * stable_node_dup, as if the chain never existed.
1858 */
1859 if (!stable_node_dup) {
1860 /*
1861 * Either all stable_node dups were full in
1862 * this stable_node chain, or this chain was
1863 * empty and should be rb_erased.
1864 */
1865 stable_node_any = stable_node_dup_any(stable_node,
1866 root);
1867 if (!stable_node_any) {
1868 /* rb_erase just run */
1869 goto again;
1870 }
1871 /*
1872			 * Take the page of any of the stable_node dups in
1873			 * this stable_node chain to let the tree walk
1874 * continue. All KSM pages belonging to the
1875 * stable_node dups in a stable_node chain
1876 * have the same content and they're
1877 * write protected at all times. Any will work
1878 * fine to continue the walk.
1879 */
1880 tree_page = get_ksm_page(stable_node_any,
1881 GET_KSM_PAGE_NOLOCK);
1882 }
1883 VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1884 if (!tree_page) {
1885 /*
1886 * If we walked over a stale stable_node,
1887 * get_ksm_page() will call rb_erase() and it
1888 * may rebalance the tree from under us. So
1889 * restart the search from scratch. Returning
1890 * NULL would be safe too, but we'd generate
1891 * false negative insertions just because some
1892 * stable_node was stale.
1893 */
1894 goto again;
1895 }
1896
1897 ret = memcmp_pages(page, tree_page);
1898 put_page(tree_page);
1899
1900 parent = *new;
1901 if (ret < 0)
1902 new = &parent->rb_left;
1903 else if (ret > 0)
1904 new = &parent->rb_right;
1905 else {
1906 if (page_node) {
1907 VM_BUG_ON(page_node->head != &migrate_nodes);
1908 /*
1909 * Test if the migrated page should be merged
1910 * into a stable node dup. If the mapcount is
1911 * 1 we can migrate it with another KSM page
1912 * without adding it to the chain.
1913 */
1914 if (page_mapcount(page) > 1)
1915 goto chain_append;
1916 }
1917
1918 if (!stable_node_dup) {
1919 /*
1920 * If the stable_node is a chain and
1921 * we got a payload match in memcmp
1922 * but we cannot merge the scanned
1923 * page in any of the existing
1924 * stable_node dups because they're
1925				 * all full, we need to wait for the
1926 * scanned page to find itself a match
1927 * in the unstable tree to create a
1928 * brand new KSM page to add later to
1929 * the dups of this stable_node.
1930 */
1931 return NULL;
1932 }
1933
1934 /*
1935 * Lock and unlock the stable_node's page (which
1936 * might already have been migrated) so that page
1937 * migration is sure to notice its raised count.
1938 * It would be more elegant to return stable_node
1939 * than kpage, but that involves more changes.
1940 */
1941 tree_page = get_ksm_page(stable_node_dup,
1942 GET_KSM_PAGE_TRYLOCK);
1943
1944 if (PTR_ERR(tree_page) == -EBUSY)
1945 return ERR_PTR(-EBUSY);
1946
1947 if (unlikely(!tree_page))
1948 /*
1949 * The tree may have been rebalanced,
1950 * so re-evaluate parent and new.
1951 */
1952 goto again;
1953 unlock_page(tree_page);
1954
1955 if (get_kpfn_nid(stable_node_dup->kpfn) !=
1956 NUMA(stable_node_dup->nid)) {
1957 put_page(tree_page);
1958 goto replace;
1959 }
1960 return tree_page;
1961 }
1962 }
1963
1964 if (!page_node)
1965 return NULL;
1966
1967 list_del(&page_node->list);
1968 DO_NUMA(page_node->nid = nid);
1969 rb_link_node(&page_node->node, parent, new);
1970 rb_insert_color(&page_node->node, root);
1971out:
1972 if (is_page_sharing_candidate(page_node)) {
1973 get_page(page);
1974 return page;
1975 } else
1976 return NULL;
1977
1978replace:
1979 /*
1980 * If stable_node was a chain and chain_prune collapsed it,
1981 * stable_node has been updated to be the new regular
1982 * stable_node. A collapse of the chain is indistinguishable
1983 * from the case there was no chain in the stable
1984 * rbtree. Otherwise stable_node is the chain and
1985 * stable_node_dup is the dup to replace.
1986 */
1987 if (stable_node_dup == stable_node) {
1988 VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1989 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1990 /* there is no chain */
1991 if (page_node) {
1992 VM_BUG_ON(page_node->head != &migrate_nodes);
1993 list_del(&page_node->list);
1994 DO_NUMA(page_node->nid = nid);
1995 rb_replace_node(&stable_node_dup->node,
1996 &page_node->node,
1997 root);
1998 if (is_page_sharing_candidate(page_node))
1999 get_page(page);
2000 else
2001 page = NULL;
2002 } else {
2003 rb_erase(&stable_node_dup->node, root);
2004 page = NULL;
2005 }
2006 } else {
2007 VM_BUG_ON(!is_stable_node_chain(stable_node));
2008 __stable_node_dup_del(stable_node_dup);
2009 if (page_node) {
2010 VM_BUG_ON(page_node->head != &migrate_nodes);
2011 list_del(&page_node->list);
2012 DO_NUMA(page_node->nid = nid);
2013 stable_node_chain_add_dup(page_node, stable_node);
2014 if (is_page_sharing_candidate(page_node))
2015 get_page(page);
2016 else
2017 page = NULL;
2018 } else {
2019 page = NULL;
2020 }
2021 }
2022 stable_node_dup->head = &migrate_nodes;
2023 list_add(&stable_node_dup->list, stable_node_dup->head);
2024 return page;
2025
2026chain_append:
2027 /* stable_node_dup could be null if it reached the limit */
2028 if (!stable_node_dup)
2029 stable_node_dup = stable_node_any;
2030 /*
2031 * If stable_node was a chain and chain_prune collapsed it,
2032 * stable_node has been updated to be the new regular
2033 * stable_node. A collapse of the chain is indistinguishable
2034 * from the case there was no chain in the stable
2035 * rbtree. Otherwise stable_node is the chain and
2036 * stable_node_dup is the dup to replace.
2037 */
2038 if (stable_node_dup == stable_node) {
2039 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
2040 /* chain is missing so create it */
2041 stable_node = alloc_stable_node_chain(stable_node_dup,
2042 root);
2043 if (!stable_node)
2044 return NULL;
2045 }
2046 /*
2047 * Add this stable_node dup that was
2048 * migrated to the stable_node chain
2049 * of the current nid for this page
2050 * content.
2051 */
2052 VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
2053 VM_BUG_ON(page_node->head != &migrate_nodes);
2054 list_del(&page_node->list);
2055 DO_NUMA(page_node->nid = nid);
2056 stable_node_chain_add_dup(page_node, stable_node);
2057 goto out;
2058}
2059
2060/*
2061 * stable_tree_insert - insert stable tree node pointing to new ksm page
2062 * into the stable tree.
2063 *
2064 * This function returns the stable tree node just allocated on success,
2065 * NULL otherwise.
2066 */
2067static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
2068{
2069 int nid;
2070 unsigned long kpfn;
2071 struct rb_root *root;
2072 struct rb_node **new;
2073 struct rb_node *parent;
2074 struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
2075 bool need_chain = false;
2076
2077 kpfn = page_to_pfn(kpage);
2078 nid = get_kpfn_nid(kpfn);
2079 root = root_stable_tree + nid;
2080again:
2081 parent = NULL;
2082 new = &root->rb_node;
2083
2084 while (*new) {
2085 struct page *tree_page;
2086 int ret;
2087
2088 cond_resched();
2089 stable_node = rb_entry(*new, struct ksm_stable_node, node);
2090 stable_node_any = NULL;
2091 tree_page = chain(&stable_node_dup, stable_node, root);
2092 if (!stable_node_dup) {
2093 /*
2094 * Either all stable_node dups were full in
2095 * this stable_node chain, or this chain was
2096 * empty and should be rb_erased.
2097 */
2098 stable_node_any = stable_node_dup_any(stable_node,
2099 root);
2100 if (!stable_node_any) {
2101 /* rb_erase just run */
2102 goto again;
2103 }
2104 /*
2105			 * Take the page of any of the stable_node dups in
2106			 * this stable_node chain to let the tree walk
2107 * continue. All KSM pages belonging to the
2108 * stable_node dups in a stable_node chain
2109 * have the same content and they're
2110 * write protected at all times. Any will work
2111 * fine to continue the walk.
2112 */
2113 tree_page = get_ksm_page(stable_node_any,
2114 GET_KSM_PAGE_NOLOCK);
2115 }
2116 VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
2117 if (!tree_page) {
2118 /*
2119 * If we walked over a stale stable_node,
2120 * get_ksm_page() will call rb_erase() and it
2121 * may rebalance the tree from under us. So
2122 * restart the search from scratch. Returning
2123 * NULL would be safe too, but we'd generate
2124 * false negative insertions just because some
2125 * stable_node was stale.
2126 */
2127 goto again;
2128 }
2129
2130 ret = memcmp_pages(kpage, tree_page);
2131 put_page(tree_page);
2132
2133 parent = *new;
2134 if (ret < 0)
2135 new = &parent->rb_left;
2136 else if (ret > 0)
2137 new = &parent->rb_right;
2138 else {
2139 need_chain = true;
2140 break;
2141 }
2142 }
2143
2144 stable_node_dup = alloc_stable_node();
2145 if (!stable_node_dup)
2146 return NULL;
2147
2148 INIT_HLIST_HEAD(&stable_node_dup->hlist);
2149 stable_node_dup->kpfn = kpfn;
2150 set_page_stable_node(kpage, stable_node_dup);
2151 stable_node_dup->rmap_hlist_len = 0;
2152 DO_NUMA(stable_node_dup->nid = nid);
2153 if (!need_chain) {
2154 rb_link_node(&stable_node_dup->node, parent, new);
2155 rb_insert_color(&stable_node_dup->node, root);
2156 } else {
2157 if (!is_stable_node_chain(stable_node)) {
2158 struct ksm_stable_node *orig = stable_node;
2159 /* chain is missing so create it */
2160 stable_node = alloc_stable_node_chain(orig, root);
2161 if (!stable_node) {
2162 free_stable_node(stable_node_dup);
2163 return NULL;
2164 }
2165 }
2166 stable_node_chain_add_dup(stable_node_dup, stable_node);
2167 }
2168
2169 return stable_node_dup;
2170}
2171
2172/*
2173 * unstable_tree_search_insert - search for identical page,
2174 * else insert rmap_item into the unstable tree.
2175 *
2176 * This function searches for a page in the unstable tree identical to the
2177 * page currently being scanned; and if no identical page is found in the
2178 * tree, we insert rmap_item as a new object into the unstable tree.
2179 *
2180 * This function returns pointer to rmap_item found to be identical
2181 * to the currently scanned page, NULL otherwise.
2182 *
2183 * This function does both searching and inserting, because they share
2184 * the same walking algorithm in an rbtree.
2185 */
2186static
2187struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
2188 struct page *page,
2189 struct page **tree_pagep)
2190{
2191 struct rb_node **new;
2192 struct rb_root *root;
2193 struct rb_node *parent = NULL;
2194 int nid;
2195
2196 nid = get_kpfn_nid(page_to_pfn(page));
2197 root = root_unstable_tree + nid;
2198 new = &root->rb_node;
2199
2200 while (*new) {
2201 struct ksm_rmap_item *tree_rmap_item;
2202 struct page *tree_page;
2203 int ret;
2204
2205 cond_resched();
2206 tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
2207 tree_page = get_mergeable_page(tree_rmap_item);
2208 if (!tree_page)
2209 return NULL;
2210
2211 /*
2212 * Don't substitute a ksm page for a forked page.
2213 */
2214 if (page == tree_page) {
2215 put_page(tree_page);
2216 return NULL;
2217 }
2218
2219 ret = memcmp_pages(page, tree_page);
2220
2221 parent = *new;
2222 if (ret < 0) {
2223 put_page(tree_page);
2224 new = &parent->rb_left;
2225 } else if (ret > 0) {
2226 put_page(tree_page);
2227 new = &parent->rb_right;
2228 } else if (!ksm_merge_across_nodes &&
2229 page_to_nid(tree_page) != nid) {
2230 /*
2231 * If tree_page has been migrated to another NUMA node,
2232 * it will be flushed out and put in the right unstable
2233 * tree next time: only merge with it when across_nodes.
2234 */
2235 put_page(tree_page);
2236 return NULL;
2237 } else {
2238 *tree_pagep = tree_page;
2239 return tree_rmap_item;
2240 }
2241 }
2242
2243 rmap_item->address |= UNSTABLE_FLAG;
2244 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
2245 DO_NUMA(rmap_item->nid = nid);
2246 rb_link_node(&rmap_item->node, parent, new);
2247 rb_insert_color(&rmap_item->node, root);
2248
2249 ksm_pages_unshared++;
2250 return NULL;
2251}
2252
2253/*
2254 * stable_tree_append - add another rmap_item to the linked list of
2255 * rmap_items hanging off a given node of the stable tree, all sharing
2256 * the same ksm page.
2257 */
2258static void stable_tree_append(struct ksm_rmap_item *rmap_item,
2259 struct ksm_stable_node *stable_node,
2260 bool max_page_sharing_bypass)
2261{
2262 /*
2263 * rmap won't find this mapping if we don't insert the
2264 * rmap_item in the right stable_node
2265 * duplicate. page_migration could break later if rmap breaks,
2266 * so we can as well crash here. We really need to check for
2267	 * rmap_hlist_len == STABLE_NODE_CHAIN, but checking for any other
2268	 * negative value works just as well: an underflow detected here
2269	 * for the first time (and not when decreasing rmap_hlist_len) would
2270	 * be a sign of memory corruption in the stable_node.
2271 */
2272 BUG_ON(stable_node->rmap_hlist_len < 0);
2273
2274 stable_node->rmap_hlist_len++;
2275 if (!max_page_sharing_bypass)
2276 /* possibly non fatal but unexpected overflow, only warn */
2277 WARN_ON_ONCE(stable_node->rmap_hlist_len >
2278 ksm_max_page_sharing);
2279
2280 rmap_item->head = stable_node;
2281 rmap_item->address |= STABLE_FLAG;
2282 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2283
2284 if (rmap_item->hlist.next)
2285 ksm_pages_sharing++;
2286 else
2287 ksm_pages_shared++;
2288
2289 rmap_item->mm->ksm_merging_pages++;
2290}
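/*
 * Counter semantics illustrated (sketch): a KSM page mapped from three
 * different virtual addresses ends up with three rmap_items on its
 * stable_node hlist; the first one added accounts for ksm_pages_shared,
 * the other two for ksm_pages_sharing, matching the pages_shared and
 * pages_sharing files under /sys/kernel/mm/ksm/.
 */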
2291
2292/*
2293 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2294 * if not, compare checksum to previous and if it's the same, see if page can
2295 * be inserted into the unstable tree, or merged with a page already there and
2296 * both transferred to the stable tree.
2297 *
2298 * @page: the page that we are searching an identical page to.
2299 * @rmap_item: the reverse mapping into the virtual address of this page
2300 */
2301static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
2302{
2303 struct mm_struct *mm = rmap_item->mm;
2304 struct ksm_rmap_item *tree_rmap_item;
2305 struct page *tree_page = NULL;
2306 struct ksm_stable_node *stable_node;
2307 struct page *kpage;
2308 unsigned int checksum;
2309 int err;
2310 bool max_page_sharing_bypass = false;
2311
2312 stable_node = page_stable_node(page);
2313 if (stable_node) {
2314 if (stable_node->head != &migrate_nodes &&
2315 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2316 NUMA(stable_node->nid)) {
2317 stable_node_dup_del(stable_node);
2318 stable_node->head = &migrate_nodes;
2319 list_add(&stable_node->list, stable_node->head);
2320 }
2321 if (stable_node->head != &migrate_nodes &&
2322 rmap_item->head == stable_node)
2323 return;
2324 /*
2325 * If it's a KSM fork, allow it to go over the sharing limit
2326 * without warnings.
2327 */
2328 if (!is_page_sharing_candidate(stable_node))
2329 max_page_sharing_bypass = true;
2330 }
2331
2332 /* We first start with searching the page inside the stable tree */
2333 kpage = stable_tree_search(page);
2334 if (kpage == page && rmap_item->head == stable_node) {
2335 put_page(kpage);
2336 return;
2337 }
2338
2339 remove_rmap_item_from_tree(rmap_item);
2340
2341 if (kpage) {
2342 if (PTR_ERR(kpage) == -EBUSY)
2343 return;
2344
2345 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2346 if (!err) {
2347 /*
2348 * The page was successfully merged:
2349 * add its rmap_item to the stable tree.
2350 */
2351 lock_page(kpage);
2352 stable_tree_append(rmap_item, page_stable_node(kpage),
2353 max_page_sharing_bypass);
2354 unlock_page(kpage);
2355 }
2356 put_page(kpage);
2357 return;
2358 }
2359
2360 /*
2361 * If the hash value of the page has changed from the last time
2362 * we calculated it, this page is changing frequently: therefore we
2363 * don't want to insert it in the unstable tree, and we don't want
2364 * to waste our time searching for something identical to it there.
2365 */
2366 checksum = calc_checksum(page);
2367 if (rmap_item->oldchecksum != checksum) {
2368 rmap_item->oldchecksum = checksum;
2369 return;
2370 }
2371
2372 /*
2373 * Same checksum as an empty page. We attempt to merge it with the
2374 * appropriate zero page if the user enabled this via sysfs.
2375 */
2376 if (ksm_use_zero_pages && (checksum == zero_checksum)) {
2377 struct vm_area_struct *vma;
2378
2379 mmap_read_lock(mm);
2380 vma = find_mergeable_vma(mm, rmap_item->address);
2381 if (vma) {
2382 err = try_to_merge_one_page(vma, page,
2383 ZERO_PAGE(rmap_item->address));
2384 trace_ksm_merge_one_page(
2385 page_to_pfn(ZERO_PAGE(rmap_item->address)),
2386 rmap_item, mm, err);
2387 } else {
2388 /*
2389 * If the vma is out of date, we do not need to
2390 * continue.
2391 */
2392 err = 0;
2393 }
2394 mmap_read_unlock(mm);
2395 /*
2396 * In case of failure, the page was not really empty, so we
2397 * need to continue. Otherwise we're done.
2398 */
2399 if (!err)
2400 return;
2401 }
2402 tree_rmap_item =
2403 unstable_tree_search_insert(rmap_item, page, &tree_page);
2404 if (tree_rmap_item) {
2405 bool split;
2406
2407 kpage = try_to_merge_two_pages(rmap_item, page,
2408 tree_rmap_item, tree_page);
2409 /*
2410 * If both pages we tried to merge belong to the same compound
2411 * page, then we actually ended up increasing the reference
2412 * count of the same compound page twice, and split_huge_page
2413 * failed.
2414 * Here we set a flag if that happened, and we use it later to
2415 * try split_huge_page again. Since we call put_page right
2416 * afterwards, the reference count will be correct and
2417 * split_huge_page should succeed.
2418 */
2419 split = PageTransCompound(page)
2420 && compound_head(page) == compound_head(tree_page);
2421 put_page(tree_page);
2422 if (kpage) {
2423 /*
2424 * The pages were successfully merged: insert new
2425 * node in the stable tree and add both rmap_items.
2426 */
2427 lock_page(kpage);
2428 stable_node = stable_tree_insert(kpage);
2429 if (stable_node) {
2430 stable_tree_append(tree_rmap_item, stable_node,
2431 false);
2432 stable_tree_append(rmap_item, stable_node,
2433 false);
2434 }
2435 unlock_page(kpage);
2436
2437 /*
2438 * If we fail to insert the page into the stable tree,
2439 * we will have 2 virtual addresses that are pointing
2440 * to a ksm page left outside the stable tree,
2441 * in which case we need to break_cow on both.
2442 */
2443 if (!stable_node) {
2444 break_cow(tree_rmap_item);
2445 break_cow(rmap_item);
2446 }
2447 } else if (split) {
2448 /*
2449 * We are here if we tried to merge two pages and
2450 * failed because they both belonged to the same
2451 * compound page. We will split the page now, but no
2452 * merging will take place.
2453 * We do not want to add the cost of a full lock; if
2454 * the page is locked, it is better to skip it and
2455 * perhaps try again later.
2456 */
2457 if (!trylock_page(page))
2458 return;
2459 split_huge_page(page);
2460 unlock_page(page);
2461 }
2462 }
2463}
2464
2465static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
2466 struct ksm_rmap_item **rmap_list,
2467 unsigned long addr)
2468{
2469 struct ksm_rmap_item *rmap_item;
2470
2471 while (*rmap_list) {
2472 rmap_item = *rmap_list;
2473 if ((rmap_item->address & PAGE_MASK) == addr)
2474 return rmap_item;
2475 if (rmap_item->address > addr)
2476 break;
2477 *rmap_list = rmap_item->rmap_list;
2478 remove_rmap_item_from_tree(rmap_item);
2479 free_rmap_item(rmap_item);
2480 }
2481
2482 rmap_item = alloc_rmap_item();
2483 if (rmap_item) {
2484 /* It has already been zeroed */
2485 rmap_item->mm = mm_slot->slot.mm;
2486 rmap_item->mm->ksm_rmap_items++;
2487 rmap_item->address = addr;
2488 rmap_item->rmap_list = *rmap_list;
2489 *rmap_list = rmap_item;
2490 }
2491 return rmap_item;
2492}
2493
2494/*
2495 * Calculate the skip interval ("skip age") for the given rmap_item age. The
2496 * age reflects how many times de-duplication of this page has already been
2497 * tried unsuccessfully. The smaller the age, the fewer scans are skipped.
2498 *
2499 * @age: rmap_item age of page
2500 */
2501static unsigned int skip_age(rmap_age_t age)
2502{
2503 if (age <= 3)
2504 return 1;
2505 if (age <= 5)
2506 return 2;
2507 if (age <= 8)
2508 return 4;
2509
2510 return 8;
2511}
2512
2513/*
2514 * Determines if a page should be skipped for the current scan.
2515 *
2516 * @page: page to check
2517 * @rmap_item: associated rmap_item of page
2518 */
2519static bool should_skip_rmap_item(struct page *page,
2520 struct ksm_rmap_item *rmap_item)
2521{
2522 rmap_age_t age;
2523
2524 if (!ksm_smart_scan)
2525 return false;
2526
2527 /*
2528	 * Never skip pages that are already KSM: cmp_and_merge_page()
2529 * will essentially ignore them, but we still have to process them
2530 * properly.
2531 */
2532 if (PageKsm(page))
2533 return false;
2534
2535 age = rmap_item->age;
2536 if (age != U8_MAX)
2537 rmap_item->age++;
2538
2539 /*
2540 * Smaller ages are not skipped, they need to get a chance to go
2541 * through the different phases of the KSM merging.
2542 */
2543 if (age < 3)
2544 return false;
2545
2546 /*
2547 * Are we still allowed to skip? If not, then don't skip it
2548 * and determine how much more often we are allowed to skip next.
2549 */
2550 if (!rmap_item->remaining_skips) {
2551 rmap_item->remaining_skips = skip_age(age);
2552 return false;
2553 }
2554
2555 /* Skip this page */
2556 ksm_pages_skipped++;
2557 rmap_item->remaining_skips--;
2558 remove_rmap_item_from_tree(rmap_item);
2559 return true;
2560}
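/*
 * Worked example for smart scan (illustrative): a page that keeps failing
 * to merge reaches age 4, so skip_age() returns 2; should_skip_rmap_item()
 * then lets the page be processed on one pass and skips it on the next two,
 * i.e. it is only checksummed/compared roughly every third pass, with the
 * interval growing to 4 and then 8 passes as the age keeps increasing.
 */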
2561
2562static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
2563{
2564 struct mm_struct *mm;
2565 struct ksm_mm_slot *mm_slot;
2566 struct mm_slot *slot;
2567 struct vm_area_struct *vma;
2568 struct ksm_rmap_item *rmap_item;
2569 struct vma_iterator vmi;
2570 int nid;
2571
2572 if (list_empty(&ksm_mm_head.slot.mm_node))
2573 return NULL;
2574
2575 mm_slot = ksm_scan.mm_slot;
2576 if (mm_slot == &ksm_mm_head) {
2577 advisor_start_scan();
2578 trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
2579
2580 /*
2581 * A number of pages can hang around indefinitely in per-cpu
2582 * LRU cache, raised page count preventing write_protect_page
2583 * from merging them. Though it doesn't really matter much,
2584 * it is puzzling to see some stuck in pages_volatile until
2585 * other activity jostles them out, and they also prevented
2586 * LTP's KSM test from succeeding deterministically; so drain
2587 * them here (here rather than on entry to ksm_do_scan(),
2588 * so we don't IPI too often when pages_to_scan is set low).
2589 */
2590 lru_add_drain_all();
2591
2592 /*
2593 * Whereas stale stable_nodes on the stable_tree itself
2594 * get pruned in the regular course of stable_tree_search(),
2595 * those moved out to the migrate_nodes list can accumulate:
2596 * so prune them once before each full scan.
2597 */
2598 if (!ksm_merge_across_nodes) {
2599 struct ksm_stable_node *stable_node, *next;
2600 struct page *page;
2601
2602 list_for_each_entry_safe(stable_node, next,
2603 &migrate_nodes, list) {
2604 page = get_ksm_page(stable_node,
2605 GET_KSM_PAGE_NOLOCK);
2606 if (page)
2607 put_page(page);
2608 cond_resched();
2609 }
2610 }
2611
2612 for (nid = 0; nid < ksm_nr_node_ids; nid++)
2613 root_unstable_tree[nid] = RB_ROOT;
2614
2615 spin_lock(&ksm_mmlist_lock);
2616 slot = list_entry(mm_slot->slot.mm_node.next,
2617 struct mm_slot, mm_node);
2618 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2619 ksm_scan.mm_slot = mm_slot;
2620 spin_unlock(&ksm_mmlist_lock);
2621 /*
2622 * Although we tested list_empty() above, a racing __ksm_exit
2623 * of the last mm on the list may have removed it since then.
2624 */
2625 if (mm_slot == &ksm_mm_head)
2626 return NULL;
2627next_mm:
2628 ksm_scan.address = 0;
2629 ksm_scan.rmap_list = &mm_slot->rmap_list;
2630 }
2631
2632 slot = &mm_slot->slot;
2633 mm = slot->mm;
2634 vma_iter_init(&vmi, mm, ksm_scan.address);
2635
2636 mmap_read_lock(mm);
2637 if (ksm_test_exit(mm))
2638 goto no_vmas;
2639
2640 for_each_vma(vmi, vma) {
2641 if (!(vma->vm_flags & VM_MERGEABLE))
2642 continue;
2643 if (ksm_scan.address < vma->vm_start)
2644 ksm_scan.address = vma->vm_start;
2645 if (!vma->anon_vma)
2646 ksm_scan.address = vma->vm_end;
2647
2648 while (ksm_scan.address < vma->vm_end) {
2649 if (ksm_test_exit(mm))
2650 break;
2651 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
2652 if (IS_ERR_OR_NULL(*page)) {
2653 ksm_scan.address += PAGE_SIZE;
2654 cond_resched();
2655 continue;
2656 }
2657 if (is_zone_device_page(*page))
2658 goto next_page;
2659 if (PageAnon(*page)) {
2660 flush_anon_page(vma, *page, ksm_scan.address);
2661 flush_dcache_page(*page);
2662 rmap_item = get_next_rmap_item(mm_slot,
2663 ksm_scan.rmap_list, ksm_scan.address);
2664 if (rmap_item) {
2665 ksm_scan.rmap_list =
2666 &rmap_item->rmap_list;
2667
2668 if (should_skip_rmap_item(*page, rmap_item))
2669 goto next_page;
2670
2671 ksm_scan.address += PAGE_SIZE;
2672 } else
2673 put_page(*page);
2674 mmap_read_unlock(mm);
2675 return rmap_item;
2676 }
2677next_page:
2678 put_page(*page);
2679 ksm_scan.address += PAGE_SIZE;
2680 cond_resched();
2681 }
2682 }
2683
2684 if (ksm_test_exit(mm)) {
2685no_vmas:
2686 ksm_scan.address = 0;
2687 ksm_scan.rmap_list = &mm_slot->rmap_list;
2688 }
2689 /*
2690 * Nuke all the rmap_items that are above this current rmap:
2691 * because there were no VM_MERGEABLE vmas with such addresses.
2692 */
2693 remove_trailing_rmap_items(ksm_scan.rmap_list);
2694
2695 spin_lock(&ksm_mmlist_lock);
2696 slot = list_entry(mm_slot->slot.mm_node.next,
2697 struct mm_slot, mm_node);
2698 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2699 if (ksm_scan.address == 0) {
2700 /*
2701 * We've completed a full scan of all vmas, holding mmap_lock
2702 * throughout, and found no VM_MERGEABLE: so do the same as
2703 * __ksm_exit does to remove this mm from all our lists now.
2704 * This applies either when cleaning up after __ksm_exit
2705 * (but beware: we can reach here even before __ksm_exit),
2706 * or when all VM_MERGEABLE areas have been unmapped (and
2707 * mmap_lock then protects against race with MADV_MERGEABLE).
2708 */
2709 hash_del(&mm_slot->slot.hash);
2710 list_del(&mm_slot->slot.mm_node);
2711 spin_unlock(&ksm_mmlist_lock);
2712
2713 mm_slot_free(mm_slot_cache, mm_slot);
2714 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2715 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2716 mmap_read_unlock(mm);
2717 mmdrop(mm);
2718 } else {
2719 mmap_read_unlock(mm);
2720 /*
2721 * mmap_read_unlock(mm) first because after
2722 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
2723 * already have been freed under us by __ksm_exit()
2724 * because the "mm_slot" is still hashed and
2725 * ksm_scan.mm_slot doesn't point to it anymore.
2726 */
2727 spin_unlock(&ksm_mmlist_lock);
2728 }
2729
2730 /* Repeat until we've completed scanning the whole list */
2731 mm_slot = ksm_scan.mm_slot;
2732 if (mm_slot != &ksm_mm_head)
2733 goto next_mm;
2734
2735 advisor_stop_scan();
2736
2737 trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
2738 ksm_scan.seqnr++;
2739 return NULL;
2740}
2741
2742/**
2743 * ksm_do_scan - the ksm scanner main worker function.
2744 * @scan_npages: number of pages we want to scan before we return.
2745 */
2746static void ksm_do_scan(unsigned int scan_npages)
2747{
2748 struct ksm_rmap_item *rmap_item;
2749 struct page *page;
2750 unsigned int npages = scan_npages;
2751
2752 while (npages-- && likely(!freezing(current))) {
2753 cond_resched();
2754 rmap_item = scan_get_next_rmap_item(&page);
2755 if (!rmap_item)
2756 return;
2757 cmp_and_merge_page(page, rmap_item);
2758 put_page(page);
2759 }
2760
2761 ksm_pages_scanned += scan_npages - npages;
2762}
2763
2764static int ksmd_should_run(void)
2765{
2766 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
2767}
2768
2769static int ksm_scan_thread(void *nothing)
2770{
2771 unsigned int sleep_ms;
2772
2773 set_freezable();
2774 set_user_nice(current, 5);
2775
2776 while (!kthread_should_stop()) {
2777 mutex_lock(&ksm_thread_mutex);
2778 wait_while_offlining();
2779 if (ksmd_should_run())
2780 ksm_do_scan(ksm_thread_pages_to_scan);
2781 mutex_unlock(&ksm_thread_mutex);
2782
2783 if (ksmd_should_run()) {
2784 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
2785 wait_event_freezable_timeout(ksm_iter_wait,
2786 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
2787 msecs_to_jiffies(sleep_ms));
2788 } else {
2789 wait_event_freezable(ksm_thread_wait,
2790 ksmd_should_run() || kthread_should_stop());
2791 }
2792 }
2793 return 0;
2794}
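/*
 * Tuning note (illustrative figures): with the scan-time advisor disabled,
 * ksmd processes up to pages_to_scan pages and then sleeps for
 * sleep_millisecs, so the steady-state scan rate is roughly
 * pages_to_scan * 1000 / sleep_millisecs pages per second, e.g.
 * 100 pages every 20ms is about 5000 pages scanned per second.
 */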
2795
2796static void __ksm_add_vma(struct vm_area_struct *vma)
2797{
2798 unsigned long vm_flags = vma->vm_flags;
2799
2800 if (vm_flags & VM_MERGEABLE)
2801 return;
2802
2803 if (vma_ksm_compatible(vma))
2804 vm_flags_set(vma, VM_MERGEABLE);
2805}
2806
2807static int __ksm_del_vma(struct vm_area_struct *vma)
2808{
2809 int err;
2810
2811 if (!(vma->vm_flags & VM_MERGEABLE))
2812 return 0;
2813
2814 if (vma->anon_vma) {
2815 err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
2816 if (err)
2817 return err;
2818 }
2819
2820 vm_flags_clear(vma, VM_MERGEABLE);
2821 return 0;
2822}
2823/**
2824 * ksm_add_vma - Mark vma as mergeable if compatible
2825 *
2826 * @vma: Pointer to vma
2827 */
2828void ksm_add_vma(struct vm_area_struct *vma)
2829{
2830 struct mm_struct *mm = vma->vm_mm;
2831
2832 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2833 __ksm_add_vma(vma);
2834}
2835
2836static void ksm_add_vmas(struct mm_struct *mm)
2837{
2838 struct vm_area_struct *vma;
2839
2840 VMA_ITERATOR(vmi, mm, 0);
2841 for_each_vma(vmi, vma)
2842 __ksm_add_vma(vma);
2843}
2844
2845static int ksm_del_vmas(struct mm_struct *mm)
2846{
2847 struct vm_area_struct *vma;
2848 int err;
2849
2850 VMA_ITERATOR(vmi, mm, 0);
2851 for_each_vma(vmi, vma) {
2852 err = __ksm_del_vma(vma);
2853 if (err)
2854 return err;
2855 }
2856 return 0;
2857}
2858
2859/**
2860 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
2861 * compatible VMA's
2862 *
2863 * @mm: Pointer to mm
2864 *
2865 * Returns 0 on success, otherwise error code
2866 */
2867int ksm_enable_merge_any(struct mm_struct *mm)
2868{
2869 int err;
2870
2871 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2872 return 0;
2873
2874 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2875 err = __ksm_enter(mm);
2876 if (err)
2877 return err;
2878 }
2879
2880 set_bit(MMF_VM_MERGE_ANY, &mm->flags);
2881 ksm_add_vmas(mm);
2882
2883 return 0;
2884}
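/*
 * ksm_enable_merge_any() and ksm_disable_merge_any() below are normally
 * reached via prctl(PR_SET_MEMORY_MERGE, 1 or 0, 0, 0, 0), whose handler
 * takes the mmap write lock around the call; that lock also serializes
 * MMF_VM_MERGE_ANY against concurrent ksm_madvise() updates of VMAs.
 */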
2885
2886/**
2887 * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
2888 * previously enabled via ksm_enable_merge_any().
2889 *
2890 * Disabling merging implies unmerging any merged pages, like setting
2891 * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
2892 * merging on all compatible VMA's remains enabled.
2893 *
2894 * @mm: Pointer to mm
2895 *
2896 * Returns 0 on success, otherwise error code
2897 */
2898int ksm_disable_merge_any(struct mm_struct *mm)
2899{
2900 int err;
2901
2902 if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2903 return 0;
2904
2905 err = ksm_del_vmas(mm);
2906 if (err) {
2907 ksm_add_vmas(mm);
2908 return err;
2909 }
2910
2911 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2912 return 0;
2913}
2914
2915int ksm_disable(struct mm_struct *mm)
2916{
2917 mmap_assert_write_locked(mm);
2918
2919 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
2920 return 0;
2921 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2922 return ksm_disable_merge_any(mm);
2923 return ksm_del_vmas(mm);
2924}
2925
2926int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2927 unsigned long end, int advice, unsigned long *vm_flags)
2928{
2929 struct mm_struct *mm = vma->vm_mm;
2930 int err;
2931
2932 switch (advice) {
2933 case MADV_MERGEABLE:
2934 if (vma->vm_flags & VM_MERGEABLE)
2935 return 0;
2936 if (!vma_ksm_compatible(vma))
2937 return 0;
2938
2939 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2940 err = __ksm_enter(mm);
2941 if (err)
2942 return err;
2943 }
2944
2945 *vm_flags |= VM_MERGEABLE;
2946 break;
2947
2948 case MADV_UNMERGEABLE:
2949 if (!(*vm_flags & VM_MERGEABLE))
2950 return 0; /* just ignore the advice */
2951
2952 if (vma->anon_vma) {
2953 err = unmerge_ksm_pages(vma, start, end, true);
2954 if (err)
2955 return err;
2956 }
2957
2958 *vm_flags &= ~VM_MERGEABLE;
2959 break;
2960 }
2961
2962 return 0;
2963}
2964EXPORT_SYMBOL_GPL(ksm_madvise);
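/*
 * Userspace reaches ksm_madvise() through madvise(2), e.g. (sketch):
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, len, MADV_MERGEABLE);	// opt the range in
 *	...
 *	madvise(p, len, MADV_UNMERGEABLE);	// break COW and opt out
 */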
2965
2966int __ksm_enter(struct mm_struct *mm)
2967{
2968 struct ksm_mm_slot *mm_slot;
2969 struct mm_slot *slot;
2970 int needs_wakeup;
2971
2972 mm_slot = mm_slot_alloc(mm_slot_cache);
2973 if (!mm_slot)
2974 return -ENOMEM;
2975
2976 slot = &mm_slot->slot;
2977
2978 /* Check ksm_run too? Would need tighter locking */
2979 needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);
2980
2981 spin_lock(&ksm_mmlist_lock);
2982 mm_slot_insert(mm_slots_hash, mm, slot);
2983 /*
2984 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
2985 * insert just behind the scanning cursor, to let the area settle
2986 * down a little; when fork is followed by immediate exec, we don't
2987 * want ksmd to waste time setting up and tearing down an rmap_list.
2988 *
2989 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
2990 * scanning cursor, otherwise KSM pages in newly forked mms will be
2991 * missed: then we might as well insert at the end of the list.
2992 */
2993 if (ksm_run & KSM_RUN_UNMERGE)
2994 list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
2995 else
2996 list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
2997 spin_unlock(&ksm_mmlist_lock);
2998
2999 set_bit(MMF_VM_MERGEABLE, &mm->flags);
3000 mmgrab(mm);
3001
3002 if (needs_wakeup)
3003 wake_up_interruptible(&ksm_thread_wait);
3004
3005 trace_ksm_enter(mm);
3006 return 0;
3007}
3008
3009void __ksm_exit(struct mm_struct *mm)
3010{
3011 struct ksm_mm_slot *mm_slot;
3012 struct mm_slot *slot;
3013 int easy_to_free = 0;
3014
3015 /*
3016 * This process is exiting: if it's straightforward (as is the
3017 * case when ksmd was never running), free mm_slot immediately.
3018 * But if it's at the cursor or has rmap_items linked to it, use
3019 * mmap_lock to synchronize with any break_cows before pagetables
3020 * are freed, and leave the mm_slot on the list for ksmd to free.
3021 * Beware: ksm may already have noticed it exiting and freed the slot.
3022 */
3023
3024 spin_lock(&ksm_mmlist_lock);
3025 slot = mm_slot_lookup(mm_slots_hash, mm);
3026 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
3027 if (mm_slot && ksm_scan.mm_slot != mm_slot) {
3028 if (!mm_slot->rmap_list) {
3029 hash_del(&slot->hash);
3030 list_del(&slot->mm_node);
3031 easy_to_free = 1;
3032 } else {
3033 list_move(&slot->mm_node,
3034 &ksm_scan.mm_slot->slot.mm_node);
3035 }
3036 }
3037 spin_unlock(&ksm_mmlist_lock);
3038
3039 if (easy_to_free) {
3040 mm_slot_free(mm_slot_cache, mm_slot);
3041 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
3042 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
3043 mmdrop(mm);
3044 } else if (mm_slot) {
3045 mmap_write_lock(mm);
3046 mmap_write_unlock(mm);
3047 }
3048
3049 trace_ksm_exit(mm);
3050}
3051
3052struct folio *ksm_might_need_to_copy(struct folio *folio,
3053 struct vm_area_struct *vma, unsigned long addr)
3054{
3055 struct page *page = folio_page(folio, 0);
3056 struct anon_vma *anon_vma = folio_anon_vma(folio);
3057 struct folio *new_folio;
3058
3059 if (folio_test_large(folio))
3060 return folio;
3061
3062 if (folio_test_ksm(folio)) {
3063 if (folio_stable_node(folio) &&
3064 !(ksm_run & KSM_RUN_UNMERGE))
3065 return folio; /* no need to copy it */
3066 } else if (!anon_vma) {
3067 return folio; /* no need to copy it */
3068 } else if (folio->index == linear_page_index(vma, addr) &&
3069 anon_vma->root == vma->anon_vma->root) {
3070 return folio; /* still no need to copy it */
3071 }
3072 if (PageHWPoison(page))
3073 return ERR_PTR(-EHWPOISON);
3074 if (!folio_test_uptodate(folio))
3075 return folio; /* let do_swap_page report the error */
3076
3077 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
3078 if (new_folio &&
3079 mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
3080 folio_put(new_folio);
3081 new_folio = NULL;
3082 }
3083 if (new_folio) {
3084 if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
3085 addr, vma)) {
3086 folio_put(new_folio);
3087 memory_failure_queue(folio_pfn(folio), 0);
3088 return ERR_PTR(-EHWPOISON);
3089 }
3090 folio_set_dirty(new_folio);
3091 __folio_mark_uptodate(new_folio);
3092 __folio_set_locked(new_folio);
3093#ifdef CONFIG_SWAP
3094 count_vm_event(KSM_SWPIN_COPY);
3095#endif
3096 }
3097
3098 return new_folio;
3099}
3100
3101void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
3102{
3103 struct ksm_stable_node *stable_node;
3104 struct ksm_rmap_item *rmap_item;
3105 int search_new_forks = 0;
3106
3107 VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
3108
3109 /*
3110 * Rely on the page lock to protect against concurrent modifications
3111 * to that page's node of the stable tree.
3112 */
3113 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3114
3115 stable_node = folio_stable_node(folio);
3116 if (!stable_node)
3117 return;
3118again:
3119 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
3120 struct anon_vma *anon_vma = rmap_item->anon_vma;
3121 struct anon_vma_chain *vmac;
3122 struct vm_area_struct *vma;
3123
3124 cond_resched();
3125 if (!anon_vma_trylock_read(anon_vma)) {
3126 if (rwc->try_lock) {
3127 rwc->contended = true;
3128 return;
3129 }
3130 anon_vma_lock_read(anon_vma);
3131 }
3132 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
3133 0, ULONG_MAX) {
3134 unsigned long addr;
3135
3136 cond_resched();
3137 vma = vmac->vma;
3138
3139 /* Ignore the stable/unstable/sqnr flags */
3140 addr = rmap_item->address & PAGE_MASK;
3141
3142 if (addr < vma->vm_start || addr >= vma->vm_end)
3143 continue;
3144 /*
3145 * Initially we examine only the vma which covers this
3146 * rmap_item; but later, if there is still work to do,
3147 * we examine covering vmas in other mms: in case they
3148 * were forked from the original since ksmd passed.
3149 */
3150 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
3151 continue;
3152
3153 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
3154 continue;
3155
3156 if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
3157 anon_vma_unlock_read(anon_vma);
3158 return;
3159 }
3160 if (rwc->done && rwc->done(folio)) {
3161 anon_vma_unlock_read(anon_vma);
3162 return;
3163 }
3164 }
3165 anon_vma_unlock_read(anon_vma);
3166 }
3167 if (!search_new_forks++)
3168 goto again;
3169}
3170
3171#ifdef CONFIG_MEMORY_FAILURE
3172/*
3173 * Collect processes when the error hits a KSM page.
3174 */
3175void collect_procs_ksm(struct page *page, struct list_head *to_kill,
3176 int force_early)
3177{
3178 struct ksm_stable_node *stable_node;
3179 struct ksm_rmap_item *rmap_item;
3180 struct folio *folio = page_folio(page);
3181 struct vm_area_struct *vma;
3182 struct task_struct *tsk;
3183
3184 stable_node = folio_stable_node(folio);
3185 if (!stable_node)
3186 return;
3187 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
3188 struct anon_vma *av = rmap_item->anon_vma;
3189
3190 anon_vma_lock_read(av);
3191 rcu_read_lock();
3192 for_each_process(tsk) {
3193 struct anon_vma_chain *vmac;
3194 unsigned long addr;
3195 struct task_struct *t =
3196 task_early_kill(tsk, force_early);
3197 if (!t)
3198 continue;
3199 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
3200 ULONG_MAX)
3201 {
3202 vma = vmac->vma;
3203 if (vma->vm_mm == t->mm) {
3204 addr = rmap_item->address & PAGE_MASK;
3205 add_to_kill_ksm(t, page, vma, to_kill,
3206 addr);
3207 }
3208 }
3209 }
3210 rcu_read_unlock();
3211 anon_vma_unlock_read(av);
3212 }
3213}
3214#endif
3215
3216#ifdef CONFIG_MIGRATION
3217void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
3218{
3219 struct ksm_stable_node *stable_node;
3220
3221 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3222 VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
3223 VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);
3224
3225 stable_node = folio_stable_node(folio);
3226 if (stable_node) {
3227 VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
3228 stable_node->kpfn = folio_pfn(newfolio);
3229 /*
3230 * newfolio->mapping was set in advance; now we need smp_wmb()
3231 * to make sure that the new stable_node->kpfn is visible
3232 * to get_ksm_page() before it can see that folio->mapping
3233 * has gone stale (or that folio_test_swapcache has been cleared).
3234 */
3235 smp_wmb();
3236 set_page_stable_node(&folio->page, NULL);
3237 }
3238}
3239#endif /* CONFIG_MIGRATION */
3240
3241#ifdef CONFIG_MEMORY_HOTREMOVE
3242static void wait_while_offlining(void)
3243{
3244 while (ksm_run & KSM_RUN_OFFLINE) {
3245 mutex_unlock(&ksm_thread_mutex);
3246 wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
3247 TASK_UNINTERRUPTIBLE);
3248 mutex_lock(&ksm_thread_mutex);
3249 }
3250}
3251
3252static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
3253 unsigned long start_pfn,
3254 unsigned long end_pfn)
3255{
3256 if (stable_node->kpfn >= start_pfn &&
3257 stable_node->kpfn < end_pfn) {
3258 /*
3259 * Don't get_ksm_page, page has already gone:
3260 * which is why we keep kpfn instead of page*
3261 */
3262 remove_node_from_stable_tree(stable_node);
3263 return true;
3264 }
3265 return false;
3266}
3267
3268static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
3269 unsigned long start_pfn,
3270 unsigned long end_pfn,
3271 struct rb_root *root)
3272{
3273 struct ksm_stable_node *dup;
3274 struct hlist_node *hlist_safe;
3275
3276 if (!is_stable_node_chain(stable_node)) {
3277 VM_BUG_ON(is_stable_node_dup(stable_node));
3278 return stable_node_dup_remove_range(stable_node, start_pfn,
3279 end_pfn);
3280 }
3281
3282 hlist_for_each_entry_safe(dup, hlist_safe,
3283 &stable_node->hlist, hlist_dup) {
3284 VM_BUG_ON(!is_stable_node_dup(dup));
3285 stable_node_dup_remove_range(dup, start_pfn, end_pfn);
3286 }
3287 if (hlist_empty(&stable_node->hlist)) {
3288 free_stable_node_chain(stable_node, root);
3289 return true; /* notify caller that tree was rebalanced */
3290 } else
3291 return false;
3292}
3293
3294static void ksm_check_stable_tree(unsigned long start_pfn,
3295 unsigned long end_pfn)
3296{
3297 struct ksm_stable_node *stable_node, *next;
3298 struct rb_node *node;
3299 int nid;
3300
3301 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
3302 node = rb_first(root_stable_tree + nid);
3303 while (node) {
3304 stable_node = rb_entry(node, struct ksm_stable_node, node);
3305 if (stable_node_chain_remove_range(stable_node,
3306 start_pfn, end_pfn,
3307 root_stable_tree +
3308 nid))
3309 node = rb_first(root_stable_tree + nid);
3310 else
3311 node = rb_next(node);
3312 cond_resched();
3313 }
3314 }
3315 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
3316 if (stable_node->kpfn >= start_pfn &&
3317 stable_node->kpfn < end_pfn)
3318 remove_node_from_stable_tree(stable_node);
3319 cond_resched();
3320 }
3321}
3322
3323static int ksm_memory_callback(struct notifier_block *self,
3324 unsigned long action, void *arg)
3325{
3326 struct memory_notify *mn = arg;
3327
3328 switch (action) {
3329 case MEM_GOING_OFFLINE:
3330 /*
3331 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
3332 * and remove_all_stable_nodes() while memory is going offline:
3333 * it is unsafe for them to touch the stable tree at this time.
3334 * But unmerge_ksm_pages(), rmap lookups and other entry points
3335 * which do not need the ksm_thread_mutex are all safe.
3336 */
3337 mutex_lock(&ksm_thread_mutex);
3338 ksm_run |= KSM_RUN_OFFLINE;
3339 mutex_unlock(&ksm_thread_mutex);
3340 break;
3341
3342 case MEM_OFFLINE:
3343 /*
3344 * Most of the work is done by page migration; but there might
3345 * be a few stable_nodes left over, still pointing to struct
3346 * pages which have been offlined: prune those from the tree,
3347 * otherwise get_ksm_page() might later try to access a
3348 * non-existent struct page.
3349 */
3350 ksm_check_stable_tree(mn->start_pfn,
3351 mn->start_pfn + mn->nr_pages);
3352 fallthrough;
3353 case MEM_CANCEL_OFFLINE:
3354 mutex_lock(&ksm_thread_mutex);
3355 ksm_run &= ~KSM_RUN_OFFLINE;
3356 mutex_unlock(&ksm_thread_mutex);
3357
3358 smp_mb(); /* wake_up_bit advises this */
3359 wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
3360 break;
3361 }
3362 return NOTIFY_OK;
3363}
3364#else
3365static void wait_while_offlining(void)
3366{
3367}
3368#endif /* CONFIG_MEMORY_HOTREMOVE */
3369
3370#ifdef CONFIG_PROC_FS
3371long ksm_process_profit(struct mm_struct *mm)
3372{
3373 return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
3374 mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
3375}
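/*
 * Worked example (illustrative numbers): with 4KiB pages and a
 * struct ksm_rmap_item of roughly 64 bytes on 64-bit, a process with
 * 10000 merged + zero pages and 12000 rmap_items has a profit of about
 * 10000 * 4096 - 12000 * 64, i.e. roughly 40MB of memory saved, as also
 * exposed through /proc/<pid>/ksm_stat.
 */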
3376#endif /* CONFIG_PROC_FS */
3377
3378#ifdef CONFIG_SYSFS
3379/*
3380 * This all compiles without CONFIG_SYSFS, but is a waste of space.
3381 */
3382
3383#define KSM_ATTR_RO(_name) \
3384 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3385#define KSM_ATTR(_name) \
3386 static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3387
3388static ssize_t sleep_millisecs_show(struct kobject *kobj,
3389 struct kobj_attribute *attr, char *buf)
3390{
3391 return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
3392}
3393
3394static ssize_t sleep_millisecs_store(struct kobject *kobj,
3395 struct kobj_attribute *attr,
3396 const char *buf, size_t count)
3397{
3398 unsigned int msecs;
3399 int err;
3400
3401 err = kstrtouint(buf, 10, &msecs);
3402 if (err)
3403 return -EINVAL;
3404
3405 ksm_thread_sleep_millisecs = msecs;
3406 wake_up_interruptible(&ksm_iter_wait);
3407
3408 return count;
3409}
3410KSM_ATTR(sleep_millisecs);
3411
3412static ssize_t pages_to_scan_show(struct kobject *kobj,
3413 struct kobj_attribute *attr, char *buf)
3414{
3415 return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
3416}
3417
3418static ssize_t pages_to_scan_store(struct kobject *kobj,
3419 struct kobj_attribute *attr,
3420 const char *buf, size_t count)
3421{
3422 unsigned int nr_pages;
3423 int err;
3424
3425 if (ksm_advisor != KSM_ADVISOR_NONE)
3426 return -EINVAL;
3427
3428 err = kstrtouint(buf, 10, &nr_pages);
3429 if (err)
3430 return -EINVAL;
3431
3432 ksm_thread_pages_to_scan = nr_pages;
3433
3434 return count;
3435}
3436KSM_ATTR(pages_to_scan);
3437
3438static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
3439 char *buf)
3440{
3441 return sysfs_emit(buf, "%lu\n", ksm_run);
3442}
3443
3444static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
3445 const char *buf, size_t count)
3446{
3447 unsigned int flags;
3448 int err;
3449
3450 err = kstrtouint(buf, 10, &flags);
3451 if (err)
3452 return -EINVAL;
3453 if (flags > KSM_RUN_UNMERGE)
3454 return -EINVAL;
3455
3456 /*
3457 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
3458 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
3459 * breaking COW to free the pages_shared (but leaves mm_slots
3460 * on the list for when ksmd may be set running again).
3461 */
3462
3463 mutex_lock(&ksm_thread_mutex);
3464 wait_while_offlining();
3465 if (ksm_run != flags) {
3466 ksm_run = flags;
3467 if (flags & KSM_RUN_UNMERGE) {
3468 set_current_oom_origin();
3469 err = unmerge_and_remove_all_rmap_items();
3470 clear_current_oom_origin();
3471 if (err) {
3472 ksm_run = KSM_RUN_STOP;
3473 count = err;
3474 }
3475 }
3476 }
3477 mutex_unlock(&ksm_thread_mutex);
3478
3479 if (flags & KSM_RUN_MERGE)
3480 wake_up_interruptible(&ksm_thread_wait);
3481
3482 return count;
3483}
3484KSM_ATTR(run);
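/*
 * Typical usage of the knob above (see Documentation/admin-guide/mm/ksm.rst):
 *
 *	echo 1 > /sys/kernel/mm/ksm/run		# start ksmd
 *	echo 0 > /sys/kernel/mm/ksm/run		# stop ksmd, keep merged pages
 *	echo 2 > /sys/kernel/mm/ksm/run		# stop ksmd and unmerge all pages
 */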
3485
3486#ifdef CONFIG_NUMA
3487static ssize_t merge_across_nodes_show(struct kobject *kobj,
3488 struct kobj_attribute *attr, char *buf)
3489{
3490 return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
3491}
3492
3493static ssize_t merge_across_nodes_store(struct kobject *kobj,
3494 struct kobj_attribute *attr,
3495 const char *buf, size_t count)
3496{
3497 int err;
3498 unsigned long knob;
3499
3500 err = kstrtoul(buf, 10, &knob);
3501 if (err)
3502 return err;
3503 if (knob > 1)
3504 return -EINVAL;
3505
3506 mutex_lock(&ksm_thread_mutex);
3507 wait_while_offlining();
3508 if (ksm_merge_across_nodes != knob) {
3509 if (ksm_pages_shared || remove_all_stable_nodes())
3510 err = -EBUSY;
3511 else if (root_stable_tree == one_stable_tree) {
3512 struct rb_root *buf;
3513 /*
3514 * This is the first time that we switch away from the
3515 * default of merging across nodes: must now allocate
3516 * a buffer to hold as many roots as may be needed.
3517 * Allocate stable and unstable together:
3518 * MAXSMP NODES_SHIFT 10 will use 16kB.
3519 */
3520 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
3521 GFP_KERNEL);
3522			/* Assume a zero-filled buffer gives empty RB_ROOT trees */
3523 if (!buf)
3524 err = -ENOMEM;
3525 else {
3526 root_stable_tree = buf;
3527 root_unstable_tree = buf + nr_node_ids;
3528 /* Stable tree is empty but not the unstable */
3529 root_unstable_tree[0] = one_unstable_tree[0];
3530 }
3531 }
3532 if (!err) {
3533 ksm_merge_across_nodes = knob;
3534 ksm_nr_node_ids = knob ? 1 : nr_node_ids;
3535 }
3536 }
3537 mutex_unlock(&ksm_thread_mutex);
3538
3539 return err ? err : count;
3540}
3541KSM_ATTR(merge_across_nodes);
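/*
 * Example (sketch): merge_across_nodes can only be changed while there are
 * no merged pages, otherwise the store above returns -EBUSY:
 *
 *	echo 2 > /sys/kernel/mm/ksm/run
 *	echo 0 > /sys/kernel/mm/ksm/merge_across_nodes
 *	echo 1 > /sys/kernel/mm/ksm/run
 */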
3542#endif
3543
3544static ssize_t use_zero_pages_show(struct kobject *kobj,
3545 struct kobj_attribute *attr, char *buf)
3546{
3547 return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
3548}
3549static ssize_t use_zero_pages_store(struct kobject *kobj,
3550 struct kobj_attribute *attr,
3551 const char *buf, size_t count)
3552{
3553 int err;
3554 bool value;
3555
3556 err = kstrtobool(buf, &value);
3557 if (err)
3558 return -EINVAL;
3559
3560 ksm_use_zero_pages = value;
3561
3562 return count;
3563}
3564KSM_ATTR(use_zero_pages);
3565
3566static ssize_t max_page_sharing_show(struct kobject *kobj,
3567 struct kobj_attribute *attr, char *buf)
3568{
3569 return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
3570}
3571
3572static ssize_t max_page_sharing_store(struct kobject *kobj,
3573 struct kobj_attribute *attr,
3574 const char *buf, size_t count)
3575{
3576 int err;
3577 int knob;
3578
3579 err = kstrtoint(buf, 10, &knob);
3580 if (err)
3581 return err;
3582 /*
3583 * When a KSM page is created it is shared by 2 mappings. This
3584 * being a signed comparison, it implicitly verifies it's not
3585 * negative.
3586 */
3587 if (knob < 2)
3588 return -EINVAL;
3589
3590 if (READ_ONCE(ksm_max_page_sharing) == knob)
3591 return count;
3592
3593 mutex_lock(&ksm_thread_mutex);
3594 wait_while_offlining();
3595 if (ksm_max_page_sharing != knob) {
3596 if (ksm_pages_shared || remove_all_stable_nodes())
3597 err = -EBUSY;
3598 else
3599 ksm_max_page_sharing = knob;
3600 }
3601 mutex_unlock(&ksm_thread_mutex);
3602
3603 return err ? err : count;
3604}
3605KSM_ATTR(max_page_sharing);
3606
3607static ssize_t pages_scanned_show(struct kobject *kobj,
3608 struct kobj_attribute *attr, char *buf)
3609{
3610 return sysfs_emit(buf, "%lu\n", ksm_pages_scanned);
3611}
3612KSM_ATTR_RO(pages_scanned);
3613
3614static ssize_t pages_shared_show(struct kobject *kobj,
3615 struct kobj_attribute *attr, char *buf)
3616{
3617 return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
3618}
3619KSM_ATTR_RO(pages_shared);
3620
3621static ssize_t pages_sharing_show(struct kobject *kobj,
3622 struct kobj_attribute *attr, char *buf)
3623{
3624 return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
3625}
3626KSM_ATTR_RO(pages_sharing);
3627
3628static ssize_t pages_unshared_show(struct kobject *kobj,
3629 struct kobj_attribute *attr, char *buf)
3630{
3631 return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
3632}
3633KSM_ATTR_RO(pages_unshared);
3634
3635static ssize_t pages_volatile_show(struct kobject *kobj,
3636 struct kobj_attribute *attr, char *buf)
3637{
3638 long ksm_pages_volatile;
3639
3640 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3641 - ksm_pages_sharing - ksm_pages_unshared;
3642 /*
3643 * It was not worth any locking to calculate that statistic,
3644 * but it might therefore sometimes be negative: conceal that.
3645 */
3646 if (ksm_pages_volatile < 0)
3647 ksm_pages_volatile = 0;
3648 return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
3649}
3650KSM_ATTR_RO(pages_volatile);
3651
3652static ssize_t pages_skipped_show(struct kobject *kobj,
3653 struct kobj_attribute *attr, char *buf)
3654{
3655 return sysfs_emit(buf, "%lu\n", ksm_pages_skipped);
3656}
3657KSM_ATTR_RO(pages_skipped);
3658
3659static ssize_t ksm_zero_pages_show(struct kobject *kobj,
3660 struct kobj_attribute *attr, char *buf)
3661{
3662 return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
3663}
3664KSM_ATTR_RO(ksm_zero_pages);
3665
3666static ssize_t general_profit_show(struct kobject *kobj,
3667 struct kobj_attribute *attr, char *buf)
3668{
3669 long general_profit;
3670
3671 general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
3672 ksm_rmap_items * sizeof(struct ksm_rmap_item);
3673
3674 return sysfs_emit(buf, "%ld\n", general_profit);
3675}
3676KSM_ATTR_RO(general_profit);
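
/*
 * Illustrative example for the calculation above (the ~64 byte size of
 * struct ksm_rmap_item is an assumption; the real size depends on the
 * architecture and config): with 4 KiB pages, 10000 pages_sharing, no
 * zero pages and 50000 rmap_items,
 *
 *	(10000 + 0) * 4096 - 50000 * 64 = 37760000 bytes (~36 MiB)
 *
 * i.e. the memory saved by sharing minus the metadata KSM spends on
 * tracking candidate pages.
 */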

static ssize_t stable_node_dups_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
}
KSM_ATTR_RO(stable_node_dups);

static ssize_t stable_node_chains_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
}
KSM_ATTR_RO(stable_node_chains);

static ssize_t
stable_node_chains_prune_millisecs_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
}

static ssize_t
stable_node_chains_prune_millisecs_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	ksm_stable_node_chains_prune_millisecs = msecs;

	return count;
}
KSM_ATTR(stable_node_chains_prune_millisecs);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static ssize_t smart_scan_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_smart_scan);
}

static ssize_t smart_scan_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return -EINVAL;

	ksm_smart_scan = value;
	return count;
}
KSM_ATTR(smart_scan);

static ssize_t advisor_mode_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	const char *output;

	/*
	 * Only two advisor modes exist, so treat anything other than
	 * "none" as scan-time; this keeps "output" from ever being
	 * used uninitialized.
	 */
	if (ksm_advisor == KSM_ADVISOR_NONE)
		output = "[none] scan-time";
	else /* KSM_ADVISOR_SCAN_TIME */
		output = "none [scan-time]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t advisor_mode_store(struct kobject *kobj,
				  struct kobj_attribute *attr, const char *buf,
				  size_t count)
{
	enum ksm_advisor_type curr_advisor = ksm_advisor;

	if (sysfs_streq("scan-time", buf))
		ksm_advisor = KSM_ADVISOR_SCAN_TIME;
	else if (sysfs_streq("none", buf))
		ksm_advisor = KSM_ADVISOR_NONE;
	else
		return -EINVAL;

	/* Set advisor default values */
	if (curr_advisor != ksm_advisor)
		set_advisor_defaults();

	return count;
}
KSM_ATTR(advisor_mode);

static ssize_t advisor_max_cpu_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
}

static ssize_t advisor_max_cpu_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_cpu = value;
	return count;
}
KSM_ATTR(advisor_max_cpu);

static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
					      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
}

static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_min_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_min_pages_to_scan);

static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
					      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
}

static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_max_pages_to_scan);

static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
}

static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;
	if (value < 1)
		return -EINVAL;

	ksm_advisor_target_scan_time = value;
	return count;
}
KSM_ATTR(advisor_target_scan_time);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_scanned_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&pages_skipped_attr.attr,
	&ksm_zero_pages_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	&general_profit_attr.attr,
	&smart_scan_attr.attr,
	&advisor_mode_attr.attr,
	&advisor_max_cpu_attr.attr,
	&advisor_min_pages_to_scan_attr.attr,
	&advisor_max_pages_to_scan_attr.attr,
	&advisor_target_scan_time_attr.attr,
	NULL,
};

static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */
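
/*
 * The attributes above are exported under /sys/kernel/mm/ksm/. A minimal,
 * illustrative userspace sketch for sampling them (assumes CONFIG_SYSFS and
 * the standard C library; the counter names are the attribute names above):
 *
 *	#include <stdio.h>
 *
 *	static long read_ksm_counter(const char *name)
 *	{
 *		char path[256];
 *		long val = -1;
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%ld", &val) != 1)
 *			val = -1;
 *		fclose(f);
 *		return val;
 *	}
 *
 *	int main(void)
 *	{
 *		printf("pages_shared:   %ld\n", read_ksm_counter("pages_shared"));
 *		printf("pages_sharing:  %ld\n", read_ksm_counter("pages_sharing"));
 *		printf("general_profit: %ld\n", read_ksm_counter("general_profit"));
 *		return 0;
 *	}
 */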

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);
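
/*
 * Usage sketch (illustrative only): with ksmd started via
 * /sys/kernel/mm/ksm/run, an application still has to opt its memory in
 * with madvise(MADV_MERGEABLE) before ksmd will scan it. Assuming a Linux
 * libc that exposes MADV_MERGEABLE in <sys/mman.h>:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64 << 20;	// 64 MiB of identical pages
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		memset(buf, 0x5a, len);
 *		if (madvise(buf, len, MADV_MERGEABLE))
 *			perror("madvise");	// e.g. kernel without CONFIG_KSM
 *		pause();	// give ksmd time to scan and merge
 *		return 0;
 *	}
 */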
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Memory merging support.
4 *
5 * This code enables dynamic sharing of identical pages found in different
6 * memory areas, even if they are not shared by fork()
7 *
8 * Copyright (C) 2008-2009 Red Hat, Inc.
9 * Authors:
10 * Izik Eidus
11 * Andrea Arcangeli
12 * Chris Wright
13 * Hugh Dickins
14 */
15
16#include <linux/errno.h>
17#include <linux/mm.h>
18#include <linux/fs.h>
19#include <linux/mman.h>
20#include <linux/sched.h>
21#include <linux/sched/mm.h>
22#include <linux/sched/coredump.h>
23#include <linux/rwsem.h>
24#include <linux/pagemap.h>
25#include <linux/rmap.h>
26#include <linux/spinlock.h>
27#include <linux/xxhash.h>
28#include <linux/delay.h>
29#include <linux/kthread.h>
30#include <linux/wait.h>
31#include <linux/slab.h>
32#include <linux/rbtree.h>
33#include <linux/memory.h>
34#include <linux/mmu_notifier.h>
35#include <linux/swap.h>
36#include <linux/ksm.h>
37#include <linux/hashtable.h>
38#include <linux/freezer.h>
39#include <linux/oom.h>
40#include <linux/numa.h>
41
42#include <asm/tlbflush.h>
43#include "internal.h"
44
45#ifdef CONFIG_NUMA
46#define NUMA(x) (x)
47#define DO_NUMA(x) do { (x); } while (0)
48#else
49#define NUMA(x) (0)
50#define DO_NUMA(x) do { } while (0)
51#endif
52
53/**
54 * DOC: Overview
55 *
56 * A few notes about the KSM scanning process,
57 * to make it easier to understand the data structures below:
58 *
59 * In order to reduce excessive scanning, KSM sorts the memory pages by their
60 * contents into a data structure that holds pointers to the pages' locations.
61 *
62 * Since the contents of the pages may change at any moment, KSM cannot just
63 * insert the pages into a normal sorted tree and expect it to find anything.
64 * Therefore KSM uses two data structures - the stable and the unstable tree.
65 *
66 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
67 * by their contents. Because each such page is write-protected, searching on
68 * this tree is fully assured to be working (except when pages are unmapped),
69 * and therefore this tree is called the stable tree.
70 *
71 * The stable tree node includes information required for reverse
72 * mapping from a KSM page to virtual addresses that map this page.
73 *
74 * In order to avoid large latencies of the rmap walks on KSM pages,
75 * KSM maintains two types of nodes in the stable tree:
76 *
77 * * the regular nodes that keep the reverse mapping structures in a
78 * linked list
79 * * the "chains" that link nodes ("dups") that represent the same
80 * write protected memory content, but each "dup" corresponds to a
81 * different KSM page copy of that content
82 *
83 * Internally, the regular nodes, "dups" and "chains" are represented
84 * using the same struct stable_node structure.
85 *
86 * In addition to the stable tree, KSM uses a second data structure called the
87 * unstable tree: this tree holds pointers to pages which have been found to
88 * be "unchanged for a period of time". The unstable tree sorts these pages
89 * by their contents, but since they are not write-protected, KSM cannot rely
90 * upon the unstable tree to work correctly - the unstable tree is liable to
91 * be corrupted as its contents are modified, and so it is called unstable.
92 *
93 * KSM solves this problem by several techniques:
94 *
95 * 1) The unstable tree is flushed every time KSM completes scanning all
96 * memory areas, and then the tree is rebuilt again from the beginning.
97 * 2) KSM will only insert into the unstable tree, pages whose hash value
98 * has not changed since the previous scan of all memory areas.
99 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
100 * colors of the nodes and not on their contents, assuring that even when
101 * the tree gets "corrupted" it won't get out of balance, so scanning time
102 * remains the same (also, searching and inserting nodes in an rbtree uses
103 * the same algorithm, so we have no overhead when we flush and rebuild).
104 * 4) KSM never flushes the stable tree, which means that even if it were to
105 * take 10 attempts to find a page in the unstable tree, once it is found,
106 * it is secured in the stable tree. (When we scan a new page, we first
107 * compare it against the stable tree, and then against the unstable tree.)
108 *
109 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
110 * stable trees and multiple unstable trees: one of each for each NUMA node.
111 */
112
113/**
114 * struct mm_slot - ksm information per mm that is being scanned
115 * @link: link to the mm_slots hash list
116 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
117 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
118 * @mm: the mm that this information is valid for
119 */
120struct mm_slot {
121 struct hlist_node link;
122 struct list_head mm_list;
123 struct rmap_item *rmap_list;
124 struct mm_struct *mm;
125};
126
127/**
128 * struct ksm_scan - cursor for scanning
129 * @mm_slot: the current mm_slot we are scanning
130 * @address: the next address inside that to be scanned
131 * @rmap_list: link to the next rmap to be scanned in the rmap_list
132 * @seqnr: count of completed full scans (needed when removing unstable node)
133 *
134 * There is only the one ksm_scan instance of this cursor structure.
135 */
136struct ksm_scan {
137 struct mm_slot *mm_slot;
138 unsigned long address;
139 struct rmap_item **rmap_list;
140 unsigned long seqnr;
141};
142
143/**
144 * struct stable_node - node of the stable rbtree
145 * @node: rb node of this ksm page in the stable tree
146 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
147 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
148 * @list: linked into migrate_nodes, pending placement in the proper node tree
149 * @hlist: hlist head of rmap_items using this ksm page
150 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
151 * @chain_prune_time: time of the last full garbage collection
152 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
153 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
154 */
155struct stable_node {
156 union {
157 struct rb_node node; /* when node of stable tree */
158 struct { /* when listed for migration */
159 struct list_head *head;
160 struct {
161 struct hlist_node hlist_dup;
162 struct list_head list;
163 };
164 };
165 };
166 struct hlist_head hlist;
167 union {
168 unsigned long kpfn;
169 unsigned long chain_prune_time;
170 };
171 /*
172 * STABLE_NODE_CHAIN can be any negative number in
173 * rmap_hlist_len negative range, but better not -1 to be able
174 * to reliably detect underflows.
175 */
176#define STABLE_NODE_CHAIN -1024
177 int rmap_hlist_len;
178#ifdef CONFIG_NUMA
179 int nid;
180#endif
181};
182
183/**
184 * struct rmap_item - reverse mapping item for virtual addresses
185 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
186 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
187 * @nid: NUMA node id of unstable tree in which linked (may not match page)
188 * @mm: the memory structure this rmap_item is pointing into
189 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
190 * @oldchecksum: previous checksum of the page at that virtual address
191 * @node: rb node of this rmap_item in the unstable tree
192 * @head: pointer to stable_node heading this list in the stable tree
193 * @hlist: link into hlist of rmap_items hanging off that stable_node
194 */
195struct rmap_item {
196 struct rmap_item *rmap_list;
197 union {
198 struct anon_vma *anon_vma; /* when stable */
199#ifdef CONFIG_NUMA
200 int nid; /* when node of unstable tree */
201#endif
202 };
203 struct mm_struct *mm;
204 unsigned long address; /* + low bits used for flags below */
205 unsigned int oldchecksum; /* when unstable */
206 union {
207 struct rb_node node; /* when node of unstable tree */
208 struct { /* when listed from stable tree */
209 struct stable_node *head;
210 struct hlist_node hlist;
211 };
212 };
213};
214
215#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
216#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
217#define STABLE_FLAG 0x200 /* is listed from the stable tree */
218
219/* The stable and unstable tree heads */
220static struct rb_root one_stable_tree[1] = { RB_ROOT };
221static struct rb_root one_unstable_tree[1] = { RB_ROOT };
222static struct rb_root *root_stable_tree = one_stable_tree;
223static struct rb_root *root_unstable_tree = one_unstable_tree;
224
225/* Recently migrated nodes of stable tree, pending proper placement */
226static LIST_HEAD(migrate_nodes);
227#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
228
229#define MM_SLOTS_HASH_BITS 10
230static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
231
232static struct mm_slot ksm_mm_head = {
233 .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
234};
235static struct ksm_scan ksm_scan = {
236 .mm_slot = &ksm_mm_head,
237};
238
239static struct kmem_cache *rmap_item_cache;
240static struct kmem_cache *stable_node_cache;
241static struct kmem_cache *mm_slot_cache;
242
243/* The number of nodes in the stable tree */
244static unsigned long ksm_pages_shared;
245
246/* The number of page slots additionally sharing those nodes */
247static unsigned long ksm_pages_sharing;
248
249/* The number of nodes in the unstable tree */
250static unsigned long ksm_pages_unshared;
251
252/* The number of rmap_items in use: to calculate pages_volatile */
253static unsigned long ksm_rmap_items;
254
255/* The number of stable_node chains */
256static unsigned long ksm_stable_node_chains;
257
258/* The number of stable_node dups linked to the stable_node chains */
259static unsigned long ksm_stable_node_dups;
260
261/* Delay in pruning stale stable_node_dups in the stable_node_chains */
262static int ksm_stable_node_chains_prune_millisecs = 2000;
263
264/* Maximum number of page slots sharing a stable node */
265static int ksm_max_page_sharing = 256;
266
267/* Number of pages ksmd should scan in one batch */
268static unsigned int ksm_thread_pages_to_scan = 100;
269
270/* Milliseconds ksmd should sleep between batches */
271static unsigned int ksm_thread_sleep_millisecs = 20;
272
273/* Checksum of an empty (zeroed) page */
274static unsigned int zero_checksum __read_mostly;
275
276/* Whether to merge empty (zeroed) pages with actual zero pages */
277static bool ksm_use_zero_pages __read_mostly;
278
279#ifdef CONFIG_NUMA
280/* Zeroed when merging across nodes is not allowed */
281static unsigned int ksm_merge_across_nodes = 1;
282static int ksm_nr_node_ids = 1;
283#else
284#define ksm_merge_across_nodes 1U
285#define ksm_nr_node_ids 1
286#endif
287
288#define KSM_RUN_STOP 0
289#define KSM_RUN_MERGE 1
290#define KSM_RUN_UNMERGE 2
291#define KSM_RUN_OFFLINE 4
292static unsigned long ksm_run = KSM_RUN_STOP;
293static void wait_while_offlining(void);
294
295static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
296static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
297static DEFINE_MUTEX(ksm_thread_mutex);
298static DEFINE_SPINLOCK(ksm_mmlist_lock);
299
300#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
301 sizeof(struct __struct), __alignof__(struct __struct),\
302 (__flags), NULL)
303
304static int __init ksm_slab_init(void)
305{
306 rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
307 if (!rmap_item_cache)
308 goto out;
309
310 stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
311 if (!stable_node_cache)
312 goto out_free1;
313
314 mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
315 if (!mm_slot_cache)
316 goto out_free2;
317
318 return 0;
319
320out_free2:
321 kmem_cache_destroy(stable_node_cache);
322out_free1:
323 kmem_cache_destroy(rmap_item_cache);
324out:
325 return -ENOMEM;
326}
327
328static void __init ksm_slab_free(void)
329{
330 kmem_cache_destroy(mm_slot_cache);
331 kmem_cache_destroy(stable_node_cache);
332 kmem_cache_destroy(rmap_item_cache);
333 mm_slot_cache = NULL;
334}
335
336static __always_inline bool is_stable_node_chain(struct stable_node *chain)
337{
338 return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
339}
340
341static __always_inline bool is_stable_node_dup(struct stable_node *dup)
342{
343 return dup->head == STABLE_NODE_DUP_HEAD;
344}
345
346static inline void stable_node_chain_add_dup(struct stable_node *dup,
347 struct stable_node *chain)
348{
349 VM_BUG_ON(is_stable_node_dup(dup));
350 dup->head = STABLE_NODE_DUP_HEAD;
351 VM_BUG_ON(!is_stable_node_chain(chain));
352 hlist_add_head(&dup->hlist_dup, &chain->hlist);
353 ksm_stable_node_dups++;
354}
355
356static inline void __stable_node_dup_del(struct stable_node *dup)
357{
358 VM_BUG_ON(!is_stable_node_dup(dup));
359 hlist_del(&dup->hlist_dup);
360 ksm_stable_node_dups--;
361}
362
363static inline void stable_node_dup_del(struct stable_node *dup)
364{
365 VM_BUG_ON(is_stable_node_chain(dup));
366 if (is_stable_node_dup(dup))
367 __stable_node_dup_del(dup);
368 else
369 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
370#ifdef CONFIG_DEBUG_VM
371 dup->head = NULL;
372#endif
373}
374
375static inline struct rmap_item *alloc_rmap_item(void)
376{
377 struct rmap_item *rmap_item;
378
379 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
380 __GFP_NORETRY | __GFP_NOWARN);
381 if (rmap_item)
382 ksm_rmap_items++;
383 return rmap_item;
384}
385
386static inline void free_rmap_item(struct rmap_item *rmap_item)
387{
388 ksm_rmap_items--;
389 rmap_item->mm = NULL; /* debug safety */
390 kmem_cache_free(rmap_item_cache, rmap_item);
391}
392
393static inline struct stable_node *alloc_stable_node(void)
394{
395 /*
396 * The allocation can take too long with GFP_KERNEL when memory is under
397 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
398 * grants access to memory reserves, helping to avoid this problem.
399 */
400 return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
401}
402
403static inline void free_stable_node(struct stable_node *stable_node)
404{
405 VM_BUG_ON(stable_node->rmap_hlist_len &&
406 !is_stable_node_chain(stable_node));
407 kmem_cache_free(stable_node_cache, stable_node);
408}
409
410static inline struct mm_slot *alloc_mm_slot(void)
411{
412 if (!mm_slot_cache) /* initialization failed */
413 return NULL;
414 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
415}
416
417static inline void free_mm_slot(struct mm_slot *mm_slot)
418{
419 kmem_cache_free(mm_slot_cache, mm_slot);
420}
421
422static struct mm_slot *get_mm_slot(struct mm_struct *mm)
423{
424 struct mm_slot *slot;
425
426 hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
427 if (slot->mm == mm)
428 return slot;
429
430 return NULL;
431}
432
433static void insert_to_mm_slots_hash(struct mm_struct *mm,
434 struct mm_slot *mm_slot)
435{
436 mm_slot->mm = mm;
437 hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
438}
439
440/*
441 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
442 * page tables after it has passed through ksm_exit() - which, if necessary,
443 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
444 * a special flag: they can just back out as soon as mm_users goes to zero.
445 * ksm_test_exit() is used throughout to make this test for exit: in some
446 * places for correctness, in some places just to avoid unnecessary work.
447 */
448static inline bool ksm_test_exit(struct mm_struct *mm)
449{
450 return atomic_read(&mm->mm_users) == 0;
451}
452
453/*
454 * We use break_ksm to break COW on a ksm page: it's a stripped down
455 *
456 * if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1)
457 * put_page(page);
458 *
459 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
460 * in case the application has unmapped and remapped mm,addr meanwhile.
461 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
462 * mmap of /dev/mem, where we would not want to touch it.
463 *
464 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
465 * of the process that owns 'vma'. We also do not want to enforce
466 * protection keys here anyway.
467 */
468static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
469{
470 struct page *page;
471 vm_fault_t ret = 0;
472
473 do {
474 cond_resched();
475 page = follow_page(vma, addr,
476 FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
477 if (IS_ERR_OR_NULL(page))
478 break;
479 if (PageKsm(page))
480 ret = handle_mm_fault(vma, addr,
481 FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
482 NULL);
483 else
484 ret = VM_FAULT_WRITE;
485 put_page(page);
486 } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
487 /*
488 * We must loop because handle_mm_fault() may back out if there's
489 * any difficulty e.g. if pte accessed bit gets updated concurrently.
490 *
491 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
492 * COW has been broken, even if the vma does not permit VM_WRITE;
493 * but note that a concurrent fault might break PageKsm for us.
494 *
495 * VM_FAULT_SIGBUS could occur if we race with truncation of the
496 * backing file, which also invalidates anonymous pages: that's
497 * okay, that truncation will have unmapped the PageKsm for us.
498 *
499 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
500 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
501 * current task has TIF_MEMDIE set, and will be OOM killed on return
502 * to user; and ksmd, having no mm, would never be chosen for that.
503 *
504 * But if the mm is in a limited mem_cgroup, then the fault may fail
505 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
506 * even ksmd can fail in this way - though it's usually breaking ksm
507 * just to undo a merge it made a moment before, so unlikely to oom.
508 *
509 * That's a pity: we might therefore have more kernel pages allocated
510 * than we're counting as nodes in the stable tree; but ksm_do_scan
511 * will retry to break_cow on each pass, so should recover the page
512 * in due course. The important thing is to not let VM_MERGEABLE
513 * be cleared while any such pages might remain in the area.
514 */
515 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
516}
517
518static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
519 unsigned long addr)
520{
521 struct vm_area_struct *vma;
522 if (ksm_test_exit(mm))
523 return NULL;
524 vma = vma_lookup(mm, addr);
525 if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
526 return NULL;
527 return vma;
528}
529
530static void break_cow(struct rmap_item *rmap_item)
531{
532 struct mm_struct *mm = rmap_item->mm;
533 unsigned long addr = rmap_item->address;
534 struct vm_area_struct *vma;
535
536 /*
537 * It is not an accident that whenever we want to break COW
538 * to undo, we also need to drop a reference to the anon_vma.
539 */
540 put_anon_vma(rmap_item->anon_vma);
541
542 mmap_read_lock(mm);
543 vma = find_mergeable_vma(mm, addr);
544 if (vma)
545 break_ksm(vma, addr);
546 mmap_read_unlock(mm);
547}
548
549static struct page *get_mergeable_page(struct rmap_item *rmap_item)
550{
551 struct mm_struct *mm = rmap_item->mm;
552 unsigned long addr = rmap_item->address;
553 struct vm_area_struct *vma;
554 struct page *page;
555
556 mmap_read_lock(mm);
557 vma = find_mergeable_vma(mm, addr);
558 if (!vma)
559 goto out;
560
561 page = follow_page(vma, addr, FOLL_GET);
562 if (IS_ERR_OR_NULL(page))
563 goto out;
564 if (PageAnon(page)) {
565 flush_anon_page(vma, page, addr);
566 flush_dcache_page(page);
567 } else {
568 put_page(page);
569out:
570 page = NULL;
571 }
572 mmap_read_unlock(mm);
573 return page;
574}
575
576/*
577 * This helper is used for getting right index into array of tree roots.
578 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
579 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
580 * every node has its own stable and unstable tree.
581 */
582static inline int get_kpfn_nid(unsigned long kpfn)
583{
584 return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
585}
586
587static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
588 struct rb_root *root)
589{
590 struct stable_node *chain = alloc_stable_node();
591 VM_BUG_ON(is_stable_node_chain(dup));
592 if (likely(chain)) {
593 INIT_HLIST_HEAD(&chain->hlist);
594 chain->chain_prune_time = jiffies;
595 chain->rmap_hlist_len = STABLE_NODE_CHAIN;
596#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
597 chain->nid = NUMA_NO_NODE; /* debug */
598#endif
599 ksm_stable_node_chains++;
600
601 /*
602 * Put the stable node chain in the first dimension of
603 * the stable tree and at the same time remove the old
604 * stable node.
605 */
606 rb_replace_node(&dup->node, &chain->node, root);
607
608 /*
609 * Move the old stable node to the second dimension
610 * queued in the hlist_dup. The invariant is that all
611 * dup stable_nodes in the chain->hlist point to pages
612 * that are write protected and have the exact same
613 * content.
614 */
615 stable_node_chain_add_dup(dup, chain);
616 }
617 return chain;
618}
619
620static inline void free_stable_node_chain(struct stable_node *chain,
621 struct rb_root *root)
622{
623 rb_erase(&chain->node, root);
624 free_stable_node(chain);
625 ksm_stable_node_chains--;
626}
627
628static void remove_node_from_stable_tree(struct stable_node *stable_node)
629{
630 struct rmap_item *rmap_item;
631
632 /* check it's not STABLE_NODE_CHAIN or negative */
633 BUG_ON(stable_node->rmap_hlist_len < 0);
634
635 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
636 if (rmap_item->hlist.next)
637 ksm_pages_sharing--;
638 else
639 ksm_pages_shared--;
640 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
641 stable_node->rmap_hlist_len--;
642 put_anon_vma(rmap_item->anon_vma);
643 rmap_item->address &= PAGE_MASK;
644 cond_resched();
645 }
646
647 /*
648 * We need the second aligned pointer of the migrate_nodes
649 * list_head to stay clear from the rb_parent_color union
650 * (aligned and different than any node) and also different
651 * from &migrate_nodes. This will verify that future list.h changes
652 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
653 */
654#if defined(GCC_VERSION) && GCC_VERSION >= 40903
655 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
656 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
657#endif
658
659 if (stable_node->head == &migrate_nodes)
660 list_del(&stable_node->list);
661 else
662 stable_node_dup_del(stable_node);
663 free_stable_node(stable_node);
664}
665
666enum get_ksm_page_flags {
667 GET_KSM_PAGE_NOLOCK,
668 GET_KSM_PAGE_LOCK,
669 GET_KSM_PAGE_TRYLOCK
670};
671
672/*
673 * get_ksm_page: checks if the page indicated by the stable node
674 * is still its ksm page, despite having held no reference to it.
675 * In which case we can trust the content of the page, and it
676 * returns the gotten page; but if the page has now been zapped,
677 * remove the stale node from the stable tree and return NULL.
678 * But beware, the stable node's page might be being migrated.
679 *
680 * You would expect the stable_node to hold a reference to the ksm page.
681 * But if it increments the page's count, swapping out has to wait for
682 * ksmd to come around again before it can free the page, which may take
683 * seconds or even minutes: much too unresponsive. So instead we use a
684 * "keyhole reference": access to the ksm page from the stable node peeps
685 * out through its keyhole to see if that page still holds the right key,
686 * pointing back to this stable node. This relies on freeing a PageAnon
687 * page to reset its page->mapping to NULL, and relies on no other use of
688 * a page to put something that might look like our key in page->mapping.
689 * is on its way to being freed; but it is an anomaly to bear in mind.
690 */
691static struct page *get_ksm_page(struct stable_node *stable_node,
692 enum get_ksm_page_flags flags)
693{
694 struct page *page;
695 void *expected_mapping;
696 unsigned long kpfn;
697
698 expected_mapping = (void *)((unsigned long)stable_node |
699 PAGE_MAPPING_KSM);
700again:
701 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
702 page = pfn_to_page(kpfn);
703 if (READ_ONCE(page->mapping) != expected_mapping)
704 goto stale;
705
706 /*
707 * We cannot do anything with the page while its refcount is 0.
708 * Usually 0 means free, or tail of a higher-order page: in which
709 * case this node is no longer referenced, and should be freed;
710 * however, it might mean that the page is under page_ref_freeze().
711 * The __remove_mapping() case is easy, again the node is now stale;
712 * the same is in reuse_ksm_page() case; but if page is swapcache
713 * in migrate_page_move_mapping(), it might still be our page,
714 * in which case it's essential to keep the node.
715 */
716 while (!get_page_unless_zero(page)) {
717 /*
718 * Another check for page->mapping != expected_mapping would
719 * work here too. We have chosen the !PageSwapCache test to
720 * optimize the common case, when the page is or is about to
721 * be freed: PageSwapCache is cleared (under spin_lock_irq)
722 * in the ref_freeze section of __remove_mapping(); but Anon
723 * page->mapping reset to NULL later, in free_pages_prepare().
724 */
725 if (!PageSwapCache(page))
726 goto stale;
727 cpu_relax();
728 }
729
730 if (READ_ONCE(page->mapping) != expected_mapping) {
731 put_page(page);
732 goto stale;
733 }
734
735 if (flags == GET_KSM_PAGE_TRYLOCK) {
736 if (!trylock_page(page)) {
737 put_page(page);
738 return ERR_PTR(-EBUSY);
739 }
740 } else if (flags == GET_KSM_PAGE_LOCK)
741 lock_page(page);
742
743 if (flags != GET_KSM_PAGE_NOLOCK) {
744 if (READ_ONCE(page->mapping) != expected_mapping) {
745 unlock_page(page);
746 put_page(page);
747 goto stale;
748 }
749 }
750 return page;
751
752stale:
753 /*
754 * We come here from above when page->mapping or !PageSwapCache
755 * suggests that the node is stale; but it might be under migration.
756 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
757 * before checking whether node->kpfn has been changed.
758 */
759 smp_rmb();
760 if (READ_ONCE(stable_node->kpfn) != kpfn)
761 goto again;
762 remove_node_from_stable_tree(stable_node);
763 return NULL;
764}
765
766/*
767 * Removing rmap_item from stable or unstable tree.
768 * This function will clean the information from the stable/unstable tree.
769 */
770static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
771{
772 if (rmap_item->address & STABLE_FLAG) {
773 struct stable_node *stable_node;
774 struct page *page;
775
776 stable_node = rmap_item->head;
777 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
778 if (!page)
779 goto out;
780
781 hlist_del(&rmap_item->hlist);
782 unlock_page(page);
783 put_page(page);
784
785 if (!hlist_empty(&stable_node->hlist))
786 ksm_pages_sharing--;
787 else
788 ksm_pages_shared--;
789 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
790 stable_node->rmap_hlist_len--;
791
792 put_anon_vma(rmap_item->anon_vma);
793 rmap_item->head = NULL;
794 rmap_item->address &= PAGE_MASK;
795
796 } else if (rmap_item->address & UNSTABLE_FLAG) {
797 unsigned char age;
798 /*
799 * Usually ksmd can and must skip the rb_erase, because
800 * root_unstable_tree was already reset to RB_ROOT.
801 * But be careful when an mm is exiting: do the rb_erase
802 * if this rmap_item was inserted by this scan, rather
803 * than left over from before.
804 */
805 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
806 BUG_ON(age > 1);
807 if (!age)
808 rb_erase(&rmap_item->node,
809 root_unstable_tree + NUMA(rmap_item->nid));
810 ksm_pages_unshared--;
811 rmap_item->address &= PAGE_MASK;
812 }
813out:
814 cond_resched(); /* we're called from many long loops */
815}
816
817static void remove_trailing_rmap_items(struct rmap_item **rmap_list)
818{
819 while (*rmap_list) {
820 struct rmap_item *rmap_item = *rmap_list;
821 *rmap_list = rmap_item->rmap_list;
822 remove_rmap_item_from_tree(rmap_item);
823 free_rmap_item(rmap_item);
824 }
825}
826
827/*
828 * Though it's very tempting to unmerge rmap_items from stable tree rather
829 * than check every pte of a given vma, the locking doesn't quite work for
830 * that - an rmap_item is assigned to the stable tree after inserting ksm
831 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
832 * rmap_items from parent to child at fork time (so as not to waste time
833 * if exit comes before the next scan reaches it).
834 *
835 * Similarly, although we'd like to remove rmap_items (so updating counts
836 * and freeing memory) when unmerging an area, it's easier to leave that
837 * to the next pass of ksmd - consider, for example, how ksmd might be
838 * in cmp_and_merge_page on one of the rmap_items we would be removing.
839 */
840static int unmerge_ksm_pages(struct vm_area_struct *vma,
841 unsigned long start, unsigned long end)
842{
843 unsigned long addr;
844 int err = 0;
845
846 for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
847 if (ksm_test_exit(vma->vm_mm))
848 break;
849 if (signal_pending(current))
850 err = -ERESTARTSYS;
851 else
852 err = break_ksm(vma, addr);
853 }
854 return err;
855}
856
857static inline struct stable_node *page_stable_node(struct page *page)
858{
859 return PageKsm(page) ? page_rmapping(page) : NULL;
860}
861
862static inline void set_page_stable_node(struct page *page,
863 struct stable_node *stable_node)
864{
865 page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
866}
867
868#ifdef CONFIG_SYSFS
869/*
870 * Only called through the sysfs control interface:
871 */
872static int remove_stable_node(struct stable_node *stable_node)
873{
874 struct page *page;
875 int err;
876
877 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
878 if (!page) {
879 /*
880 * get_ksm_page did remove_node_from_stable_tree itself.
881 */
882 return 0;
883 }
884
885 /*
886 * Page could be still mapped if this races with __mmput() running in
887 * between ksm_exit() and exit_mmap(). Just refuse to let
888 * merge_across_nodes/max_page_sharing be switched.
889 */
890 err = -EBUSY;
891 if (!page_mapped(page)) {
892 /*
893 * The stable node did not yet appear stale to get_ksm_page(),
894 * since that allows for an unmapped ksm page to be recognized
895 * right up until it is freed; but the node is safe to remove.
896 * This page might be in a pagevec waiting to be freed,
897 * or it might be PageSwapCache (perhaps under writeback),
898 * or it might have been removed from swapcache a moment ago.
899 */
900 set_page_stable_node(page, NULL);
901 remove_node_from_stable_tree(stable_node);
902 err = 0;
903 }
904
905 unlock_page(page);
906 put_page(page);
907 return err;
908}
909
910static int remove_stable_node_chain(struct stable_node *stable_node,
911 struct rb_root *root)
912{
913 struct stable_node *dup;
914 struct hlist_node *hlist_safe;
915
916 if (!is_stable_node_chain(stable_node)) {
917 VM_BUG_ON(is_stable_node_dup(stable_node));
918 if (remove_stable_node(stable_node))
919 return true;
920 else
921 return false;
922 }
923
924 hlist_for_each_entry_safe(dup, hlist_safe,
925 &stable_node->hlist, hlist_dup) {
926 VM_BUG_ON(!is_stable_node_dup(dup));
927 if (remove_stable_node(dup))
928 return true;
929 }
930 BUG_ON(!hlist_empty(&stable_node->hlist));
931 free_stable_node_chain(stable_node, root);
932 return false;
933}
934
935static int remove_all_stable_nodes(void)
936{
937 struct stable_node *stable_node, *next;
938 int nid;
939 int err = 0;
940
941 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
942 while (root_stable_tree[nid].rb_node) {
943 stable_node = rb_entry(root_stable_tree[nid].rb_node,
944 struct stable_node, node);
945 if (remove_stable_node_chain(stable_node,
946 root_stable_tree + nid)) {
947 err = -EBUSY;
948 break; /* proceed to next nid */
949 }
950 cond_resched();
951 }
952 }
953 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
954 if (remove_stable_node(stable_node))
955 err = -EBUSY;
956 cond_resched();
957 }
958 return err;
959}
960
961static int unmerge_and_remove_all_rmap_items(void)
962{
963 struct mm_slot *mm_slot;
964 struct mm_struct *mm;
965 struct vm_area_struct *vma;
966 int err = 0;
967
968 spin_lock(&ksm_mmlist_lock);
969 ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
970 struct mm_slot, mm_list);
971 spin_unlock(&ksm_mmlist_lock);
972
973 for (mm_slot = ksm_scan.mm_slot;
974 mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
975 mm = mm_slot->mm;
976 mmap_read_lock(mm);
977 for (vma = mm->mmap; vma; vma = vma->vm_next) {
978 if (ksm_test_exit(mm))
979 break;
980 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
981 continue;
982 err = unmerge_ksm_pages(vma,
983 vma->vm_start, vma->vm_end);
984 if (err)
985 goto error;
986 }
987
988 remove_trailing_rmap_items(&mm_slot->rmap_list);
989 mmap_read_unlock(mm);
990
991 spin_lock(&ksm_mmlist_lock);
992 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
993 struct mm_slot, mm_list);
994 if (ksm_test_exit(mm)) {
995 hash_del(&mm_slot->link);
996 list_del(&mm_slot->mm_list);
997 spin_unlock(&ksm_mmlist_lock);
998
999 free_mm_slot(mm_slot);
1000 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1001 mmdrop(mm);
1002 } else
1003 spin_unlock(&ksm_mmlist_lock);
1004 }
1005
1006 /* Clean up stable nodes, but don't worry if some are still busy */
1007 remove_all_stable_nodes();
1008 ksm_scan.seqnr = 0;
1009 return 0;
1010
1011error:
1012 mmap_read_unlock(mm);
1013 spin_lock(&ksm_mmlist_lock);
1014 ksm_scan.mm_slot = &ksm_mm_head;
1015 spin_unlock(&ksm_mmlist_lock);
1016 return err;
1017}
1018#endif /* CONFIG_SYSFS */
1019
1020static u32 calc_checksum(struct page *page)
1021{
1022 u32 checksum;
1023 void *addr = kmap_atomic(page);
1024 checksum = xxhash(addr, PAGE_SIZE, 0);
1025 kunmap_atomic(addr);
1026 return checksum;
1027}
1028
1029static int write_protect_page(struct vm_area_struct *vma, struct page *page,
1030 pte_t *orig_pte)
1031{
1032 struct mm_struct *mm = vma->vm_mm;
1033 struct page_vma_mapped_walk pvmw = {
1034 .page = page,
1035 .vma = vma,
1036 };
1037 int swapped;
1038 int err = -EFAULT;
1039 struct mmu_notifier_range range;
1040
1041 pvmw.address = page_address_in_vma(page, vma);
1042 if (pvmw.address == -EFAULT)
1043 goto out;
1044
1045 BUG_ON(PageTransCompound(page));
1046
1047 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
1048 pvmw.address,
1049 pvmw.address + PAGE_SIZE);
1050 mmu_notifier_invalidate_range_start(&range);
1051
1052 if (!page_vma_mapped_walk(&pvmw))
1053 goto out_mn;
1054 if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
1055 goto out_unlock;
1056
1057 if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
1058 (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
1059 mm_tlb_flush_pending(mm)) {
1060 pte_t entry;
1061
1062 swapped = PageSwapCache(page);
1063 flush_cache_page(vma, pvmw.address, page_to_pfn(page));
1064 /*
1065 * Ok this is tricky, when get_user_pages_fast() run it doesn't
1066 * take any lock, therefore the check that we are going to make
1067 * with the pagecount against the mapcount is racy and
1068 * O_DIRECT can happen right after the check.
1069 * So we clear the pte and flush the tlb before the check
1070 * this assure us that no O_DIRECT can happen after the check
1071 * or in the middle of the check.
1072 *
1073 * No need to notify as we are downgrading page table to read
1074 * only not changing it to point to a new page.
1075 *
1076 * See Documentation/vm/mmu_notifier.rst
1077 */
1078 entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
1079 /*
1080 * Check that no O_DIRECT or similar I/O is in progress on the
1081 * page
1082 */
1083 if (page_mapcount(page) + 1 + swapped != page_count(page)) {
1084 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1085 goto out_unlock;
1086 }
1087 if (pte_dirty(entry))
1088 set_page_dirty(page);
1089
1090 if (pte_protnone(entry))
1091 entry = pte_mkclean(pte_clear_savedwrite(entry));
1092 else
1093 entry = pte_mkclean(pte_wrprotect(entry));
1094 set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
1095 }
1096 *orig_pte = *pvmw.pte;
1097 err = 0;
1098
1099out_unlock:
1100 page_vma_mapped_walk_done(&pvmw);
1101out_mn:
1102 mmu_notifier_invalidate_range_end(&range);
1103out:
1104 return err;
1105}
1106
1107/**
1108 * replace_page - replace page in vma by new ksm page
1109 * @vma: vma that holds the pte pointing to page
1110 * @page: the page we are replacing by kpage
1111 * @kpage: the ksm page we replace page by
1112 * @orig_pte: the original value of the pte
1113 *
1114 * Returns 0 on success, -EFAULT on failure.
1115 */
1116static int replace_page(struct vm_area_struct *vma, struct page *page,
1117 struct page *kpage, pte_t orig_pte)
1118{
1119 struct mm_struct *mm = vma->vm_mm;
1120 pmd_t *pmd;
1121 pte_t *ptep;
1122 pte_t newpte;
1123 spinlock_t *ptl;
1124 unsigned long addr;
1125 int err = -EFAULT;
1126 struct mmu_notifier_range range;
1127
1128 addr = page_address_in_vma(page, vma);
1129 if (addr == -EFAULT)
1130 goto out;
1131
1132 pmd = mm_find_pmd(mm, addr);
1133 if (!pmd)
1134 goto out;
1135
1136 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
1137 addr + PAGE_SIZE);
1138 mmu_notifier_invalidate_range_start(&range);
1139
1140 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
1141 if (!pte_same(*ptep, orig_pte)) {
1142 pte_unmap_unlock(ptep, ptl);
1143 goto out_mn;
1144 }
1145
1146 /*
1147 * No need to check ksm_use_zero_pages here: we can only have a
1148 * zero_page here if ksm_use_zero_pages was enabled already.
1149 */
1150 if (!is_zero_pfn(page_to_pfn(kpage))) {
1151 get_page(kpage);
1152 page_add_anon_rmap(kpage, vma, addr, false);
1153 newpte = mk_pte(kpage, vma->vm_page_prot);
1154 } else {
1155 newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
1156 vma->vm_page_prot));
1157 /*
1158 * We're replacing an anonymous page with a zero page, which is
1159 * not anonymous. We need to do proper accounting otherwise we
1160 * will get wrong values in /proc, and a BUG message in dmesg
1161 * when tearing down the mm.
1162 */
1163 dec_mm_counter(mm, MM_ANONPAGES);
1164 }
1165
1166 flush_cache_page(vma, addr, pte_pfn(*ptep));
1167 /*
1168 * No need to notify as we are replacing a read only page with another
1169 * read only page with the same content.
1170 *
1171 * See Documentation/vm/mmu_notifier.rst
1172 */
1173 ptep_clear_flush(vma, addr, ptep);
1174 set_pte_at_notify(mm, addr, ptep, newpte);
1175
1176 page_remove_rmap(page, false);
1177 if (!page_mapped(page))
1178 try_to_free_swap(page);
1179 put_page(page);
1180
1181 pte_unmap_unlock(ptep, ptl);
1182 err = 0;
1183out_mn:
1184 mmu_notifier_invalidate_range_end(&range);
1185out:
1186 return err;
1187}
1188
1189/*
1190 * try_to_merge_one_page - take two pages and merge them into one
1191 * @vma: the vma that holds the pte pointing to page
1192 * @page: the PageAnon page that we want to replace with kpage
1193 * @kpage: the PageKsm page that we want to map instead of page,
1194 * or NULL the first time when we want to use page as kpage.
1195 *
1196 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1197 */
1198static int try_to_merge_one_page(struct vm_area_struct *vma,
1199 struct page *page, struct page *kpage)
1200{
1201 pte_t orig_pte = __pte(0);
1202 int err = -EFAULT;
1203
1204 if (page == kpage) /* ksm page forked */
1205 return 0;
1206
1207 if (!PageAnon(page))
1208 goto out;
1209
1210 /*
1211 * We need the page lock to read a stable PageSwapCache in
1212 * write_protect_page(). We use trylock_page() instead of
1213 * lock_page() because we don't want to wait here - we
1214 * prefer to continue scanning and merging different pages,
1215 * then come back to this page when it is unlocked.
1216 */
1217 if (!trylock_page(page))
1218 goto out;
1219
1220 if (PageTransCompound(page)) {
1221 if (split_huge_page(page))
1222 goto out_unlock;
1223 }
1224
1225 /*
1226 * If this anonymous page is mapped only here, its pte may need
1227 * to be write-protected. If it's mapped elsewhere, all of its
1228 * ptes are necessarily already write-protected. But in either
1229 * case, we need to lock and check page_count is not raised.
1230 */
1231 if (write_protect_page(vma, page, &orig_pte) == 0) {
1232 if (!kpage) {
1233 /*
1234 * While we hold page lock, upgrade page from
1235 * PageAnon+anon_vma to PageKsm+NULL stable_node:
1236 * stable_tree_insert() will update stable_node.
1237 */
1238 set_page_stable_node(page, NULL);
1239 mark_page_accessed(page);
1240 /*
1241 * Page reclaim just frees a clean page with no dirty
1242 * ptes: make sure that the ksm page would be swapped.
1243 */
1244 if (!PageDirty(page))
1245 SetPageDirty(page);
1246 err = 0;
1247 } else if (pages_identical(page, kpage))
1248 err = replace_page(vma, page, kpage, orig_pte);
1249 }
1250
1251 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
1252 munlock_vma_page(page);
1253 if (!PageMlocked(kpage)) {
1254 unlock_page(page);
1255 lock_page(kpage);
1256 mlock_vma_page(kpage);
1257 page = kpage; /* for final unlock */
1258 }
1259 }
1260
1261out_unlock:
1262 unlock_page(page);
1263out:
1264 return err;
1265}
1266
1267/*
1268 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1269 * but no new kernel page is allocated: kpage must already be a ksm page.
1270 *
1271 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1272 */
1273static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
1274 struct page *page, struct page *kpage)
1275{
1276 struct mm_struct *mm = rmap_item->mm;
1277 struct vm_area_struct *vma;
1278 int err = -EFAULT;
1279
1280 mmap_read_lock(mm);
1281 vma = find_mergeable_vma(mm, rmap_item->address);
1282 if (!vma)
1283 goto out;
1284
1285 err = try_to_merge_one_page(vma, page, kpage);
1286 if (err)
1287 goto out;
1288
1289 /* Unstable nid is in union with stable anon_vma: remove first */
1290 remove_rmap_item_from_tree(rmap_item);
1291
1292 /* Must get reference to anon_vma while still holding mmap_lock */
1293 rmap_item->anon_vma = vma->anon_vma;
1294 get_anon_vma(vma->anon_vma);
1295out:
1296 mmap_read_unlock(mm);
1297 return err;
1298}
1299
1300/*
1301 * try_to_merge_two_pages - take two identical pages and prepare them
1302 * to be merged into one page.
1303 *
1304 * This function returns the kpage if we successfully merged two identical
1305 * pages into one ksm page, NULL otherwise.
1306 *
1307 * Note that this function upgrades page to ksm page: if one of the pages
1308 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1309 */
1310static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
1311 struct page *page,
1312 struct rmap_item *tree_rmap_item,
1313 struct page *tree_page)
1314{
1315 int err;
1316
1317 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1318 if (!err) {
1319 err = try_to_merge_with_ksm_page(tree_rmap_item,
1320 tree_page, page);
1321 /*
1322 * If that fails, we have a ksm page with only one pte
1323 * pointing to it: so break it.
1324 */
1325 if (err)
1326 break_cow(rmap_item);
1327 }
1328 return err ? NULL : page;
1329}
1330
1331static __always_inline
1332bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
1333{
1334 VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1335 /*
1336 * Check that at least one mapping still exists, otherwise
1337 * there's no much point to merge and share with this
1338 * stable_node, as the underlying tree_page of the other
1339 * sharer is going to be freed soon.
1340 */
1341 return stable_node->rmap_hlist_len &&
1342 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1343}
1344
1345static __always_inline
1346bool is_page_sharing_candidate(struct stable_node *stable_node)
1347{
1348 return __is_page_sharing_candidate(stable_node, 0);
1349}
1350
1351static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
1352 struct stable_node **_stable_node,
1353 struct rb_root *root,
1354 bool prune_stale_stable_nodes)
1355{
1356 struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1357 struct hlist_node *hlist_safe;
1358 struct page *_tree_page, *tree_page = NULL;
1359 int nr = 0;
1360 int found_rmap_hlist_len;
1361
1362 if (!prune_stale_stable_nodes ||
1363 time_before(jiffies, stable_node->chain_prune_time +
1364 msecs_to_jiffies(
1365 ksm_stable_node_chains_prune_millisecs)))
1366 prune_stale_stable_nodes = false;
1367 else
1368 stable_node->chain_prune_time = jiffies;
1369
1370 hlist_for_each_entry_safe(dup, hlist_safe,
1371 &stable_node->hlist, hlist_dup) {
1372 cond_resched();
1373 /*
1374 * We must walk all stable_node_dup to prune the stale
1375 * stable nodes during lookup.
1376 *
1377 * get_ksm_page can drop the nodes from the
1378 * stable_node->hlist if they point to freed pages
1379 * (that's why we do a _safe walk). The "dup"
1380 * stable_node parameter itself will be freed from
1381 * under us if it returns NULL.
1382 */
1383 _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
1384 if (!_tree_page)
1385 continue;
1386 nr += 1;
1387 if (is_page_sharing_candidate(dup)) {
1388 if (!found ||
1389 dup->rmap_hlist_len > found_rmap_hlist_len) {
1390 if (found)
1391 put_page(tree_page);
1392 found = dup;
1393 found_rmap_hlist_len = found->rmap_hlist_len;
1394 tree_page = _tree_page;
1395
1396 /* skip put_page for found dup */
1397 if (!prune_stale_stable_nodes)
1398 break;
1399 continue;
1400 }
1401 }
1402 put_page(_tree_page);
1403 }
1404
1405 if (found) {
1406 /*
1407 * nr is counting all dups in the chain only if
1408 * prune_stale_stable_nodes is true, otherwise we may
1409 * break the loop at nr == 1 even if there are
1410 * multiple entries.
1411 */
1412 if (prune_stale_stable_nodes && nr == 1) {
1413 /*
1414 * If there's not just one entry it would
1415 * corrupt memory, better BUG_ON. In KSM
1416 * context with no lock held it's not even
1417 * fatal.
1418 */
1419 BUG_ON(stable_node->hlist.first->next);
1420
1421 /*
1422 * There's just one entry and it is below the
1423 * deduplication limit so drop the chain.
1424 */
1425 rb_replace_node(&stable_node->node, &found->node,
1426 root);
1427 free_stable_node(stable_node);
1428 ksm_stable_node_chains--;
1429 ksm_stable_node_dups--;
1430 /*
1431 * NOTE: the caller depends on the stable_node
1432 * to be equal to stable_node_dup if the chain
1433 * was collapsed.
1434 */
1435 *_stable_node = found;
1436 /*
1437 * Just for robustness, as stable_node is
1438 * otherwise left as a stable pointer, the
1439 * compiler shall optimize it away at build
1440 * time.
1441 */
1442 stable_node = NULL;
1443 } else if (stable_node->hlist.first != &found->hlist_dup &&
1444 __is_page_sharing_candidate(found, 1)) {
1445 /*
1446 * If the found stable_node dup can accept one
1447 * more future merge (in addition to the one
1448 * that is underway) and is not at the head of
1449 * the chain, put it there so next search will
1450 * be quicker in the !prune_stale_stable_nodes
1451 * case.
1452 *
1453 * NOTE: it would be inaccurate to use nr > 1
1454 * instead of checking the hlist.first pointer
1455 * directly, because in the
1456 * prune_stale_stable_nodes case "nr" isn't
1457 * the position of the found dup in the chain,
1458 * but the total number of dups in the chain.
1459 */
1460 hlist_del(&found->hlist_dup);
1461 hlist_add_head(&found->hlist_dup,
1462 &stable_node->hlist);
1463 }
1464 }
1465
1466 *_stable_node_dup = found;
1467 return tree_page;
1468}
1469
1470static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
1471 struct rb_root *root)
1472{
1473 if (!is_stable_node_chain(stable_node))
1474 return stable_node;
1475 if (hlist_empty(&stable_node->hlist)) {
1476 free_stable_node_chain(stable_node, root);
1477 return NULL;
1478 }
1479 return hlist_entry(stable_node->hlist.first,
1480 typeof(*stable_node), hlist_dup);
1481}
1482
1483/*
1484 * Like for get_ksm_page, this function can free the *_stable_node and
1485 * *_stable_node_dup if the returned tree_page is NULL.
1486 *
1487 * It can also free and overwrite *_stable_node with the found
1488 * stable_node_dup if the chain is collapsed (in which case
1489 * *_stable_node will be equal to *_stable_node_dup like if the chain
1490 * never existed). It's up to the caller to verify tree_page is not
1491 * NULL before dereferencing *_stable_node or *_stable_node_dup.
1492 *
1493 * *_stable_node_dup is really a second output parameter of this
1494 * function and will be overwritten in all cases, the caller doesn't
1495 * need to initialize it.
1496 */
1497static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
1498 struct stable_node **_stable_node,
1499 struct rb_root *root,
1500 bool prune_stale_stable_nodes)
1501{
1502 struct stable_node *stable_node = *_stable_node;
1503 if (!is_stable_node_chain(stable_node)) {
1504 if (is_page_sharing_candidate(stable_node)) {
1505 *_stable_node_dup = stable_node;
1506 return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
1507 }
1508 /*
1509 * _stable_node_dup set to NULL means the stable_node
1510 * reached the ksm_max_page_sharing limit.
1511 */
1512 *_stable_node_dup = NULL;
1513 return NULL;
1514 }
1515 return stable_node_dup(_stable_node_dup, _stable_node, root,
1516 prune_stale_stable_nodes);
1517}
1518
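/*
 * chain_prune() may prune stale dups and collapse a single-dup chain,
 * so *s_n can be updated (or freed); chain() promises not to modify
 * the chain, which is why it takes s_n by value.
 */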
1519static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
1520 struct stable_node **s_n,
1521 struct rb_root *root)
1522{
1523 return __stable_node_chain(s_n_d, s_n, root, true);
1524}
1525
1526static __always_inline struct page *chain(struct stable_node **s_n_d,
1527 struct stable_node *s_n,
1528 struct rb_root *root)
1529{
1530 struct stable_node *old_stable_node = s_n;
1531 struct page *tree_page;
1532
1533 tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
1534 /* not pruning dups so s_n cannot have changed */
1535 VM_BUG_ON(s_n != old_stable_node);
1536 return tree_page;
1537}
1538
1539/*
1540 * stable_tree_search - search for page inside the stable tree
1541 *
1542 * This function checks if there is a page inside the stable tree
1543 * with identical content to the page that we are scanning right now.
1544 *
1545 * This function returns the stable tree node of identical content if found,
1546 * NULL otherwise.
1547 */
1548static struct page *stable_tree_search(struct page *page)
1549{
1550 int nid;
1551 struct rb_root *root;
1552 struct rb_node **new;
1553 struct rb_node *parent;
1554 struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1555 struct stable_node *page_node;
1556
1557 page_node = page_stable_node(page);
1558 if (page_node && page_node->head != &migrate_nodes) {
1559 /* ksm page forked */
1560 get_page(page);
1561 return page;
1562 }
1563
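	/*
	 * With merge_across_nodes disabled there is one stable tree per
	 * NUMA node; get_kpfn_nid() returns 0 when merging across nodes.
	 */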
1564 nid = get_kpfn_nid(page_to_pfn(page));
1565 root = root_stable_tree + nid;
1566again:
1567 new = &root->rb_node;
1568 parent = NULL;
1569
1570 while (*new) {
1571 struct page *tree_page;
1572 int ret;
1573
1574 cond_resched();
1575 stable_node = rb_entry(*new, struct stable_node, node);
1576 stable_node_any = NULL;
1577 tree_page = chain_prune(&stable_node_dup, &stable_node, root);
1578 /*
1579 * NOTE: stable_node may have been freed by
1580 * chain_prune() if the returned stable_node_dup is
1581 * not NULL. stable_node_dup may have been inserted in
1582 * the rbtree instead as a regular stable_node (in
1583 * order to collapse the stable_node chain if a single
1584 * stable_node dup was found in it). In that case
1585 * stable_node is overwritten by the callee to point
1586 * to the stable_node_dup that was collapsed in the
1587 * stable rbtree and stable_node will be equal to
1588 * stable_node_dup as if the chain never existed.
1589 */
1590 if (!stable_node_dup) {
1591 /*
1592 * Either all stable_node dups were full in
1593 * this stable_node chain, or this chain was
1594 * empty and should be rb_erased.
1595 */
1596 stable_node_any = stable_node_dup_any(stable_node,
1597 root);
1598 if (!stable_node_any) {
1599 /* rb_erase has just run */
1600 goto again;
1601 }
1602 /*
1603 * Take the page of any stable_node dup in
1604 * this stable_node chain to let the tree walk
1605 * continue. All KSM pages belonging to the
1606 * stable_node dups in a stable_node chain
1607 * have the same content and they're
1608 * write protected at all times. Any will work
1609 * fine to continue the walk.
1610 */
1611 tree_page = get_ksm_page(stable_node_any,
1612 GET_KSM_PAGE_NOLOCK);
1613 }
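		/* Exactly one of stable_node_dup and stable_node_any is set here */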
1614 VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1615 if (!tree_page) {
1616 /*
1617 * If we walked over a stale stable_node,
1618 * get_ksm_page() will call rb_erase() and it
1619 * may rebalance the tree from under us. So
1620 * restart the search from scratch. Returning
1621 * NULL would be safe too, but we'd generate
1622 * false negative insertions just because some
1623 * stable_node was stale.
1624 */
1625 goto again;
1626 }
1627
1628 ret = memcmp_pages(page, tree_page);
1629 put_page(tree_page);
1630
1631 parent = *new;
1632 if (ret < 0)
1633 new = &parent->rb_left;
1634 else if (ret > 0)
1635 new = &parent->rb_right;
1636 else {
1637 if (page_node) {
1638 VM_BUG_ON(page_node->head != &migrate_nodes);
1639 /*
1640 * Test if the migrated page should be merged
1641 * into a stable node dup. If the mapcount is
1642 * 1 we can migrate it with another KSM page
1643 * without adding it to the chain.
1644 */
1645 if (page_mapcount(page) > 1)
1646 goto chain_append;
1647 }
1648
1649 if (!stable_node_dup) {
1650 /*
1651 * If the stable_node is a chain and
1652 * we got a payload match in memcmp
1653 * but we cannot merge the scanned
1654 * page into any of the existing
1655 * stable_node dups because they're
1656 * all full, we need to wait for the
1657 * scanned page to find itself a match
1658 * in the unstable tree to create a
1659 * brand new KSM page to add later to
1660 * the dups of this stable_node.
1661 */
1662 return NULL;
1663 }
1664
1665 /*
1666 * Lock and unlock the stable_node's page (which
1667 * might already have been migrated) so that page
1668 * migration is sure to notice its raised count.
1669 * It would be more elegant to return stable_node
1670 * than kpage, but that involves more changes.
1671 */
1672 tree_page = get_ksm_page(stable_node_dup,
1673 GET_KSM_PAGE_TRYLOCK);
1674
1675 if (PTR_ERR(tree_page) == -EBUSY)
1676 return ERR_PTR(-EBUSY);
1677
1678 if (unlikely(!tree_page))
1679 /*
1680 * The tree may have been rebalanced,
1681 * so re-evaluate parent and new.
1682 */
1683 goto again;
1684 unlock_page(tree_page);
1685
1686 if (get_kpfn_nid(stable_node_dup->kpfn) !=
1687 NUMA(stable_node_dup->nid)) {
1688 put_page(tree_page);
1689 goto replace;
1690 }
1691 return tree_page;
1692 }
1693 }
1694
1695 if (!page_node)
1696 return NULL;
1697
1698 list_del(&page_node->list);
1699 DO_NUMA(page_node->nid = nid);
1700 rb_link_node(&page_node->node, parent, new);
1701 rb_insert_color(&page_node->node, root);
1702out:
1703 if (is_page_sharing_candidate(page_node)) {
1704 get_page(page);
1705 return page;
1706 } else
1707 return NULL;
1708
1709replace:
1710 /*
1711 * If stable_node was a chain and chain_prune collapsed it,
1712 * stable_node has been updated to be the new regular
1713 * stable_node. A collapse of the chain is indistinguishable
1714 * from the case where there was no chain in the stable
1715 * rbtree. Otherwise stable_node is the chain and
1716 * stable_node_dup is the dup to replace.
1717 */
1718 if (stable_node_dup == stable_node) {
1719 VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1720 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1721 /* there is no chain */
1722 if (page_node) {
1723 VM_BUG_ON(page_node->head != &migrate_nodes);
1724 list_del(&page_node->list);
1725 DO_NUMA(page_node->nid = nid);
1726 rb_replace_node(&stable_node_dup->node,
1727 &page_node->node,
1728 root);
1729 if (is_page_sharing_candidate(page_node))
1730 get_page(page);
1731 else
1732 page = NULL;
1733 } else {
1734 rb_erase(&stable_node_dup->node, root);
1735 page = NULL;
1736 }
1737 } else {
1738 VM_BUG_ON(!is_stable_node_chain(stable_node));
1739 __stable_node_dup_del(stable_node_dup);
1740 if (page_node) {
1741 VM_BUG_ON(page_node->head != &migrate_nodes);
1742 list_del(&page_node->list);
1743 DO_NUMA(page_node->nid = nid);
1744 stable_node_chain_add_dup(page_node, stable_node);
1745 if (is_page_sharing_candidate(page_node))
1746 get_page(page);
1747 else
1748 page = NULL;
1749 } else {
1750 page = NULL;
1751 }
1752 }
1753 stable_node_dup->head = &migrate_nodes;
1754 list_add(&stable_node_dup->list, stable_node_dup->head);
1755 return page;
1756
1757chain_append:
1758 /* stable_node_dup could be null if it reached the limit */
1759 if (!stable_node_dup)
1760 stable_node_dup = stable_node_any;
1761 /*
1762 * If stable_node was a chain and chain_prune collapsed it,
1763 * stable_node has been updated to be the new regular
1764 * stable_node. A collapse of the chain is indistinguishable
1765 * from the case where there was no chain in the stable
1766 * rbtree. Otherwise stable_node is the chain and
1767 * stable_node_dup is the dup to replace.
1768 */
1769 if (stable_node_dup == stable_node) {
1770 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1771 /* chain is missing so create it */
1772 stable_node = alloc_stable_node_chain(stable_node_dup,
1773 root);
1774 if (!stable_node)
1775 return NULL;
1776 }
1777 /*
1778 * Add this stable_node dup that was
1779 * migrated to the stable_node chain
1780 * of the current nid for this page
1781 * content.
1782 */
1783 VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
1784 VM_BUG_ON(page_node->head != &migrate_nodes);
1785 list_del(&page_node->list);
1786 DO_NUMA(page_node->nid = nid);
1787 stable_node_chain_add_dup(page_node, stable_node);
1788 goto out;
1789}
1790
1791/*
1792 * stable_tree_insert - insert stable tree node pointing to new ksm page
1793 * into the stable tree.
1794 *
1795 * This function returns the stable tree node just allocated on success,
1796 * NULL otherwise.
1797 */
1798static struct stable_node *stable_tree_insert(struct page *kpage)
1799{
1800 int nid;
1801 unsigned long kpfn;
1802 struct rb_root *root;
1803 struct rb_node **new;
1804 struct rb_node *parent;
1805 struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1806 bool need_chain = false;
1807
1808 kpfn = page_to_pfn(kpage);
1809 nid = get_kpfn_nid(kpfn);
1810 root = root_stable_tree + nid;
1811again:
1812 parent = NULL;
1813 new = &root->rb_node;
1814
1815 while (*new) {
1816 struct page *tree_page;
1817 int ret;
1818
1819 cond_resched();
1820 stable_node = rb_entry(*new, struct stable_node, node);
1821 stable_node_any = NULL;
1822 tree_page = chain(&stable_node_dup, stable_node, root);
1823 if (!stable_node_dup) {
1824 /*
1825 * Either all stable_node dups were full in
1826 * this stable_node chain, or this chain was
1827 * empty and should be rb_erased.
1828 */
1829 stable_node_any = stable_node_dup_any(stable_node,
1830 root);
1831 if (!stable_node_any) {
1832 /* rb_erase has just run */
1833 goto again;
1834 }
1835 /*
1836 * Take the page of any stable_node dup in
1837 * this stable_node chain to let the tree walk
1838 * continue. All KSM pages belonging to the
1839 * stable_node dups in a stable_node chain
1840 * have the same content and they're
1841 * write protected at all times. Any will work
1842 * fine to continue the walk.
1843 */
1844 tree_page = get_ksm_page(stable_node_any,
1845 GET_KSM_PAGE_NOLOCK);
1846 }
1847 VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1848 if (!tree_page) {
1849 /*
1850 * If we walked over a stale stable_node,
1851 * get_ksm_page() will call rb_erase() and it
1852 * may rebalance the tree from under us. So
1853 * restart the search from scratch. Returning
1854 * NULL would be safe too, but we'd generate
1855 * false negative insertions just because some
1856 * stable_node was stale.
1857 */
1858 goto again;
1859 }
1860
1861 ret = memcmp_pages(kpage, tree_page);
1862 put_page(tree_page);
1863
1864 parent = *new;
1865 if (ret < 0)
1866 new = &parent->rb_left;
1867 else if (ret > 0)
1868 new = &parent->rb_right;
1869 else {
1870 need_chain = true;
1871 break;
1872 }
1873 }
1874
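	/*
	 * Allocate the new stable_node (dup) up front: it is either linked
	 * directly into the rbtree (unique content) or hung off a chain
	 * when an equal-content node already exists (need_chain).
	 */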
1875 stable_node_dup = alloc_stable_node();
1876 if (!stable_node_dup)
1877 return NULL;
1878
1879 INIT_HLIST_HEAD(&stable_node_dup->hlist);
1880 stable_node_dup->kpfn = kpfn;
1881 set_page_stable_node(kpage, stable_node_dup);
1882 stable_node_dup->rmap_hlist_len = 0;
1883 DO_NUMA(stable_node_dup->nid = nid);
1884 if (!need_chain) {
1885 rb_link_node(&stable_node_dup->node, parent, new);
1886 rb_insert_color(&stable_node_dup->node, root);
1887 } else {
1888 if (!is_stable_node_chain(stable_node)) {
1889 struct stable_node *orig = stable_node;
1890 /* chain is missing so create it */
1891 stable_node = alloc_stable_node_chain(orig, root);
1892 if (!stable_node) {
1893 free_stable_node(stable_node_dup);
1894 return NULL;
1895 }
1896 }
1897 stable_node_chain_add_dup(stable_node_dup, stable_node);
1898 }
1899
1900 return stable_node_dup;
1901}
1902
1903/*
1904 * unstable_tree_search_insert - search for identical page,
1905 * else insert rmap_item into the unstable tree.
1906 *
1907 * This function searches for a page in the unstable tree identical to the
1908 * page currently being scanned; and if no identical page is found in the
1909 * tree, we insert rmap_item as a new object into the unstable tree.
1910 *
1911 * This function returns pointer to rmap_item found to be identical
1912 * to the currently scanned page, NULL otherwise.
1913 *
1914 * This function does both searching and inserting, because they share
1915 * the same walking algorithm in an rbtree.
1916 */
1917static
1918struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
1919 struct page *page,
1920 struct page **tree_pagep)
1921{
1922 struct rb_node **new;
1923 struct rb_root *root;
1924 struct rb_node *parent = NULL;
1925 int nid;
1926
1927 nid = get_kpfn_nid(page_to_pfn(page));
1928 root = root_unstable_tree + nid;
1929 new = &root->rb_node;
1930
1931 while (*new) {
1932 struct rmap_item *tree_rmap_item;
1933 struct page *tree_page;
1934 int ret;
1935
1936 cond_resched();
1937 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1938 tree_page = get_mergeable_page(tree_rmap_item);
1939 if (!tree_page)
1940 return NULL;
1941
1942 /*
1943 * Don't substitute a ksm page for a forked page.
1944 */
1945 if (page == tree_page) {
1946 put_page(tree_page);
1947 return NULL;
1948 }
1949
1950 ret = memcmp_pages(page, tree_page);
1951
1952 parent = *new;
1953 if (ret < 0) {
1954 put_page(tree_page);
1955 new = &parent->rb_left;
1956 } else if (ret > 0) {
1957 put_page(tree_page);
1958 new = &parent->rb_right;
1959 } else if (!ksm_merge_across_nodes &&
1960 page_to_nid(tree_page) != nid) {
1961 /*
1962 * If tree_page has been migrated to another NUMA node,
1963 * it will be flushed out and put in the right unstable
1964 * tree next time: only merge with it when merge_across_nodes is set.
1965 */
1966 put_page(tree_page);
1967 return NULL;
1968 } else {
1969 *tree_pagep = tree_page;
1970 return tree_rmap_item;
1971 }
1972 }
1973
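	/*
	 * No identical page found: link rmap_item into the unstable tree,
	 * recording the current scan seqnr so stale nodes from a previous
	 * full scan (the unstable tree is rebuilt every scan) can be told
	 * apart later.
	 */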
1974 rmap_item->address |= UNSTABLE_FLAG;
1975 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1976 DO_NUMA(rmap_item->nid = nid);
1977 rb_link_node(&rmap_item->node, parent, new);
1978 rb_insert_color(&rmap_item->node, root);
1979
1980 ksm_pages_unshared++;
1981 return NULL;
1982}
1983
1984/*
1985 * stable_tree_append - add another rmap_item to the linked list of
1986 * rmap_items hanging off a given node of the stable tree, all sharing
1987 * the same ksm page.
1988 */
1989static void stable_tree_append(struct rmap_item *rmap_item,
1990 struct stable_node *stable_node,
1991 bool max_page_sharing_bypass)
1992{
1993 /*
1994 * rmap won't find this mapping if we don't insert the
1995 * rmap_item in the right stable_node
1996 * duplicate. page_migration could break later if rmap breaks,
1997 * so we may as well crash here. We really need to check for
1998 * rmap_hlist_len == STABLE_NODE_CHAIN, but checking for any
1999 * other negative value works too: an underflow detected here
2000 * for the first time (and not when decreasing rmap_hlist_len)
2001 * would be a sign of memory corruption in the stable_node.
2002 */
2003 BUG_ON(stable_node->rmap_hlist_len < 0);
2004
2005 stable_node->rmap_hlist_len++;
2006 if (!max_page_sharing_bypass)
2007 /* possibly non fatal but unexpected overflow, only warn */
2008 WARN_ON_ONCE(stable_node->rmap_hlist_len >
2009 ksm_max_page_sharing);
2010
2011 rmap_item->head = stable_node;
2012 rmap_item->address |= STABLE_FLAG;
2013 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2014
2015 if (rmap_item->hlist.next)
2016 ksm_pages_sharing++;
2017 else
2018 ksm_pages_shared++;
2019}
2020
2021/*
2022 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2023 * if not, compare checksum to previous and if it's the same, see if page can
2024 * be inserted into the unstable tree, or merged with a page already there and
2025 * both transferred to the stable tree.
2026 *
2027 * @page: the page that we are searching an identical page for.
2028 * @rmap_item: the reverse mapping into the virtual address of this page
2029 */
2030static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
2031{
2032 struct mm_struct *mm = rmap_item->mm;
2033 struct rmap_item *tree_rmap_item;
2034 struct page *tree_page = NULL;
2035 struct stable_node *stable_node;
2036 struct page *kpage;
2037 unsigned int checksum;
2038 int err;
2039 bool max_page_sharing_bypass = false;
2040
2041 stable_node = page_stable_node(page);
2042 if (stable_node) {
2043 if (stable_node->head != &migrate_nodes &&
2044 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2045 NUMA(stable_node->nid)) {
2046 stable_node_dup_del(stable_node);
2047 stable_node->head = &migrate_nodes;
2048 list_add(&stable_node->list, stable_node->head);
2049 }
2050 if (stable_node->head != &migrate_nodes &&
2051 rmap_item->head == stable_node)
2052 return;
2053 /*
2054 * If it's a KSM fork, allow it to go over the sharing limit
2055 * without warnings.
2056 */
2057 if (!is_page_sharing_candidate(stable_node))
2058 max_page_sharing_bypass = true;
2059 }
2060
2061 /* We first start with searching the page inside the stable tree */
2062 kpage = stable_tree_search(page);
2063 if (kpage == page && rmap_item->head == stable_node) {
2064 put_page(kpage);
2065 return;
2066 }
2067
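	/* Unlink the rmap_item from any tree it was left on by a previous scan */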
2068 remove_rmap_item_from_tree(rmap_item);
2069
2070 if (kpage) {
2071 if (PTR_ERR(kpage) == -EBUSY)
2072 return;
2073
2074 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2075 if (!err) {
2076 /*
2077 * The page was successfully merged:
2078 * add its rmap_item to the stable tree.
2079 */
2080 lock_page(kpage);
2081 stable_tree_append(rmap_item, page_stable_node(kpage),
2082 max_page_sharing_bypass);
2083 unlock_page(kpage);
2084 }
2085 put_page(kpage);
2086 return;
2087 }
2088
2089 /*
2090 * If the hash value of the page has changed from the last time
2091 * we calculated it, this page is changing frequently: therefore we
2092 * don't want to insert it in the unstable tree, and we don't want
2093 * to waste our time searching for something identical to it there.
2094 */
2095 checksum = calc_checksum(page);
2096 if (rmap_item->oldchecksum != checksum) {
2097 rmap_item->oldchecksum = checksum;
2098 return;
2099 }
2100
2101 /*
2102 * Same checksum as an empty page. We attempt to merge it with the
2103 * appropriate zero page if the user enabled this via sysfs.
2104 */
2105 if (ksm_use_zero_pages && (checksum == zero_checksum)) {
2106 struct vm_area_struct *vma;
2107
2108 mmap_read_lock(mm);
2109 vma = find_mergeable_vma(mm, rmap_item->address);
2110 if (vma) {
2111 err = try_to_merge_one_page(vma, page,
2112 ZERO_PAGE(rmap_item->address));
2113 } else {
2114 /*
2115 * If the vma is out of date, we do not need to
2116 * continue.
2117 */
2118 err = 0;
2119 }
2120 mmap_read_unlock(mm);
2121 /*
2122 * In case of failure, the page was not really empty, so we
2123 * need to continue. Otherwise we're done.
2124 */
2125 if (!err)
2126 return;
2127 }
2128 tree_rmap_item =
2129 unstable_tree_search_insert(rmap_item, page, &tree_page);
2130 if (tree_rmap_item) {
2131 bool split;
2132
2133 kpage = try_to_merge_two_pages(rmap_item, page,
2134 tree_rmap_item, tree_page);
2135 /*
2136 * If both pages we tried to merge belong to the same compound
2137 * page, then we actually ended up increasing the reference
2138 * count of the same compound page twice, and split_huge_page
2139 * failed.
2140 * Here we set a flag if that happened, and we use it later to
2141 * try split_huge_page again. Since we call put_page right
2142 * afterwards, the reference count will be correct and
2143 * split_huge_page should succeed.
2144 */
2145 split = PageTransCompound(page)
2146 && compound_head(page) == compound_head(tree_page);
2147 put_page(tree_page);
2148 if (kpage) {
2149 /*
2150 * The pages were successfully merged: insert new
2151 * node in the stable tree and add both rmap_items.
2152 */
2153 lock_page(kpage);
2154 stable_node = stable_tree_insert(kpage);
2155 if (stable_node) {
2156 stable_tree_append(tree_rmap_item, stable_node,
2157 false);
2158 stable_tree_append(rmap_item, stable_node,
2159 false);
2160 }
2161 unlock_page(kpage);
2162
2163 /*
2164 * If we fail to insert the page into the stable tree,
2165 * we will have 2 virtual addresses that are pointing
2166 * to a ksm page left outside the stable tree,
2167 * in which case we need to break_cow on both.
2168 */
2169 if (!stable_node) {
2170 break_cow(tree_rmap_item);
2171 break_cow(rmap_item);
2172 }
2173 } else if (split) {
2174 /*
2175 * We are here if we tried to merge two pages and
2176 * failed because they both belonged to the same
2177 * compound page. We will split the page now, but no
2178 * merging will take place.
2179 * We do not want to add the cost of a full lock; if
2180 * the page is locked, it is better to skip it and
2181 * perhaps try again later.
2182 */
2183 if (!trylock_page(page))
2184 return;
2185 split_huge_page(page);
2186 unlock_page(page);
2187 }
2188 }
2189}
2190
2191static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
2192 struct rmap_item **rmap_list,
2193 unsigned long addr)
2194{
2195 struct rmap_item *rmap_item;
2196
2197 while (*rmap_list) {
2198 rmap_item = *rmap_list;
2199 if ((rmap_item->address & PAGE_MASK) == addr)
2200 return rmap_item;
2201 if (rmap_item->address > addr)
2202 break;
2203 *rmap_list = rmap_item->rmap_list;
2204 remove_rmap_item_from_tree(rmap_item);
2205 free_rmap_item(rmap_item);
2206 }
2207
2208 rmap_item = alloc_rmap_item();
2209 if (rmap_item) {
2210 /* It has already been zeroed */
2211 rmap_item->mm = mm_slot->mm;
2212 rmap_item->address = addr;
2213 rmap_item->rmap_list = *rmap_list;
2214 *rmap_list = rmap_item;
2215 }
2216 return rmap_item;
2217}
2218
2219static struct rmap_item *scan_get_next_rmap_item(struct page **page)
2220{
2221 struct mm_struct *mm;
2222 struct mm_slot *slot;
2223 struct vm_area_struct *vma;
2224 struct rmap_item *rmap_item;
2225 int nid;
2226
2227 if (list_empty(&ksm_mm_head.mm_list))
2228 return NULL;
2229
2230 slot = ksm_scan.mm_slot;
2231 if (slot == &ksm_mm_head) {
2232 /*
2233 * A number of pages can hang around indefinitely on per-cpu
2234 * pagevecs, raised page count preventing write_protect_page
2235 * from merging them. Though it doesn't really matter much,
2236 * it is puzzling to see some stuck in pages_volatile until
2237 * other activity jostles them out, and they also prevented
2238 * LTP's KSM test from succeeding deterministically; so drain
2239 * them here (here rather than on entry to ksm_do_scan(),
2240 * so we don't IPI too often when pages_to_scan is set low).
2241 */
2242 lru_add_drain_all();
2243
2244 /*
2245 * Whereas stale stable_nodes on the stable_tree itself
2246 * get pruned in the regular course of stable_tree_search(),
2247 * those moved out to the migrate_nodes list can accumulate:
2248 * so prune them once before each full scan.
2249 */
2250 if (!ksm_merge_across_nodes) {
2251 struct stable_node *stable_node, *next;
2252 struct page *page;
2253
2254 list_for_each_entry_safe(stable_node, next,
2255 &migrate_nodes, list) {
2256 page = get_ksm_page(stable_node,
2257 GET_KSM_PAGE_NOLOCK);
2258 if (page)
2259 put_page(page);
2260 cond_resched();
2261 }
2262 }
2263
2264 for (nid = 0; nid < ksm_nr_node_ids; nid++)
2265 root_unstable_tree[nid] = RB_ROOT;
2266
2267 spin_lock(&ksm_mmlist_lock);
2268 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
2269 ksm_scan.mm_slot = slot;
2270 spin_unlock(&ksm_mmlist_lock);
2271 /*
2272 * Although we tested list_empty() above, a racing __ksm_exit
2273 * of the last mm on the list may have removed it since then.
2274 */
2275 if (slot == &ksm_mm_head)
2276 return NULL;
2277next_mm:
2278 ksm_scan.address = 0;
2279 ksm_scan.rmap_list = &slot->rmap_list;
2280 }
2281
2282 mm = slot->mm;
2283 mmap_read_lock(mm);
2284 if (ksm_test_exit(mm))
2285 vma = NULL;
2286 else
2287 vma = find_vma(mm, ksm_scan.address);
2288
2289 for (; vma; vma = vma->vm_next) {
2290 if (!(vma->vm_flags & VM_MERGEABLE))
2291 continue;
2292 if (ksm_scan.address < vma->vm_start)
2293 ksm_scan.address = vma->vm_start;
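		/* No anon_vma means no anonymous pages yet: nothing to merge here */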
2294 if (!vma->anon_vma)
2295 ksm_scan.address = vma->vm_end;
2296
2297 while (ksm_scan.address < vma->vm_end) {
2298 if (ksm_test_exit(mm))
2299 break;
2300 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
2301 if (IS_ERR_OR_NULL(*page)) {
2302 ksm_scan.address += PAGE_SIZE;
2303 cond_resched();
2304 continue;
2305 }
2306 if (PageAnon(*page)) {
2307 flush_anon_page(vma, *page, ksm_scan.address);
2308 flush_dcache_page(*page);
2309 rmap_item = get_next_rmap_item(slot,
2310 ksm_scan.rmap_list, ksm_scan.address);
2311 if (rmap_item) {
2312 ksm_scan.rmap_list =
2313 &rmap_item->rmap_list;
2314 ksm_scan.address += PAGE_SIZE;
2315 } else
2316 put_page(*page);
2317 mmap_read_unlock(mm);
2318 return rmap_item;
2319 }
2320 put_page(*page);
2321 ksm_scan.address += PAGE_SIZE;
2322 cond_resched();
2323 }
2324 }
2325
2326 if (ksm_test_exit(mm)) {
2327 ksm_scan.address = 0;
2328 ksm_scan.rmap_list = &slot->rmap_list;
2329 }
2330 /*
2331 * Nuke all the rmap_items that are above this current rmap:
2332 * because there were no VM_MERGEABLE vmas with such addresses.
2333 */
2334 remove_trailing_rmap_items(ksm_scan.rmap_list);
2335
2336 spin_lock(&ksm_mmlist_lock);
2337 ksm_scan.mm_slot = list_entry(slot->mm_list.next,
2338 struct mm_slot, mm_list);
2339 if (ksm_scan.address == 0) {
2340 /*
2341 * We've completed a full scan of all vmas, holding mmap_lock
2342 * throughout, and found no VM_MERGEABLE: so do the same as
2343 * __ksm_exit does to remove this mm from all our lists now.
2344 * This applies either when cleaning up after __ksm_exit
2345 * (but beware: we can reach here even before __ksm_exit),
2346 * or when all VM_MERGEABLE areas have been unmapped (and
2347 * mmap_lock then protects against race with MADV_MERGEABLE).
2348 */
2349 hash_del(&slot->link);
2350 list_del(&slot->mm_list);
2351 spin_unlock(&ksm_mmlist_lock);
2352
2353 free_mm_slot(slot);
2354 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2355 mmap_read_unlock(mm);
2356 mmdrop(mm);
2357 } else {
2358 mmap_read_unlock(mm);
2359 /*
2360 * mmap_read_unlock(mm) first because after
2361 * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may
2362 * already have been freed under us by __ksm_exit()
2363 * because the "mm_slot" is still hashed and
2364 * ksm_scan.mm_slot doesn't point to it anymore.
2365 */
2366 spin_unlock(&ksm_mmlist_lock);
2367 }
2368
2369 /* Repeat until we've completed scanning the whole list */
2370 slot = ksm_scan.mm_slot;
2371 if (slot != &ksm_mm_head)
2372 goto next_mm;
2373
2374 ksm_scan.seqnr++;
2375 return NULL;
2376}
2377
2378/**
2379 * ksm_do_scan - the ksm scanner main worker function.
2380 * @scan_npages: number of pages we want to scan before we return.
2381 */
2382static void ksm_do_scan(unsigned int scan_npages)
2383{
2384 struct rmap_item *rmap_item;
2385 struct page *page;
2386
2387 while (scan_npages-- && likely(!freezing(current))) {
2388 cond_resched();
2389 rmap_item = scan_get_next_rmap_item(&page);
2390 if (!rmap_item)
2391 return;
2392 cmp_and_merge_page(page, rmap_item);
2393 put_page(page);
2394 }
2395}
2396
2397static int ksmd_should_run(void)
2398{
2399 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
2400}
2401
2402static int ksm_scan_thread(void *nothing)
2403{
2404 unsigned int sleep_ms;
2405
2406 set_freezable();
2407 set_user_nice(current, 5);
2408
2409 while (!kthread_should_stop()) {
2410 mutex_lock(&ksm_thread_mutex);
2411 wait_while_offlining();
2412 if (ksmd_should_run())
2413 ksm_do_scan(ksm_thread_pages_to_scan);
2414 mutex_unlock(&ksm_thread_mutex);
2415
2416 try_to_freeze();
2417
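		/*
		 * Sleep between batches, but wake early if sleep_millisecs
		 * is changed via sysfs; when there is nothing to scan, wait
		 * until an mm is registered or the thread is stopped.
		 */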
2418 if (ksmd_should_run()) {
2419 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
2420 wait_event_interruptible_timeout(ksm_iter_wait,
2421 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
2422 msecs_to_jiffies(sleep_ms));
2423 } else {
2424 wait_event_freezable(ksm_thread_wait,
2425 ksmd_should_run() || kthread_should_stop());
2426 }
2427 }
2428 return 0;
2429}
2430
2431int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2432 unsigned long end, int advice, unsigned long *vm_flags)
2433{
2434 struct mm_struct *mm = vma->vm_mm;
2435 int err;
2436
2437 switch (advice) {
2438 case MADV_MERGEABLE:
2439 /*
2440 * Be somewhat over-protective for now!
2441 */
2442 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
2443 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
2444 VM_HUGETLB | VM_MIXEDMAP))
2445 return 0; /* just ignore the advice */
2446
2447 if (vma_is_dax(vma))
2448 return 0;
2449
2450#ifdef VM_SAO
2451 if (*vm_flags & VM_SAO)
2452 return 0;
2453#endif
2454#ifdef VM_SPARC_ADI
2455 if (*vm_flags & VM_SPARC_ADI)
2456 return 0;
2457#endif
2458
2459 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2460 err = __ksm_enter(mm);
2461 if (err)
2462 return err;
2463 }
2464
2465 *vm_flags |= VM_MERGEABLE;
2466 break;
2467
2468 case MADV_UNMERGEABLE:
2469 if (!(*vm_flags & VM_MERGEABLE))
2470 return 0; /* just ignore the advice */
2471
2472 if (vma->anon_vma) {
2473 err = unmerge_ksm_pages(vma, start, end);
2474 if (err)
2475 return err;
2476 }
2477
2478 *vm_flags &= ~VM_MERGEABLE;
2479 break;
2480 }
2481
2482 return 0;
2483}
2484EXPORT_SYMBOL_GPL(ksm_madvise);
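/*
 * Userspace reaches ksm_madvise() through the madvise() syscall.  A
 * minimal sketch (hypothetical buffer and length, error handling elided):
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);	/. register the area with KSM ./
 *	...
 *	madvise(buf, len, MADV_UNMERGEABLE);	/. break COW and unmerge ./
 */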
2485
2486int __ksm_enter(struct mm_struct *mm)
2487{
2488 struct mm_slot *mm_slot;
2489 int needs_wakeup;
2490
2491 mm_slot = alloc_mm_slot();
2492 if (!mm_slot)
2493 return -ENOMEM;
2494
2495 /* Check ksm_run too? Would need tighter locking */
2496 needs_wakeup = list_empty(&ksm_mm_head.mm_list);
2497
2498 spin_lock(&ksm_mmlist_lock);
2499 insert_to_mm_slots_hash(mm, mm_slot);
2500 /*
2501 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
2502 * insert just behind the scanning cursor, to let the area settle
2503 * down a little; when fork is followed by immediate exec, we don't
2504 * want ksmd to waste time setting up and tearing down an rmap_list.
2505 *
2506 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
2507 * scanning cursor, otherwise KSM pages in newly forked mms will be
2508 * missed: then we might as well insert at the end of the list.
2509 */
2510 if (ksm_run & KSM_RUN_UNMERGE)
2511 list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
2512 else
2513 list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
2514 spin_unlock(&ksm_mmlist_lock);
2515
2516 set_bit(MMF_VM_MERGEABLE, &mm->flags);
2517 mmgrab(mm);
2518
2519 if (needs_wakeup)
2520 wake_up_interruptible(&ksm_thread_wait);
2521
2522 return 0;
2523}
2524
2525void __ksm_exit(struct mm_struct *mm)
2526{
2527 struct mm_slot *mm_slot;
2528 int easy_to_free = 0;
2529
2530 /*
2531 * This process is exiting: if it's straightforward (as is the
2532 * case when ksmd was never running), free mm_slot immediately.
2533 * But if it's at the cursor or has rmap_items linked to it, use
2534 * mmap_lock to synchronize with any break_cows before pagetables
2535 * are freed, and leave the mm_slot on the list for ksmd to free.
2536 * Beware: ksm may already have noticed it exiting and freed the slot.
2537 */
2538
2539 spin_lock(&ksm_mmlist_lock);
2540 mm_slot = get_mm_slot(mm);
2541 if (mm_slot && ksm_scan.mm_slot != mm_slot) {
2542 if (!mm_slot->rmap_list) {
2543 hash_del(&mm_slot->link);
2544 list_del(&mm_slot->mm_list);
2545 easy_to_free = 1;
2546 } else {
2547 list_move(&mm_slot->mm_list,
2548 &ksm_scan.mm_slot->mm_list);
2549 }
2550 }
2551 spin_unlock(&ksm_mmlist_lock);
2552
2553 if (easy_to_free) {
2554 free_mm_slot(mm_slot);
2555 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2556 mmdrop(mm);
2557 } else if (mm_slot) {
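		/*
		 * ksmd may still be using this mm: take and release
		 * mmap_lock for write to serialize against break_cow()
		 * before the page tables are freed (see comment above).
		 */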
2558 mmap_write_lock(mm);
2559 mmap_write_unlock(mm);
2560 }
2561}
2562
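/*
 * Decide whether a swapped-in page can be reused as-is, or must be
 * copied into a fresh anonymous page before being mapped into this vma.
 */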
2563struct page *ksm_might_need_to_copy(struct page *page,
2564 struct vm_area_struct *vma, unsigned long address)
2565{
2566 struct anon_vma *anon_vma = page_anon_vma(page);
2567 struct page *new_page;
2568
2569 if (PageKsm(page)) {
2570 if (page_stable_node(page) &&
2571 !(ksm_run & KSM_RUN_UNMERGE))
2572 return page; /* no need to copy it */
2573 } else if (!anon_vma) {
2574 return page; /* no need to copy it */
2575 } else if (anon_vma->root == vma->anon_vma->root &&
2576 page->index == linear_page_index(vma, address)) {
2577 return page; /* still no need to copy it */
2578 }
2579 if (!PageUptodate(page))
2580 return page; /* let do_swap_page report the error */
2581
2582 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
2583 if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
2584 put_page(new_page);
2585 new_page = NULL;
2586 }
2587 if (new_page) {
2588 copy_user_highpage(new_page, page, address, vma);
2589
2590 SetPageDirty(new_page);
2591 __SetPageUptodate(new_page);
2592 __SetPageLocked(new_page);
2593 }
2594
2595 return new_page;
2596}
2597
2598void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
2599{
2600 struct stable_node *stable_node;
2601 struct rmap_item *rmap_item;
2602 int search_new_forks = 0;
2603
2604 VM_BUG_ON_PAGE(!PageKsm(page), page);
2605
2606 /*
2607 * Rely on the page lock to protect against concurrent modifications
2608 * to that page's node of the stable tree.
2609 */
2610 VM_BUG_ON_PAGE(!PageLocked(page), page);
2611
2612 stable_node = page_stable_node(page);
2613 if (!stable_node)
2614 return;
2615again:
2616 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2617 struct anon_vma *anon_vma = rmap_item->anon_vma;
2618 struct anon_vma_chain *vmac;
2619 struct vm_area_struct *vma;
2620
2621 cond_resched();
2622 anon_vma_lock_read(anon_vma);
2623 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
2624 0, ULONG_MAX) {
2625 unsigned long addr;
2626
2627 cond_resched();
2628 vma = vmac->vma;
2629
2630 /* Ignore the stable/unstable/seqnr flags */
2631 addr = rmap_item->address & PAGE_MASK;
2632
2633 if (addr < vma->vm_start || addr >= vma->vm_end)
2634 continue;
2635 /*
2636 * Initially we examine only the vma which covers this
2637 * rmap_item; but later, if there is still work to do,
2638 * we examine covering vmas in other mms: in case they
2639 * were forked from the original since ksmd passed.
2640 * were forked from the original since ksmd passed over it.
2641 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
2642 continue;
2643
2644 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2645 continue;
2646
2647 if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
2648 anon_vma_unlock_read(anon_vma);
2649 return;
2650 }
2651 if (rwc->done && rwc->done(page)) {
2652 anon_vma_unlock_read(anon_vma);
2653 return;
2654 }
2655 }
2656 anon_vma_unlock_read(anon_vma);
2657 }
2658 if (!search_new_forks++)
2659 goto again;
2660}
2661
2662#ifdef CONFIG_MIGRATION
2663void ksm_migrate_page(struct page *newpage, struct page *oldpage)
2664{
2665 struct stable_node *stable_node;
2666
2667 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
2668 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
2669 VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
2670
2671 stable_node = page_stable_node(newpage);
2672 if (stable_node) {
2673 VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
2674 stable_node->kpfn = page_to_pfn(newpage);
2675 /*
2676 * newpage->mapping was set in advance; now we need smp_wmb()
2677 * to make sure that the new stable_node->kpfn is visible
2678 * to get_ksm_page() before it can see that oldpage->mapping
2679 * has gone stale (or that PageSwapCache has been cleared).
2680 */
2681 smp_wmb();
2682 set_page_stable_node(oldpage, NULL);
2683 }
2684}
2685#endif /* CONFIG_MIGRATION */
2686
2687#ifdef CONFIG_MEMORY_HOTREMOVE
2688static void wait_while_offlining(void)
2689{
2690 while (ksm_run & KSM_RUN_OFFLINE) {
2691 mutex_unlock(&ksm_thread_mutex);
2692 wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
2693 TASK_UNINTERRUPTIBLE);
2694 mutex_lock(&ksm_thread_mutex);
2695 }
2696}
2697
2698static bool stable_node_dup_remove_range(struct stable_node *stable_node,
2699 unsigned long start_pfn,
2700 unsigned long end_pfn)
2701{
2702 if (stable_node->kpfn >= start_pfn &&
2703 stable_node->kpfn < end_pfn) {
2704 /*
2705 * Don't get_ksm_page, page has already gone:
2706 * which is why we keep kpfn instead of page*
2707 */
2708 remove_node_from_stable_tree(stable_node);
2709 return true;
2710 }
2711 return false;
2712}
2713
2714static bool stable_node_chain_remove_range(struct stable_node *stable_node,
2715 unsigned long start_pfn,
2716 unsigned long end_pfn,
2717 struct rb_root *root)
2718{
2719 struct stable_node *dup;
2720 struct hlist_node *hlist_safe;
2721
2722 if (!is_stable_node_chain(stable_node)) {
2723 VM_BUG_ON(is_stable_node_dup(stable_node));
2724 return stable_node_dup_remove_range(stable_node, start_pfn,
2725 end_pfn);
2726 }
2727
2728 hlist_for_each_entry_safe(dup, hlist_safe,
2729 &stable_node->hlist, hlist_dup) {
2730 VM_BUG_ON(!is_stable_node_dup(dup));
2731 stable_node_dup_remove_range(dup, start_pfn, end_pfn);
2732 }
2733 if (hlist_empty(&stable_node->hlist)) {
2734 free_stable_node_chain(stable_node, root);
2735 return true; /* notify caller that tree was rebalanced */
2736 } else
2737 return false;
2738}
2739
2740static void ksm_check_stable_tree(unsigned long start_pfn,
2741 unsigned long end_pfn)
2742{
2743 struct stable_node *stable_node, *next;
2744 struct rb_node *node;
2745 int nid;
2746
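	/*
	 * Walk every per-node stable tree; whenever a removal rebalances
	 * the rbtree, restart from rb_first() instead of trusting a stale
	 * iterator.
	 */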
2747 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
2748 node = rb_first(root_stable_tree + nid);
2749 while (node) {
2750 stable_node = rb_entry(node, struct stable_node, node);
2751 if (stable_node_chain_remove_range(stable_node,
2752 start_pfn, end_pfn,
2753 root_stable_tree +
2754 nid))
2755 node = rb_first(root_stable_tree + nid);
2756 else
2757 node = rb_next(node);
2758 cond_resched();
2759 }
2760 }
2761 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
2762 if (stable_node->kpfn >= start_pfn &&
2763 stable_node->kpfn < end_pfn)
2764 remove_node_from_stable_tree(stable_node);
2765 cond_resched();
2766 }
2767}
2768
2769static int ksm_memory_callback(struct notifier_block *self,
2770 unsigned long action, void *arg)
2771{
2772 struct memory_notify *mn = arg;
2773
2774 switch (action) {
2775 case MEM_GOING_OFFLINE:
2776 /*
2777 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
2778 * and remove_all_stable_nodes() while memory is going offline:
2779 * it is unsafe for them to touch the stable tree at this time.
2780 * But unmerge_ksm_pages(), rmap lookups and other entry points
2781 * which do not need the ksm_thread_mutex are all safe.
2782 */
2783 mutex_lock(&ksm_thread_mutex);
2784 ksm_run |= KSM_RUN_OFFLINE;
2785 mutex_unlock(&ksm_thread_mutex);
2786 break;
2787
2788 case MEM_OFFLINE:
2789 /*
2790 * Most of the work is done by page migration; but there might
2791 * be a few stable_nodes left over, still pointing to struct
2792 * pages which have been offlined: prune those from the tree,
2793 * otherwise get_ksm_page() might later try to access a
2794 * non-existent struct page.
2795 */
2796 ksm_check_stable_tree(mn->start_pfn,
2797 mn->start_pfn + mn->nr_pages);
2798 fallthrough;
2799 case MEM_CANCEL_OFFLINE:
2800 mutex_lock(&ksm_thread_mutex);
2801 ksm_run &= ~KSM_RUN_OFFLINE;
2802 mutex_unlock(&ksm_thread_mutex);
2803
2804 smp_mb(); /* wake_up_bit advises this */
2805 wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
2806 break;
2807 }
2808 return NOTIFY_OK;
2809}
2810#else
2811static void wait_while_offlining(void)
2812{
2813}
2814#endif /* CONFIG_MEMORY_HOTREMOVE */
2815
2816#ifdef CONFIG_SYSFS
2817/*
2818 * This all compiles without CONFIG_SYSFS, but is a waste of space.
2819 */
2820
2821#define KSM_ATTR_RO(_name) \
2822 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2823#define KSM_ATTR(_name) \
2824 static struct kobj_attribute _name##_attr = \
2825 __ATTR(_name, 0644, _name##_show, _name##_store)
2826
2827static ssize_t sleep_millisecs_show(struct kobject *kobj,
2828 struct kobj_attribute *attr, char *buf)
2829{
2830 return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
2831}
2832
2833static ssize_t sleep_millisecs_store(struct kobject *kobj,
2834 struct kobj_attribute *attr,
2835 const char *buf, size_t count)
2836{
2837 unsigned int msecs;
2838 int err;
2839
2840 err = kstrtouint(buf, 10, &msecs);
2841 if (err)
2842 return -EINVAL;
2843
2844 ksm_thread_sleep_millisecs = msecs;
2845 wake_up_interruptible(&ksm_iter_wait);
2846
2847 return count;
2848}
2849KSM_ATTR(sleep_millisecs);
2850
2851static ssize_t pages_to_scan_show(struct kobject *kobj,
2852 struct kobj_attribute *attr, char *buf)
2853{
2854 return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
2855}
2856
2857static ssize_t pages_to_scan_store(struct kobject *kobj,
2858 struct kobj_attribute *attr,
2859 const char *buf, size_t count)
2860{
2861 unsigned int nr_pages;
2862 int err;
2863
2864 err = kstrtouint(buf, 10, &nr_pages);
2865 if (err)
2866 return -EINVAL;
2867
2868 ksm_thread_pages_to_scan = nr_pages;
2869
2870 return count;
2871}
2872KSM_ATTR(pages_to_scan);
2873
2874static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
2875 char *buf)
2876{
2877 return sysfs_emit(buf, "%lu\n", ksm_run);
2878}
2879
2880static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
2881 const char *buf, size_t count)
2882{
2883 unsigned int flags;
2884 int err;
2885
2886 err = kstrtouint(buf, 10, &flags);
2887 if (err)
2888 return -EINVAL;
2889 if (flags > KSM_RUN_UNMERGE)
2890 return -EINVAL;
2891
2892 /*
2893 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
2894 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
2895 * breaking COW to free the pages_shared (but leaves mm_slots
2896 * on the list for when ksmd may be set running again).
2897 */
2898
2899 mutex_lock(&ksm_thread_mutex);
2900 wait_while_offlining();
2901 if (ksm_run != flags) {
2902 ksm_run = flags;
2903 if (flags & KSM_RUN_UNMERGE) {
2904 set_current_oom_origin();
2905 err = unmerge_and_remove_all_rmap_items();
2906 clear_current_oom_origin();
2907 if (err) {
2908 ksm_run = KSM_RUN_STOP;
2909 count = err;
2910 }
2911 }
2912 }
2913 mutex_unlock(&ksm_thread_mutex);
2914
2915 if (flags & KSM_RUN_MERGE)
2916 wake_up_interruptible(&ksm_thread_wait);
2917
2918 return count;
2919}
2920KSM_ATTR(run);
2921
2922#ifdef CONFIG_NUMA
2923static ssize_t merge_across_nodes_show(struct kobject *kobj,
2924 struct kobj_attribute *attr, char *buf)
2925{
2926 return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
2927}
2928
2929static ssize_t merge_across_nodes_store(struct kobject *kobj,
2930 struct kobj_attribute *attr,
2931 const char *buf, size_t count)
2932{
2933 int err;
2934 unsigned long knob;
2935
2936 err = kstrtoul(buf, 10, &knob);
2937 if (err)
2938 return err;
2939 if (knob > 1)
2940 return -EINVAL;
2941
2942 mutex_lock(&ksm_thread_mutex);
2943 wait_while_offlining();
2944 if (ksm_merge_across_nodes != knob) {
2945 if (ksm_pages_shared || remove_all_stable_nodes())
2946 err = -EBUSY;
2947 else if (root_stable_tree == one_stable_tree) {
2948 struct rb_root *buf;
2949 /*
2950 * This is the first time that we switch away from the
2951 * default of merging across nodes: must now allocate
2952 * a buffer to hold as many roots as may be needed.
2953 * Allocate stable and unstable together:
2954 * MAXSMP NODES_SHIFT 10 will use 16kB.
2955 */
2956 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
2957 GFP_KERNEL);
2958 /* Assume a zero-filled rb_root is equivalent to RB_ROOT */
2959 if (!buf)
2960 err = -ENOMEM;
2961 else {
2962 root_stable_tree = buf;
2963 root_unstable_tree = buf + nr_node_ids;
2964 /* Stable tree is empty but not the unstable */
2965 root_unstable_tree[0] = one_unstable_tree[0];
2966 }
2967 }
2968 if (!err) {
2969 ksm_merge_across_nodes = knob;
2970 ksm_nr_node_ids = knob ? 1 : nr_node_ids;
2971 }
2972 }
2973 mutex_unlock(&ksm_thread_mutex);
2974
2975 return err ? err : count;
2976}
2977KSM_ATTR(merge_across_nodes);
2978#endif
2979
2980static ssize_t use_zero_pages_show(struct kobject *kobj,
2981 struct kobj_attribute *attr, char *buf)
2982{
2983 return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
2984}
2985static ssize_t use_zero_pages_store(struct kobject *kobj,
2986 struct kobj_attribute *attr,
2987 const char *buf, size_t count)
2988{
2989 int err;
2990 bool value;
2991
2992 err = kstrtobool(buf, &value);
2993 if (err)
2994 return -EINVAL;
2995
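	/*
	 * Takes effect from the next scan; pages already merged with the
	 * zero page are left as they are.
	 */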
2996 ksm_use_zero_pages = value;
2997
2998 return count;
2999}
3000KSM_ATTR(use_zero_pages);
3001
3002static ssize_t max_page_sharing_show(struct kobject *kobj,
3003 struct kobj_attribute *attr, char *buf)
3004{
3005 return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
3006}
3007
3008static ssize_t max_page_sharing_store(struct kobject *kobj,
3009 struct kobj_attribute *attr,
3010 const char *buf, size_t count)
3011{
3012 int err;
3013 int knob;
3014
3015 err = kstrtoint(buf, 10, &knob);
3016 if (err)
3017 return err;
3018 /*
3019 * When a KSM page is created it is shared by 2 mappings. This
3020 * being a signed comparison, it implicitly verifies it's not
3021 * negative.
3022 */
3023 if (knob < 2)
3024 return -EINVAL;
3025
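	/* Fast path: avoid taking ksm_thread_mutex if the limit is unchanged */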
3026 if (READ_ONCE(ksm_max_page_sharing) == knob)
3027 return count;
3028
3029 mutex_lock(&ksm_thread_mutex);
3030 wait_while_offlining();
3031 if (ksm_max_page_sharing != knob) {
3032 if (ksm_pages_shared || remove_all_stable_nodes())
3033 err = -EBUSY;
3034 else
3035 ksm_max_page_sharing = knob;
3036 }
3037 mutex_unlock(&ksm_thread_mutex);
3038
3039 return err ? err : count;
3040}
3041KSM_ATTR(max_page_sharing);
3042
3043static ssize_t pages_shared_show(struct kobject *kobj,
3044 struct kobj_attribute *attr, char *buf)
3045{
3046 return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
3047}
3048KSM_ATTR_RO(pages_shared);
3049
3050static ssize_t pages_sharing_show(struct kobject *kobj,
3051 struct kobj_attribute *attr, char *buf)
3052{
3053 return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
3054}
3055KSM_ATTR_RO(pages_sharing);
3056
3057static ssize_t pages_unshared_show(struct kobject *kobj,
3058 struct kobj_attribute *attr, char *buf)
3059{
3060 return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
3061}
3062KSM_ATTR_RO(pages_unshared);
3063
3064static ssize_t pages_volatile_show(struct kobject *kobj,
3065 struct kobj_attribute *attr, char *buf)
3066{
3067 long ksm_pages_volatile;
3068
3069 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3070 - ksm_pages_sharing - ksm_pages_unshared;
3071 /*
3072 * It was not worth any locking to calculate that statistic,
3073 * but it might therefore sometimes be negative: conceal that.
3074 */
3075 if (ksm_pages_volatile < 0)
3076 ksm_pages_volatile = 0;
3077 return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
3078}
3079KSM_ATTR_RO(pages_volatile);
3080
3081static ssize_t stable_node_dups_show(struct kobject *kobj,
3082 struct kobj_attribute *attr, char *buf)
3083{
3084 return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
3085}
3086KSM_ATTR_RO(stable_node_dups);
3087
3088static ssize_t stable_node_chains_show(struct kobject *kobj,
3089 struct kobj_attribute *attr, char *buf)
3090{
3091 return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
3092}
3093KSM_ATTR_RO(stable_node_chains);
3094
3095static ssize_t
3096stable_node_chains_prune_millisecs_show(struct kobject *kobj,
3097 struct kobj_attribute *attr,
3098 char *buf)
3099{
3100 return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
3101}
3102
3103static ssize_t
3104stable_node_chains_prune_millisecs_store(struct kobject *kobj,
3105 struct kobj_attribute *attr,
3106 const char *buf, size_t count)
3107{
3108 unsigned long msecs;
3109 int err;
3110
3111 err = kstrtoul(buf, 10, &msecs);
3112 if (err || msecs > UINT_MAX)
3113 return -EINVAL;
3114
3115 ksm_stable_node_chains_prune_millisecs = msecs;
3116
3117 return count;
3118}
3119KSM_ATTR(stable_node_chains_prune_millisecs);
3120
3121static ssize_t full_scans_show(struct kobject *kobj,
3122 struct kobj_attribute *attr, char *buf)
3123{
3124 return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
3125}
3126KSM_ATTR_RO(full_scans);
3127
3128static struct attribute *ksm_attrs[] = {
3129 &sleep_millisecs_attr.attr,
3130 &pages_to_scan_attr.attr,
3131 &run_attr.attr,
3132 &pages_shared_attr.attr,
3133 &pages_sharing_attr.attr,
3134 &pages_unshared_attr.attr,
3135 &pages_volatile_attr.attr,
3136 &full_scans_attr.attr,
3137#ifdef CONFIG_NUMA
3138 &merge_across_nodes_attr.attr,
3139#endif
3140 &max_page_sharing_attr.attr,
3141 &stable_node_chains_attr.attr,
3142 &stable_node_dups_attr.attr,
3143 &stable_node_chains_prune_millisecs_attr.attr,
3144 &use_zero_pages_attr.attr,
3145 NULL,
3146};
3147
3148static const struct attribute_group ksm_attr_group = {
3149 .attrs = ksm_attrs,
3150 .name = "ksm",
3151};
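/*
 * The group is exposed under /sys/kernel/mm/ksm/.  Typical usage from
 * the command line (the values below are examples only):
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 1   > /sys/kernel/mm/ksm/run
 *	cat /sys/kernel/mm/ksm/pages_sharing
 */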
3152#endif /* CONFIG_SYSFS */
3153
3154static int __init ksm_init(void)
3155{
3156 struct task_struct *ksm_thread;
3157 int err;
3158
3159 /* The correct value depends on page size and endianness */
3160 zero_checksum = calc_checksum(ZERO_PAGE(0));
3161 /* Default to false for backwards compatibility */
3162 ksm_use_zero_pages = false;
3163
3164 err = ksm_slab_init();
3165 if (err)
3166 goto out;
3167
3168 ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
3169 if (IS_ERR(ksm_thread)) {
3170 pr_err("ksm: creating kthread failed\n");
3171 err = PTR_ERR(ksm_thread);
3172 goto out_free;
3173 }
3174
3175#ifdef CONFIG_SYSFS
3176 err = sysfs_create_group(mm_kobj, &ksm_attr_group);
3177 if (err) {
3178 pr_err("ksm: register sysfs failed\n");
3179 kthread_stop(ksm_thread);
3180 goto out_free;
3181 }
3182#else
3183 ksm_run = KSM_RUN_MERGE; /* no way for user to start it */
3184
3185#endif /* CONFIG_SYSFS */
3186
3187#ifdef CONFIG_MEMORY_HOTREMOVE
3188 /* There is no significance to this priority 100 */
3189 hotplug_memory_notifier(ksm_memory_callback, 100);
3190#endif
3191 return 0;
3192
3193out_free:
3194 ksm_slab_free();
3195out:
3196 return err;
3197}
3198subsys_initcall(ksm_init);