// SPDX-License-Identifier: GPL-2.0-or-later
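/*
 * Userland unit tests exercising the VMA merge, split and expand logic in
 * mm/vma.c, built against the userland stubs provided by vma_internal.h.
 */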

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#include "generated/bit-length.h"

#include "maple-shared.h"
#include "vma_internal.h"

/* Include so header guard set. */
#include "../../../mm/vma.h"

static bool fail_prealloc;

/* Then override vma_iter_prealloc() so we can choose to fail it. */
#define vma_iter_prealloc(vmi, vma) \
	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
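
/*
 * Tests which want to simulate maple tree preallocation failure simply set
 * fail_prealloc to true; cleanup_mm() resets it between test cases.
 */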

/*
 * Directly import the VMA implementation here. Our vma_internal.h wrapper
 * provides userland-equivalent functionality for everything vma.c uses.
 */
#include "../../../mm/vma.c"

const struct vm_operations_struct vma_dummy_vm_ops;
static struct anon_vma dummy_anon_vma;

#define ASSERT_TRUE(_expr) \
	do { \
		if (!(_expr)) { \
			fprintf(stderr, \
				"Assert FAILED at %s:%d:%s(): %s is FALSE.\n", \
				__FILE__, __LINE__, __FUNCTION__, #_expr); \
			return false; \
		} \
	} while (0)
#define ASSERT_FALSE(_expr) ASSERT_TRUE(!(_expr))
#define ASSERT_EQ(_val1, _val2) ASSERT_TRUE((_val1) == (_val2))
#define ASSERT_NE(_val1, _val2) ASSERT_TRUE((_val1) != (_val2))
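
/*
 * On failure these assertions print a diagnostic and return false from the
 * enclosing function, so they are only intended for use in the bool-returning
 * test functions below.
 */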

static struct task_struct __current;

struct task_struct *get_current(void)
{
	return &__current;
}

/* Helper function to simply allocate a VMA. */
static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
					unsigned long start,
					unsigned long end,
					pgoff_t pgoff,
					vm_flags_t flags)
{
	struct vm_area_struct *ret = vm_area_alloc(mm);

	if (ret == NULL)
		return NULL;

	ret->vm_start = start;
	ret->vm_end = end;
	ret->vm_pgoff = pgoff;
	ret->__vm_flags = flags;

	return ret;
}

/* Helper function to allocate a VMA and link it to the tree. */
static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end,
						 pgoff_t pgoff,
						 vm_flags_t flags)
{
	struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);

	if (vma == NULL)
		return NULL;

	if (vma_link(mm, vma)) {
		vm_area_free(vma);
		return NULL;
	}

	/*
	 * Reset this counter which we use to track whether writes have
	 * begun. Linking to the tree will have caused this to be incremented,
	 * which means we will get a false positive otherwise.
	 */
	vma->vm_lock_seq = -1;

	return vma;
}

/* Helper function which provides a wrapper around a merge new VMA operation. */
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{
	/*
	 * For convenience, obtain the prev and next VMAs, which the new VMA
	 * merge operation requires.
	 */
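	/*
	 * Walking to next and then back to prev leaves the iterator over prev,
	 * so step it forward once more so that it sits over the proposed new
	 * range, which is where vma_merge_new_range() expects to find it.
	 */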
	vmg->next = vma_next(vmg->vmi);
	vmg->prev = vma_prev(vmg->vmi);
	vma_iter_next_range(vmg->vmi);

	return vma_merge_new_range(vmg);
}

/*
 * Helper function which provides a wrapper around a merge existing VMA
 * operation.
 */
static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
{
	return vma_merge_existing_range(vmg);
}

/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
	return vma_expand(vmg);
}

/*
 * Helper function to reset the merge state and the associated VMA iterator to
 * a specified new range.
 */
static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
			  unsigned long end, pgoff_t pgoff, vm_flags_t flags)
{
	vma_iter_set(vmg->vmi, start);

	vmg->prev = NULL;
	vmg->next = NULL;
	vmg->vma = NULL;

	vmg->start = start;
	vmg->end = end;
	vmg->pgoff = pgoff;
	vmg->flags = flags;
}

/*
 * Helper function to try to merge a new VMA.
 *
 * Update vmg and the iterator for it and try to merge, otherwise allocate a new
 * VMA, link it to the maple tree and return it.
 */
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
						struct vma_merge_struct *vmg,
						unsigned long start, unsigned long end,
						pgoff_t pgoff, vm_flags_t flags,
						bool *was_merged)
{
	struct vm_area_struct *merged;

	vmg_set_range(vmg, start, end, pgoff, flags);

	merged = merge_new(vmg);
	if (merged) {
		*was_merged = true;
		ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
		return merged;
	}

	*was_merged = false;

	ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);

	return alloc_and_link_vma(mm, start, end, pgoff, flags);
}

/*
 * Helper function to reset the dummy anon_vma to indicate it has not been
 * duplicated.
 */
static void reset_dummy_anon_vma(void)
{
	dummy_anon_vma.was_cloned = false;
	dummy_anon_vma.was_unlinked = false;
}

/*
 * Helper function to remove all VMAs and destroy the maple tree associated with
 * a virtual address space. Returns a count of VMAs in the tree.
 */
static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
{
	struct vm_area_struct *vma;
	int count = 0;

	fail_prealloc = false;
	reset_dummy_anon_vma();

	vma_iter_set(vmi, 0);
	for_each_vma(*vmi, vma) {
		vm_area_free(vma);
		count++;
	}

	mtree_destroy(&mm->mm_mt);
	mm->map_count = 0;
	return count;
}
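
/*
 * Each test constructs its own mm_struct; cleanup_mm() both frees every VMA
 * remaining in the tree and resets global test state (fail_prealloc and the
 * dummy anon_vma flags).
 */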

/* Helper function to determine if VMA has had vma_start_write() performed. */
static bool vma_write_started(struct vm_area_struct *vma)
{
	int seq = vma->vm_lock_seq;

	/* We reset after each check. */
	vma->vm_lock_seq = -1;

	/* The vma_start_write() stub simply increments this value. */
	return seq > -1;
}

/* Helper function providing a dummy vm_ops->close() method. */
static void dummy_close(struct vm_area_struct *)
{
}

static bool test_simple_merge(void)
{
	struct vm_area_struct *vma;
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
		.start = 0x1000,
		.end = 0x2000,
		.flags = flags,
		.pgoff = 1,
	};
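
	/*
	 * The proposed range [0x1000, 0x2000) exactly fills the gap between
	 * vma_left and vma_right, with identical flags and a contiguous pgoff,
	 * so all three should merge into a single VMA spanning [0, 0x3000).
	 */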

	ASSERT_FALSE(vma_link(&mm, vma_left));
	ASSERT_FALSE(vma_link(&mm, vma_right));

	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_flags, flags);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_modify(void)
{
	struct vm_area_struct *vma;
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);

	ASSERT_FALSE(vma_link(&mm, init_vma));

	/*
	 * The flags will not be changed; the vma_modify_flags() function
	 * performs the merge/split only.
	 */
	vma = vma_modify_flags(&vmi, init_vma, init_vma,
			       0x1000, 0x2000, VM_READ | VM_MAYREAD);
	ASSERT_NE(vma, NULL);
	/* We modify the provided VMA, and on split allocate new VMAs. */
	ASSERT_EQ(vma, init_vma);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	/*
	 * Now walk through the three split VMAs and make sure they are as
	 * expected.
	 */

	vma_iter_set(&vmi, 0);
	vma = vma_iter_load(&vmi);

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	vm_area_free(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_expand(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.vmi = &vmi,
		.vma = vma,
		.start = 0,
		.end = 0x3000,
		.pgoff = 0,
	};
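
	/*
	 * vma currently spans [0, 0x1000); expanding it in place to [0, 0x3000)
	 * should succeed, as the address space beyond it is unmapped.
	 */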

	ASSERT_FALSE(vma_link(&mm, vma));

	ASSERT_FALSE(expand_existing(&vmg));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_simple_shrink(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
	VMA_ITERATOR(vmi, &mm, 0);

	ASSERT_FALSE(vma_link(&mm, vma));

	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	vm_area_free(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}

static bool test_merge_new(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_a = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_b = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_c = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_d = {
		.anon_vma = &dummy_anon_vma,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	int count;
	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
	bool merged;

	/*
	 * 0123456789abc
	 * AA B       CC
	 */
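	/*
	 * In these diagrams each character column represents one page (0x1000)
	 * of address space: letters mark pages occupied by the named VMAs and
	 * '*' marks the range a merge is being attempted over.
	 */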
	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	ASSERT_NE(vma_a, NULL);
	/* We give each VMA a single avc so we can test anon_vma duplication. */
	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
	ASSERT_NE(vma_b, NULL);
	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
	ASSERT_NE(vma_c, NULL);
	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

	/*
	 * NO merge.
	 *
	 * 0123456789abc
	 * AA B   **  CC
	 */
	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
	ASSERT_NE(vma_d, NULL);
	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
	ASSERT_FALSE(merged);
	ASSERT_EQ(mm.map_count, 4);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AA*B   DD  CC
	 */
	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
	vma_b->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete B. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	/*
	 * Merge to PREVIOUS VMA.
	 *
	 * 0123456789abc
	 * AAAA*  DD  CC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAA *DD  CC
	 */
	vma_d->anon_vma = &dummy_anon_vma;
	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
	ASSERT_EQ(vma, vma_d);
	/* Prepend. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAA*DDD  CC
	 */
	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete D. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAAAAAA *CC
	 */
	vma_c->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
	ASSERT_EQ(vma, vma_c);
	/* Prepend C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0xa000);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0xa);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAAAAAA*CCC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A and delete C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	/*
	 * Final state.
	 *
	 * 0123456789abc
	 * AAAAAAAAAAAAA
	 */

	count = 0;
	vma_iter_set(&vmi, 0);
	for_each_vma(vmi, vma) {
		ASSERT_NE(vma, NULL);
		ASSERT_EQ(vma->vm_start, 0);
		ASSERT_EQ(vma->vm_end, 0xc000);
		ASSERT_EQ(vma->vm_pgoff, 0);
		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

		vm_area_free(vma);
		count++;
	}

	/* Should only have one VMA left (though freed) after all is done. */
	ASSERT_EQ(count, 1);

	mtree_destroy(&mm.mm_mt);
	return true;
}

static bool test_vma_merge_special_flags(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
	vm_flags_t all_special_flags = 0;
	int i;
	struct vm_area_struct *vma_left, *vma;

	/* Make sure there aren't new VM_SPECIAL flags. */
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		all_special_flags |= special_flags[i];
	}
	ASSERT_EQ(all_special_flags, VM_SPECIAL);
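
	/*
	 * A VMA carrying any VM_SPECIAL flag must never be merged, so each of
	 * the flags below should defeat an otherwise perfectly mergeable setup.
	 */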

	/*
	 * 01234
	 * AAA
	 */
	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	ASSERT_NE(vma_left, NULL);

	/* 1. Set up new VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAA*
	 *
	 * This should merge if not for the VM_SPECIAL flag.
	 */
	vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vma_left->__vm_flags = flags | special_flag;
		vmg.flags = flags | special_flag;
		vma = merge_new(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	/* 2. Modify VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAAB
	 *
	 * Create a VMA to modify.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
	ASSERT_NE(vma, NULL);
	vmg.vma = vma;

	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vma_left->__vm_flags = flags | special_flag;
		vmg.flags = flags | special_flag;
		vma = merge_existing(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_vma_merge_with_close(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/*
	 * When merging VMAs we are not permitted to remove any VMA that has a
	 * vm_ops->close() hook.
	 *
	 * Considering the two possible adjacent VMAs to which a VMA can be
	 * merged:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * In no case will we need to delete prev. If the operation is
	 * mergeable, then prev will be extended with one or both of vma and
	 * next deleted.
	 *
	 * As a result, during initial mergeability checks, only
	 * can_vma_merge_before() (which implies the VMA being merged with is
	 * 'next' as shown above) bothers to check to see whether the next VMA
	 * has a vm_ops->close() callback that will need to be called when
	 * removed.
	 *
	 * If it does, then we cannot merge as the resources that the close()
	 * operation potentially clears down are tied only to the existing VMA
	 * range and we have no way of extending those to the newly merged one.
	 *
	 * We must consider two scenarios:
	 *
	 * A.
	 *
	 * vm_ops->close:     -       -    !NULL
	 *                 [ prev ][ vma ][ next ]
	 *
	 * Where prev may or may not be present/mergeable.
	 *
	 * This is picked up by a specific check in can_vma_merge_before().
	 *
	 * B.
	 *
	 * vm_ops->close:     -     !NULL
	 *                 [ prev ][ vma ]
	 *
	 * Where prev and vma are present and mergeable.
	 *
	 * This is picked up by a specific check in the modified VMA merge.
	 *
	 * IMPORTANT NOTE: We make the assumption that the following case:
	 *
	 *                    -     !NULL  NULL
	 *                 [ prev ][ vma ][ next ]
	 *
	 * Cannot occur, because vma->vm_ops being the same implies the same
	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
	 * would be set too, and thus scenario A would pick this up.
	 */

	/*
	 * The only case of a new VMA merge that results in a VMA being deleted
	 * is one where both the previous and next VMAs are merged - in this
	 * instance the next VMA is deleted, and the previous VMA is extended.
	 *
	 * If we are unable to do so, we reduce the operation to simply
	 * extending the prev VMA and not merging next.
	 *
	 * 0123456789
	 * PPP**NNNN
	 * ->
	 * 0123456789
	 * PPPPPPNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * When modifying an existing VMA there are further cases where we
	 * delete VMAs.
	 *
	 *    <>
	 * 0123456789
	 * PPPVV
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	/*
	 * The VMA being modified in a way that would otherwise merge should
	 * also fail.
	 */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * This case is mirrored if merging with next.
	 *
	 *    <>
	 * 0123456789
	 *    VVNNNN
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	/*
	 * Initially this is misapprehended as an out of memory report, as the
	 * close() check is handled in the same way as anon_vma duplication
	 * failures; however, a subsequent patch resolves this.
	 */
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Finally, we consider two variants of the case where we modify a VMA
	 * to merge with both the previous and next VMAs.
	 *
	 * The first variant is where vma has a close hook. In this instance, no
	 * merge can proceed.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	/*
	 * The second variant is where next has a close hook. In this instance,
	 * we reduce the operation to a merge between prev and vma.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}

static bool test_vma_merge_new_with_close(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma;

	/*
	 * We should allow the partial merge of a proposed new VMA if the
	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
	 * compatible), e.g.:
	 *
	 *        New VMA
	 *    A  v-------v  B
	 * |-----|       |-----|
	 *  close         close
	 *
	 * Since the rule is to not DELETE a VMA with a close operation, this
	 * should be permitted, only rather than expanding A and deleting B, we
	 * should simply expand A and leave B intact, e.g.:
	 *
	 *        New VMA
	 *      A          B
	 * |------------||-----|
	 *   close        close
	 */

	/* Have prev and next have a vm_ops->close() hook. */
	vma_prev->vm_ops = &vm_ops;
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_ops, &vm_ops);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_merge_existing(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};

	/*
	 * Merge right case - partial span.
	 *
	 *    <->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   VNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
	vmg.vma = vma;
	vmg.prev = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 3);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 2);

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge right case - full span.
	 *
	 *   <-->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   NNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x2000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 2);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 *
	 *    <->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPV
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge both case.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;
	vma->anon_vma = &dummy_anon_vma;
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);

	/* Clear down and reset. We should have deleted vma and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. The modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA, so we need only
	 * examine these cases.
	 *
	 *     -
	 *      -
	 *       -
	 *     <->
	 *     <>
	 *      <>
	 * 0123456789a
	 * PPPVVVVVNNN
	 */
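	/*
	 * Each of the ranges tested below lies strictly inside vma, touching
	 * neither prev nor next, so no merge should take place.
	 */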

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);

	vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
	vmg.prev = vma;
	vmg.vma = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}

static bool test_anon_vma_non_mergeable(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain1 = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain2 = {
		.anon_vma = &dummy_anon_vma,
	};

	/*
	 * In the case of modified VMA merge, merging both left and right VMAs
	 * but where prev and next have incompatible anon_vma objects, we revert
	 * to a merge of prev and VMA:
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);

	/*
	 * Give both prev and next single anon_vma_chain fields, so they will
	 * merge with the NULL vmg->anon_vma.
	 *
	 * However, when prev is compared to next, the merge should fail.
	 */

	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
	ASSERT_TRUE(list_is_singular(&vma_prev->anon_vma_chain));
	vma_prev->anon_vma = &dummy_anon_vma;
	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_prev->anon_vma, vma_prev));

	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
	ASSERT_TRUE(list_is_singular(&vma_next->anon_vma_chain));
	vma_next->anon_vma = (struct anon_vma *)2;
	ASSERT_TRUE(is_mergeable_anon_vma(NULL, vma_next->anon_vma, vma_next));
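
	/*
	 * A bogus pointer value is fine here: is_mergeable_anon_vma() only
	 * compares pointer values (and permits NULL when the VMA's
	 * anon_vma_chain is singular), so it is never dereferenced.
	 */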

	ASSERT_FALSE(is_mergeable_anon_vma(vma_prev->anon_vma, vma_next->anon_vma, NULL));

	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Now consider the new VMA case. This is equivalent, only adding a new
	 * VMA in a gap between prev and next.
	 *
	 *    <-->
	 * 0123456789
	 * PPP****NNN
	 * ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);

	INIT_LIST_HEAD(&vma_prev->anon_vma_chain);
	list_add(&dummy_anon_vma_chain1.same_vma, &vma_prev->anon_vma_chain);
	vma_prev->anon_vma = (struct anon_vma *)1;

	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain2.same_vma, &vma_next->anon_vma_chain);
	vma_next->anon_vma = (struct anon_vma *)2;

	vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
	vmg.prev = vma_prev;

	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Final cleanup. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}

static bool test_dup_anon_vma(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA to delete the next one duplicates next's anon_vma and
	 * assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
	vmg.vma = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*************
	 *   prev       vma
	 *  extend  shrink/delete
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 *      |<----->|
	 * *************-------|
	 *      vma        next
	 * shrink/delete  extend
	 */

	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);

	vma->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma;
	vmg.vma = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_vmi_prealloc_fail(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vmi preallocation to fail and assert
	 * the duplicated anon_vma is unlinked.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.vma = vma;

	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
	vmg.vma = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_merge_extend(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vm_area_struct *vma;

	vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags);
	alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);

	/*
	 * Extend a VMA into the gap between itself and the following VMA.
	 * This should result in a merge.
	 *
	 *  <->
	 * *  *
	 *
	 */

	ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_copy_vma(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	bool need_locks = false;
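	/*
	 * copy_vma() uses need_locks to report whether rmap locks would be
	 * needed when moving page tables; these tests do not act on the value.
	 */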
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_new, *vma_next;

	/* Move backwards and do not merge. */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);

	ASSERT_NE(vma_new, vma);
	ASSERT_EQ(vma_new->vm_start, 0);
	ASSERT_EQ(vma_new->vm_end, 0x2000);
	ASSERT_EQ(vma_new->vm_pgoff, 0);

	cleanup_mm(&mm, &vmi);

	/* Move a VMA into position next to another and merge the two. */

	vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
	vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);

	ASSERT_EQ(vma_new, vma_next);

	cleanup_mm(&mm, &vmi);
	return true;
}

static bool test_expand_only_mode(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
	vmg.prev = vma_prev;
	vmg.merge_flags = VMG_FLAG_JUST_EXPAND;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);

	cleanup_mm(&mm, &vmi);
	return true;
}

int main(void)
{
	int num_tests = 0, num_fail = 0;

	maple_tree_init();

#define TEST(name) \
	do { \
		num_tests++; \
		if (!test_##name()) { \
			num_fail++; \
			fprintf(stderr, "Test " #name " FAILED\n"); \
		} \
	} while (0)

	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(copy_vma);
	TEST(expand_only_mode);

#undef TEST

	printf("%d tests run, %d passed, %d failed.\n",
	       num_tests, num_tests - num_fail, num_fail);

	return num_fail == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}