/*
 * ARC700 mmap
 *
 * (started from arm version - for VIPT alias handling)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12
13#include <linux/fs.h>
14#include <linux/mm.h>
15#include <linux/mman.h>
16#include <linux/sched.h>
17#include <asm/cacheflush.h>
18
/*
 * Round @addr up to the next SHMLBA boundary, then add the cache-colour
 * offset implied by @pgoff, so the returned address is colour-congruent
 * with the file offset (same low SHMLBA bits).
 */
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
22
23/*
24 * Ensure that shared mappings are correctly aligned to
25 * avoid aliasing issues with VIPT caches.
26 * We need to ensure that
27 * a specific page of an object is always mapped at a multiple of
28 * SHMLBA bytes.
29 */
30unsigned long
31arch_get_unmapped_area(struct file *filp, unsigned long addr,
32 unsigned long len, unsigned long pgoff, unsigned long flags)
33{
34 struct mm_struct *mm = current->mm;
35 struct vm_area_struct *vma;
36 int do_align = 0;
37 int aliasing = cache_is_vipt_aliasing();
38 struct vm_unmapped_area_info info;
39
40 /*
41 * We only need to do colour alignment if D cache aliases.
42 */
43 if (aliasing)
44 do_align = filp || (flags & MAP_SHARED);
45
46 /*
47 * We enforce the MAP_FIXED case.
48 */
49 if (flags & MAP_FIXED) {
50 if (aliasing && flags & MAP_SHARED &&
51 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
52 return -EINVAL;
53 return addr;
54 }
55
56 if (len > TASK_SIZE)
57 return -ENOMEM;
58
59 if (addr) {
60 if (do_align)
61 addr = COLOUR_ALIGN(addr, pgoff);
62 else
63 addr = PAGE_ALIGN(addr);
64
65 vma = find_vma(mm, addr);
66 if (TASK_SIZE - len >= addr &&
67 (!vma || addr + len <= vma->vm_start))
68 return addr;
69 }
70
71 info.flags = 0;
72 info.length = len;
73 info.low_limit = mm->mmap_base;
74 info.high_limit = TASK_SIZE;
75 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
76 info.align_offset = pgoff << PAGE_SHIFT;
77 return vm_unmapped_area(&info);
78}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC700 mmap
 *
 * (started from arm version - for VIPT alias handling)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */
9
10#include <linux/fs.h>
11#include <linux/mm.h>
12#include <linux/mman.h>
13#include <linux/sched/mm.h>
14
15#include <asm/cacheflush.h>
16
17/*
18 * Ensure that shared mappings are correctly aligned to
19 * avoid aliasing issues with VIPT caches.
20 * We need to ensure that
21 * a specific page of an object is always mapped at a multiple of
22 * SHMLBA bytes.
23 */
24unsigned long
25arch_get_unmapped_area(struct file *filp, unsigned long addr,
26 unsigned long len, unsigned long pgoff,
27 unsigned long flags, vm_flags_t vm_flags)
28{
29 struct mm_struct *mm = current->mm;
30 struct vm_area_struct *vma;
31 struct vm_unmapped_area_info info = {};
32
33 /*
34 * We enforce the MAP_FIXED case.
35 */
36 if (flags & MAP_FIXED) {
37 if (flags & MAP_SHARED &&
38 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
39 return -EINVAL;
40 return addr;
41 }
42
43 if (len > TASK_SIZE)
44 return -ENOMEM;
45
46 if (addr) {
47 addr = PAGE_ALIGN(addr);
48
49 vma = find_vma(mm, addr);
50 if (TASK_SIZE - len >= addr &&
51 (!vma || addr + len <= vm_start_gap(vma)))
52 return addr;
53 }
54
55 info.length = len;
56 info.low_limit = mm->mmap_base;
57 info.high_limit = TASK_SIZE;
58 info.align_offset = pgoff << PAGE_SHIFT;
59 return vm_unmapped_area(&info);
60}
61
62static const pgprot_t protection_map[16] = {
63 [VM_NONE] = PAGE_U_NONE,
64 [VM_READ] = PAGE_U_R,
65 [VM_WRITE] = PAGE_U_R,
66 [VM_WRITE | VM_READ] = PAGE_U_R,
67 [VM_EXEC] = PAGE_U_X_R,
68 [VM_EXEC | VM_READ] = PAGE_U_X_R,
69 [VM_EXEC | VM_WRITE] = PAGE_U_X_R,
70 [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
71 [VM_SHARED] = PAGE_U_NONE,
72 [VM_SHARED | VM_READ] = PAGE_U_R,
73 [VM_SHARED | VM_WRITE] = PAGE_U_W_R,
74 [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
75 [VM_SHARED | VM_EXEC] = PAGE_U_X_R,
76 [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
77 [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
78 [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
79};
80DECLARE_VM_GET_PAGE_PROT