v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC700 mmap
 *
 * (started from arm version - for VIPT alias handling)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>

#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

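/*
 * Note on COLOUR_ALIGN() (explanatory sketch): the first term rounds the
 * candidate address up to the next SHMLBA boundary; the second adds back
 * the "colour" of the requested file page, i.e. the offset of
 * (pgoff << PAGE_SHIFT) within an SHMLBA-sized window.  The result is
 * page aligned and places the page at the same cache colour in every
 * address space that maps it.
 */
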
/*
 * Ensure that shared mappings are correctly aligned to avoid aliasing
 * issues with VIPT caches. We need to ensure that a specific page of an
 * object is always mapped at a multiple of SHMLBA bytes.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if D cache aliases.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
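
To make the colour arithmetic above concrete, here is a minimal userspace sketch of the same COLOUR_ALIGN() logic. The page size and SHMLBA values are assumptions chosen for illustration (4 KiB pages, a 16 KiB / four-colour SHMLBA), not values taken from any particular ARC700 configuration:

#include <stdio.h>

/* Assumed, illustrative values: 4 KiB pages, 16 KiB SHMLBA (4 colours). */
#define PAGE_SHIFT	12
#define SHMLBA		0x4000UL

#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr  = 0x12345;	/* arbitrary mmap() hint */
	unsigned long pgoff = 5;	/* file page 5 -> colour 1 of 4 */
	unsigned long out   = COLOUR_ALIGN(addr, pgoff);

	/* 0x12345 rounds up to 0x14000; colour offset 0x1000 gives 0x15000 */
	printf("aligned hint = %#lx\n", out);
	printf("colour bits  = %#lx (must equal %#lx)\n",
	       out & (SHMLBA - 1), (pgoff << PAGE_SHIFT) & (SHMLBA - 1));
	return 0;
}

When the hint cannot be used, the function hands the same constraint to vm_unmapped_area(): align_mask keeps only the colour bits between PAGE_SIZE and SHMLBA (PAGE_MASK & (SHMLBA - 1)), and align_offset supplies the colour implied by the file offset (pgoff << PAGE_SHIFT).
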
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC700 mmap
 *
 * (started from arm version - for VIPT alias handling)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>

/*
 * Ensure that shared mappings are correctly aligned to avoid aliasing
 * issues with VIPT caches. We need to ensure that a specific page of an
 * object is always mapped at a multiple of SHMLBA bytes.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff,
		unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_U_NONE,
	[VM_READ]					= PAGE_U_R,
	[VM_WRITE]					= PAGE_U_R,
	[VM_WRITE | VM_READ]				= PAGE_U_R,
	[VM_EXEC]					= PAGE_U_X_R,
	[VM_EXEC | VM_READ]				= PAGE_U_X_R,
	[VM_EXEC | VM_WRITE]				= PAGE_U_X_R,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_U_X_R,
	[VM_SHARED]					= PAGE_U_NONE,
	[VM_SHARED | VM_READ]				= PAGE_U_R,
	[VM_SHARED | VM_WRITE]				= PAGE_U_W_R,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_U_W_R,
	[VM_SHARED | VM_EXEC]				= PAGE_U_X_R,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_U_X_R,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_U_X_W_R,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_U_X_W_R
};
DECLARE_VM_GET_PAGE_PROT
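
protection_map translates the low vm_flags bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED) into ARC user page protections; as the table shows, private writable combinations stay read-only (PAGE_U_R) and only become writable via copy-on-write faults, while shared writable mappings get PAGE_U_W_R directly. DECLARE_VM_GET_PAGE_PROT then emits the generic vm_get_page_prot() lookup over this table; roughly, it expands to something like the following (a sketch of the common helper from the generic pgtable header, not ARC-specific code):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* Only the four permission/sharing bits select the entry. */
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);

Paths such as mmap() and mprotect() go through vm_get_page_prot() to turn a VMA's access flags into the hardware PTE protection bits.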