// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>

#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <vdso/datapage.h>
#include <generated/vdso-offsets.h>

extern char vdso_start[], vdso_end[];

/* Kernel-provided data used by the VDSO. */
static union vdso_data_store generic_vdso_data __page_aligned_data;

static union {
	u8 page[LOONGARCH_VDSO_DATA_SIZE];
	struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;

static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = generic_vdso_data.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;

static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)(new_vma->vm_start);

	return 0;
}

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn;
	struct page *timens_page = find_timens_vvar_page(vma);

	switch (vmf->pgoff) {
	case VVAR_GENERIC_PAGE_OFFSET:
		if (!timens_page)
			pfn = sym_to_pfn(vdso_data);
		else
			pfn = page_to_pfn(timens_page);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a
		 * namespace-specific VVAR page is mapped at
		 * VVAR_GENERIC_PAGE_OFFSET and the real VVAR page is
		 * mapped at VVAR_TIMENS_PAGE_OFFSET.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
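		/*
		 * Architecture-specific pages of the VVAR area. On LoongArch
		 * these back loongarch_vdso_data, i.e. the per-CPU data
		 * (each CPU's NUMA node ID, filled in by init_vdso() below)
		 * read by the vDSO getcpu() fast path.
		 */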
		pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
		break;
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

struct loongarch_vdso_info vdso_info = {
	.vdso = vdso_start,
	.size = PAGE_SIZE,
	.code_mapping = {
		.name = "[vdso]",
		.pages = vdso_pages,
		.mremap = vdso_mremap,
	},
	.data_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	.offset_sigreturn = vdso_offset_sigreturn,
};

static int __init init_vdso(void)
{
	unsigned long i, cpu, pfn;

	BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
	BUG_ON(!PAGE_ALIGNED(vdso_info.size));

	for_each_possible_cpu(cpu)
		vdso_pdata[cpu].node = cpu_to_node(cpu);

	pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
	for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
		vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);

	return 0;
}
subsys_initcall(init_vdso);

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a
 * task changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
			zap_vma_pages(vma);
	}
	mmap_read_unlock(mm);

	return 0;
}
#endif

static unsigned long vdso_base(void)
{
	unsigned long base = STACK_TOP;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
		base = PAGE_ALIGN(base);
	}

	return base;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long size, data_addr, vdso_addr;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct loongarch_vdso_info *info = current->thread.vdso;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/*
	 * Determine the total area size: the VVAR data pages followed by
	 * the VDSO code itself.
	 */
	size = VVAR_SIZE + info->size;

	data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(data_addr)) {
		ret = data_addr;
		goto out;
	}

	vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
				       VM_READ | VM_MAYREAD | VM_PFNMAP,
				       &info->data_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	vdso_addr = data_addr + VVAR_SIZE;
	vma = _install_special_mapping(mm, vdso_addr, info->size,
				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &info->code_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}
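
/*
 * For reference, a minimal userspace sketch (an illustration, not part of
 * the kernel build, assuming a libc that provides <sys/auxv.h>) of how a
 * process locates the [vdso] image that arch_setup_additional_pages()
 * mapped for it: the kernel advertises the base address through the
 * AT_SYSINFO_EHDR entry of the auxiliary vector.
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);
 *
 *		if (ehdr)
 *			printf("[vdso] ELF image mapped at %#lx\n", ehdr);
 *		return 0;
 *	}
 */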