Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2
3/*
4 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 */
7
8#include <linux/errno.h>
9#include <linux/sched.h>
10#include <linux/kernel.h>
11#include <linux/mm.h>
12#include <linux/smp.h>
13#include <linux/stddef.h>
14#include <linux/unistd.h>
15#include <linux/slab.h>
16#include <linux/user.h>
17#include <linux/elf.h>
18#include <linux/security.h>
19#include <linux/memblock.h>
20#include <linux/syscalls.h>
21#include <linux/time_namespace.h>
22#include <vdso/datapage.h>
23
24#include <asm/syscall.h>
25#include <asm/processor.h>
26#include <asm/mmu.h>
27#include <asm/mmu_context.h>
28#include <asm/machdep.h>
29#include <asm/cputable.h>
30#include <asm/sections.h>
31#include <asm/firmware.h>
32#include <asm/vdso.h>
33#include <asm/vdso_datapage.h>
34#include <asm/setup.h>
35
36/* The alignment of the vDSO */
37#define VDSO_ALIGNMENT (1 << 16)
38
39extern char vdso32_start, vdso32_end;
40extern char vdso64_start, vdso64_end;
41
42long sys_ni_syscall(void);
43
44/*
45 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
46 * Once the early boot kernel code no longer needs to muck around
47 * with it, it will become dynamically allocated
48 */
49static union {
50 struct vdso_arch_data data;
51 u8 page[PAGE_SIZE];
52} vdso_data_store __page_aligned_data;
53struct vdso_arch_data *vdso_data = &vdso_data_store.data;
54
55enum vvar_pages {
56 VVAR_DATA_PAGE_OFFSET,
57 VVAR_TIMENS_PAGE_OFFSET,
58 VVAR_NR_PAGES,
59};
60
61static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
62 unsigned long text_size)
63{
64 unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
65
66 if (new_size != text_size)
67 return -EINVAL;
68
69 current->mm->context.vdso = (void __user *)new_vma->vm_start;
70
71 return 0;
72}
73
74static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
75{
76 return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
77}
78
79static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
80{
81 return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
82}
83
84static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
85 struct vm_area_struct *vma, struct vm_fault *vmf);
86
87static struct vm_special_mapping vvar_spec __ro_after_init = {
88 .name = "[vvar]",
89 .fault = vvar_fault,
90};
91
92static struct vm_special_mapping vdso32_spec __ro_after_init = {
93 .name = "[vdso]",
94 .mremap = vdso32_mremap,
95};
96
97static struct vm_special_mapping vdso64_spec __ro_after_init = {
98 .name = "[vdso]",
99 .mremap = vdso64_mremap,
100};
101
102#ifdef CONFIG_TIME_NS
103struct vdso_data *arch_get_vdso_data(void *vvar_page)
104{
105 return ((struct vdso_arch_data *)vvar_page)->data;
106}
107
108/*
109 * The vvar mapping contains data for a specific time namespace, so when a task
110 * changes namespace we must unmap its vvar data for the old namespace.
111 * Subsequent faults will map in data for the new namespace.
112 *
113 * For more details see timens_setup_vdso_data().
114 */
115int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
116{
117 struct mm_struct *mm = task->mm;
118 VMA_ITERATOR(vmi, mm, 0);
119 struct vm_area_struct *vma;
120
121 mmap_read_lock(mm);
122 for_each_vma(vmi, vma) {
123 unsigned long size = vma->vm_end - vma->vm_start;
124
125 if (vma_is_special_mapping(vma, &vvar_spec))
126 zap_page_range(vma, vma->vm_start, size);
127 }
128 mmap_read_unlock(mm);
129
130 return 0;
131}
132#endif
133
134static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
135 struct vm_area_struct *vma, struct vm_fault *vmf)
136{
137 struct page *timens_page = find_timens_vvar_page(vma);
138 unsigned long pfn;
139
140 switch (vmf->pgoff) {
141 case VVAR_DATA_PAGE_OFFSET:
142 if (timens_page)
143 pfn = page_to_pfn(timens_page);
144 else
145 pfn = virt_to_pfn(vdso_data);
146 break;
147#ifdef CONFIG_TIME_NS
148 case VVAR_TIMENS_PAGE_OFFSET:
149 /*
150 * If a task belongs to a time namespace then a namespace
151 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
152 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
153 * offset.
154 * See also the comment near timens_setup_vdso_data().
155 */
156 if (!timens_page)
157 return VM_FAULT_SIGBUS;
158 pfn = virt_to_pfn(vdso_data);
159 break;
160#endif /* CONFIG_TIME_NS */
161 default:
162 return VM_FAULT_SIGBUS;
163 }
164
165 return vmf_insert_pfn(vma, vmf->address, pfn);
166}
167
168/*
169 * This is called from binfmt_elf, we create the special vma for the
170 * vDSO and insert it into the mm struct tree
171 */
172static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
173{
174 unsigned long vdso_size, vdso_base, mappings_size;
175 struct vm_special_mapping *vdso_spec;
176 unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
177 struct mm_struct *mm = current->mm;
178 struct vm_area_struct *vma;
179
180 if (is_32bit_task()) {
181 vdso_spec = &vdso32_spec;
182 vdso_size = &vdso32_end - &vdso32_start;
183 } else {
184 vdso_spec = &vdso64_spec;
185 vdso_size = &vdso64_end - &vdso64_start;
186 }
187
188 mappings_size = vdso_size + vvar_size;
189 mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
190
191 /*
192 * Pick a base address for the vDSO in process space.
193 * Add enough to the size so that the result can be aligned.
194 */
195 vdso_base = get_unmapped_area(NULL, 0, mappings_size, 0, 0);
196 if (IS_ERR_VALUE(vdso_base))
197 return vdso_base;
198
199 /* Add required alignment. */
200 vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
201
202 /*
203 * Put vDSO base into mm struct. We need to do this before calling
204 * install_special_mapping or the perf counter mmap tracking code
205 * will fail to recognise it as a vDSO.
206 */
207 mm->context.vdso = (void __user *)vdso_base + vvar_size;
208
209 vma = _install_special_mapping(mm, vdso_base, vvar_size,
210 VM_READ | VM_MAYREAD | VM_IO |
211 VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
212 if (IS_ERR(vma))
213 return PTR_ERR(vma);
214
215 /*
216 * our vma flags don't have VM_WRITE so by default, the process isn't
217 * allowed to write those pages.
218 * gdb can break that with ptrace interface, and thus trigger COW on
219 * those pages but it's then your responsibility to never do that on
220 * the "data" page of the vDSO or you'll stop getting kernel updates
221 * and your nice userland gettimeofday will be totally dead.
222 * It's fine to use that for setting breakpoints in the vDSO code
223 * pages though.
224 */
225 vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
226 VM_READ | VM_EXEC | VM_MAYREAD |
227 VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
228 if (IS_ERR(vma))
229 do_munmap(mm, vdso_base, vvar_size, NULL);
230
231 return PTR_ERR_OR_ZERO(vma);
232}
233
234int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
235{
236 struct mm_struct *mm = current->mm;
237 int rc;
238
239 mm->context.vdso = NULL;
240
241 if (mmap_write_lock_killable(mm))
242 return -EINTR;
243
244 rc = __arch_setup_additional_pages(bprm, uses_interp);
245 if (rc)
246 mm->context.vdso = NULL;
247
248 mmap_write_unlock(mm);
249 return rc;
250}
251
252#define VDSO_DO_FIXUPS(type, value, bits, sec) do { \
253 void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start); \
254 void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end); \
255 \
256 do_##type##_fixups((value), __start, __end); \
257} while (0)
258
259static void __init vdso_fixup_features(void)
260{
261#ifdef CONFIG_PPC64
262 VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
263 VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
264 VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
265 VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
266#endif /* CONFIG_PPC64 */
267
268#ifdef CONFIG_VDSO32
269 VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
270 VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
271#ifdef CONFIG_PPC64
272 VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
273#endif /* CONFIG_PPC64 */
274 VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
275#endif
276}
277
278/*
279 * Called from setup_arch to initialize the bitmap of available
280 * syscalls in the systemcfg page
281 */
282static void __init vdso_setup_syscall_map(void)
283{
284 unsigned int i;
285
286 for (i = 0; i < NR_syscalls; i++) {
287 if (sys_call_table[i] != (void *)&sys_ni_syscall)
288 vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
289 if (IS_ENABLED(CONFIG_COMPAT) &&
290 compat_sys_call_table[i] != (void *)&sys_ni_syscall)
291 vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
292 }
293}
294
295#ifdef CONFIG_PPC64
296int vdso_getcpu_init(void)
297{
298 unsigned long cpu, node, val;
299
300 /*
301 * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
302 * in the next 16 bits. The VDSO uses this to implement getcpu().
303 */
304 cpu = get_cpu();
305 WARN_ON_ONCE(cpu > 0xffff);
306
307 node = cpu_to_node(cpu);
308 WARN_ON_ONCE(node > 0xffff);
309
310 val = (cpu & 0xffff) | ((node & 0xffff) << 16);
311 mtspr(SPRN_SPRG_VDSO_WRITE, val);
312 get_paca()->sprg_vdso = val;
313
314 put_cpu();
315
316 return 0;
317}
318/* We need to call this before SMP init */
319early_initcall(vdso_getcpu_init);
320#endif
321
322static struct page ** __init vdso_setup_pages(void *start, void *end)
323{
324 int i;
325 struct page **pagelist;
326 int pages = (end - start) >> PAGE_SHIFT;
327
328 pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
329 if (!pagelist)
330 panic("%s: Cannot allocate page list for VDSO", __func__);
331
332 for (i = 0; i < pages; i++)
333 pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
334
335 return pagelist;
336}
337
338static int __init vdso_init(void)
339{
340#ifdef CONFIG_PPC64
341 /*
342 * Fill up the "systemcfg" stuff for backward compatibility
343 */
344 strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
345 vdso_data->version.major = SYSTEMCFG_MAJOR;
346 vdso_data->version.minor = SYSTEMCFG_MINOR;
347 vdso_data->processor = mfspr(SPRN_PVR);
348 /*
349 * Fake the old platform number for pSeries and add
350 * in LPAR bit if necessary
351 */
352 vdso_data->platform = 0x100;
353 if (firmware_has_feature(FW_FEATURE_LPAR))
354 vdso_data->platform |= 1;
355 vdso_data->physicalMemorySize = memblock_phys_mem_size();
356 vdso_data->dcache_size = ppc64_caches.l1d.size;
357 vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
358 vdso_data->icache_size = ppc64_caches.l1i.size;
359 vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
360 vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
361 vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
362 vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
363 vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
364#endif /* CONFIG_PPC64 */
365
366 vdso_setup_syscall_map();
367
368 vdso_fixup_features();
369
370 if (IS_ENABLED(CONFIG_VDSO32))
371 vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);
372
373 if (IS_ENABLED(CONFIG_PPC64))
374 vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
375
376 smp_wmb();
377
378 return 0;
379}
380arch_initcall(vdso_init);
1
2/*
3 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
4 * <benh@kernel.crashing.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/errno.h>
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
17#include <linux/stddef.h>
18#include <linux/unistd.h>
19#include <linux/slab.h>
20#include <linux/user.h>
21#include <linux/elf.h>
22#include <linux/security.h>
23#include <linux/bootmem.h>
24#include <linux/memblock.h>
25
26#include <asm/pgtable.h>
27#include <asm/processor.h>
28#include <asm/mmu.h>
29#include <asm/mmu_context.h>
30#include <asm/prom.h>
31#include <asm/machdep.h>
32#include <asm/cputable.h>
33#include <asm/sections.h>
34#include <asm/firmware.h>
35#include <asm/vdso.h>
36#include <asm/vdso_datapage.h>
37
38#include "setup.h"
39
40#undef DEBUG
41
42#ifdef DEBUG
43#define DBG(fmt...) printk(fmt)
44#else
45#define DBG(fmt...)
46#endif
47
48/* Max supported size for symbol names */
49#define MAX_SYMNAME 64
50
51/* The alignment of the vDSO */
52#define VDSO_ALIGNMENT (1 << 16)
53
54extern char vdso32_start, vdso32_end;
55static void *vdso32_kbase = &vdso32_start;
56static unsigned int vdso32_pages;
57static struct page **vdso32_pagelist;
58unsigned long vdso32_sigtramp;
59unsigned long vdso32_rt_sigtramp;
60
61#ifdef CONFIG_PPC64
62extern char vdso64_start, vdso64_end;
63static void *vdso64_kbase = &vdso64_start;
64static unsigned int vdso64_pages;
65static struct page **vdso64_pagelist;
66unsigned long vdso64_rt_sigtramp;
67#endif /* CONFIG_PPC64 */
68
69static int vdso_ready;
70
71/*
72 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
73 * Once the early boot kernel code no longer needs to muck around
74 * with it, it will become dynamically allocated
75 */
76static union {
77 struct vdso_data data;
78 u8 page[PAGE_SIZE];
79} vdso_data_store __page_aligned_data;
80struct vdso_data *vdso_data = &vdso_data_store.data;
81
82/* Format of the patch table */
83struct vdso_patch_def
84{
85 unsigned long ftr_mask, ftr_value;
86 const char *gen_name;
87 const char *fix_name;
88};
89
90/* Table of functions to patch based on the CPU type/revision
91 *
92 * Currently, we only change sync_dicache to do nothing on processors
93 * with a coherent icache
94 */
95static struct vdso_patch_def vdso_patches[] = {
96 {
97 CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE,
98 "__kernel_sync_dicache", "__kernel_sync_dicache_p5"
99 },
100 {
101 CPU_FTR_USE_TB, 0,
102 "__kernel_gettimeofday", NULL
103 },
104 {
105 CPU_FTR_USE_TB, 0,
106 "__kernel_clock_gettime", NULL
107 },
108 {
109 CPU_FTR_USE_TB, 0,
110 "__kernel_clock_getres", NULL
111 },
112 {
113 CPU_FTR_USE_TB, 0,
114 "__kernel_get_tbfreq", NULL
115 },
116};
117
118/*
119 * Some infos carried around for each of them during parsing at
120 * boot time.
121 */
122struct lib32_elfinfo
123{
124 Elf32_Ehdr *hdr; /* ptr to ELF */
125 Elf32_Sym *dynsym; /* ptr to .dynsym section */
126 unsigned long dynsymsize; /* size of .dynsym section */
127 char *dynstr; /* ptr to .dynstr section */
128 unsigned long text; /* offset of .text section in .so */
129};
130
131struct lib64_elfinfo
132{
133 Elf64_Ehdr *hdr;
134 Elf64_Sym *dynsym;
135 unsigned long dynsymsize;
136 char *dynstr;
137 unsigned long text;
138};
139
140
141#ifdef __DEBUG
142static void dump_one_vdso_page(struct page *pg, struct page *upg)
143{
144 printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
145 page_count(pg),
146 pg->flags);
147 if (upg && !IS_ERR(upg) /* && pg != upg*/) {
148 printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg)
149 << PAGE_SHIFT),
150 page_count(upg),
151 upg->flags);
152 }
153 printk("\n");
154}
155
156static void dump_vdso_pages(struct vm_area_struct * vma)
157{
158 int i;
159
160 if (!vma || is_32bit_task()) {
161 printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
162 for (i=0; i<vdso32_pages; i++) {
163 struct page *pg = virt_to_page(vdso32_kbase +
164 i*PAGE_SIZE);
165 struct page *upg = (vma && vma->vm_mm) ?
166 follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
167 : NULL;
168 dump_one_vdso_page(pg, upg);
169 }
170 }
171 if (!vma || !is_32bit_task()) {
172 printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
173 for (i=0; i<vdso64_pages; i++) {
174 struct page *pg = virt_to_page(vdso64_kbase +
175 i*PAGE_SIZE);
176 struct page *upg = (vma && vma->vm_mm) ?
177 follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
178 : NULL;
179 dump_one_vdso_page(pg, upg);
180 }
181 }
182}
183#endif /* DEBUG */
184
185/*
186 * This is called from binfmt_elf, we create the special vma for the
187 * vDSO and insert it into the mm struct tree
188 */
189int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
190{
191 struct mm_struct *mm = current->mm;
192 struct page **vdso_pagelist;
193 unsigned long vdso_pages;
194 unsigned long vdso_base;
195 int rc;
196
197 if (!vdso_ready)
198 return 0;
199
200#ifdef CONFIG_PPC64
201 if (is_32bit_task()) {
202 vdso_pagelist = vdso32_pagelist;
203 vdso_pages = vdso32_pages;
204 vdso_base = VDSO32_MBASE;
205 } else {
206 vdso_pagelist = vdso64_pagelist;
207 vdso_pages = vdso64_pages;
208 /*
209 * On 64bit we don't have a preferred map address. This
210 * allows get_unmapped_area to find an area near other mmaps
211 * and most likely share a SLB entry.
212 */
213 vdso_base = 0;
214 }
215#else
216 vdso_pagelist = vdso32_pagelist;
217 vdso_pages = vdso32_pages;
218 vdso_base = VDSO32_MBASE;
219#endif
220
221 current->mm->context.vdso_base = 0;
222
223 /* vDSO has a problem and was disabled, just don't "enable" it for the
224 * process
225 */
226 if (vdso_pages == 0)
227 return 0;
228 /* Add a page to the vdso size for the data page */
229 vdso_pages ++;
230
231 /*
232 * pick a base address for the vDSO in process space. We try to put it
233 * at vdso_base which is the "natural" base for it, but we might fail
234 * and end up putting it elsewhere.
235 * Add enough to the size so that the result can be aligned.
236 */
237 down_write(&mm->mmap_sem);
238 vdso_base = get_unmapped_area(NULL, vdso_base,
239 (vdso_pages << PAGE_SHIFT) +
240 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
241 0, 0);
242 if (IS_ERR_VALUE(vdso_base)) {
243 rc = vdso_base;
244 goto fail_mmapsem;
245 }
246
247 /* Add required alignment. */
248 vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
249
250 /*
251 * Put vDSO base into mm struct. We need to do this before calling
252 * install_special_mapping or the perf counter mmap tracking code
253 * will fail to recognise it as a vDSO (since arch_vma_name fails).
254 */
255 current->mm->context.vdso_base = vdso_base;
256
257 /*
258 * our vma flags don't have VM_WRITE so by default, the process isn't
259 * allowed to write those pages.
260 * gdb can break that with ptrace interface, and thus trigger COW on
261 * those pages but it's then your responsibility to never do that on
262 * the "data" page of the vDSO or you'll stop getting kernel updates
263 * and your nice userland gettimeofday will be totally dead.
264 * It's fine to use that for setting breakpoints in the vDSO code
265 * pages though.
266 */
267 rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
268 VM_READ|VM_EXEC|
269 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
270 vdso_pagelist);
271 if (rc) {
272 current->mm->context.vdso_base = 0;
273 goto fail_mmapsem;
274 }
275
276 up_write(&mm->mmap_sem);
277 return 0;
278
279 fail_mmapsem:
280 up_write(&mm->mmap_sem);
281 return rc;
282}
283
284const char *arch_vma_name(struct vm_area_struct *vma)
285{
286 if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
287 return "[vdso]";
288 return NULL;
289}
290
291
292
293static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
294 unsigned long *size)
295{
296 Elf32_Shdr *sechdrs;
297 unsigned int i;
298 char *secnames;
299
300 /* Grab section headers and strings so we can tell who is who */
301 sechdrs = (void *)ehdr + ehdr->e_shoff;
302 secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
303
304 /* Find the section they want */
305 for (i = 1; i < ehdr->e_shnum; i++) {
306 if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
307 if (size)
308 *size = sechdrs[i].sh_size;
309 return (void *)ehdr + sechdrs[i].sh_offset;
310 }
311 }
312 *size = 0;
313 return NULL;
314}
315
316static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib,
317 const char *symname)
318{
319 unsigned int i;
320 char name[MAX_SYMNAME], *c;
321
322 for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
323 if (lib->dynsym[i].st_name == 0)
324 continue;
325 strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
326 MAX_SYMNAME);
327 c = strchr(name, '@');
328 if (c)
329 *c = 0;
330 if (strcmp(symname, name) == 0)
331 return &lib->dynsym[i];
332 }
333 return NULL;
334}
335
336/* Note that we assume the section is .text and the symbol is relative to
337 * the library base
338 */
339static unsigned long __init find_function32(struct lib32_elfinfo *lib,
340 const char *symname)
341{
342 Elf32_Sym *sym = find_symbol32(lib, symname);
343
344 if (sym == NULL) {
345 printk(KERN_WARNING "vDSO32: function %s not found !\n",
346 symname);
347 return 0;
348 }
349 return sym->st_value - VDSO32_LBASE;
350}
351
352static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
353 struct lib64_elfinfo *v64,
354 const char *orig, const char *fix)
355{
356 Elf32_Sym *sym32_gen, *sym32_fix;
357
358 sym32_gen = find_symbol32(v32, orig);
359 if (sym32_gen == NULL) {
360 printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
361 return -1;
362 }
363 if (fix == NULL) {
364 sym32_gen->st_name = 0;
365 return 0;
366 }
367 sym32_fix = find_symbol32(v32, fix);
368 if (sym32_fix == NULL) {
369 printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
370 return -1;
371 }
372 sym32_gen->st_value = sym32_fix->st_value;
373 sym32_gen->st_size = sym32_fix->st_size;
374 sym32_gen->st_info = sym32_fix->st_info;
375 sym32_gen->st_other = sym32_fix->st_other;
376 sym32_gen->st_shndx = sym32_fix->st_shndx;
377
378 return 0;
379}
380
381
382#ifdef CONFIG_PPC64
383
384static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
385 unsigned long *size)
386{
387 Elf64_Shdr *sechdrs;
388 unsigned int i;
389 char *secnames;
390
391 /* Grab section headers and strings so we can tell who is who */
392 sechdrs = (void *)ehdr + ehdr->e_shoff;
393 secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
394
395 /* Find the section they want */
396 for (i = 1; i < ehdr->e_shnum; i++) {
397 if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
398 if (size)
399 *size = sechdrs[i].sh_size;
400 return (void *)ehdr + sechdrs[i].sh_offset;
401 }
402 }
403 if (size)
404 *size = 0;
405 return NULL;
406}
407
408static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib,
409 const char *symname)
410{
411 unsigned int i;
412 char name[MAX_SYMNAME], *c;
413
414 for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
415 if (lib->dynsym[i].st_name == 0)
416 continue;
417 strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
418 MAX_SYMNAME);
419 c = strchr(name, '@');
420 if (c)
421 *c = 0;
422 if (strcmp(symname, name) == 0)
423 return &lib->dynsym[i];
424 }
425 return NULL;
426}
427
428/* Note that we assume the section is .text and the symbol is relative to
429 * the library base
430 */
431static unsigned long __init find_function64(struct lib64_elfinfo *lib,
432 const char *symname)
433{
434 Elf64_Sym *sym = find_symbol64(lib, symname);
435
436 if (sym == NULL) {
437 printk(KERN_WARNING "vDSO64: function %s not found !\n",
438 symname);
439 return 0;
440 }
441#ifdef VDS64_HAS_DESCRIPTORS
442 return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) -
443 VDSO64_LBASE;
444#else
445 return sym->st_value - VDSO64_LBASE;
446#endif
447}
448
449static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32,
450 struct lib64_elfinfo *v64,
451 const char *orig, const char *fix)
452{
453 Elf64_Sym *sym64_gen, *sym64_fix;
454
455 sym64_gen = find_symbol64(v64, orig);
456 if (sym64_gen == NULL) {
457 printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
458 return -1;
459 }
460 if (fix == NULL) {
461 sym64_gen->st_name = 0;
462 return 0;
463 }
464 sym64_fix = find_symbol64(v64, fix);
465 if (sym64_fix == NULL) {
466 printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
467 return -1;
468 }
469 sym64_gen->st_value = sym64_fix->st_value;
470 sym64_gen->st_size = sym64_fix->st_size;
471 sym64_gen->st_info = sym64_fix->st_info;
472 sym64_gen->st_other = sym64_fix->st_other;
473 sym64_gen->st_shndx = sym64_fix->st_shndx;
474
475 return 0;
476}
477
478#endif /* CONFIG_PPC64 */
479
480
481static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
482 struct lib64_elfinfo *v64)
483{
484 void *sect;
485
486 /*
487 * Locate symbol tables & text section
488 */
489
490 v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
491 v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
492 if (v32->dynsym == NULL || v32->dynstr == NULL) {
493 printk(KERN_ERR "vDSO32: required symbol section not found\n");
494 return -1;
495 }
496 sect = find_section32(v32->hdr, ".text", NULL);
497 if (sect == NULL) {
498 printk(KERN_ERR "vDSO32: the .text section was not found\n");
499 return -1;
500 }
501 v32->text = sect - vdso32_kbase;
502
503#ifdef CONFIG_PPC64
504 v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
505 v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
506 if (v64->dynsym == NULL || v64->dynstr == NULL) {
507 printk(KERN_ERR "vDSO64: required symbol section not found\n");
508 return -1;
509 }
510 sect = find_section64(v64->hdr, ".text", NULL);
511 if (sect == NULL) {
512 printk(KERN_ERR "vDSO64: the .text section was not found\n");
513 return -1;
514 }
515 v64->text = sect - vdso64_kbase;
516#endif /* CONFIG_PPC64 */
517
518 return 0;
519}
520
521static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
522 struct lib64_elfinfo *v64)
523{
524 /*
525 * Find signal trampolines
526 */
527
528#ifdef CONFIG_PPC64
529 vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
530#endif
531 vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32");
532 vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
533}
534
535static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
536 struct lib64_elfinfo *v64)
537{
538 Elf32_Sym *sym32;
539#ifdef CONFIG_PPC64
540 Elf64_Sym *sym64;
541
542 sym64 = find_symbol64(v64, "__kernel_datapage_offset");
543 if (sym64 == NULL) {
544 printk(KERN_ERR "vDSO64: Can't find symbol "
545 "__kernel_datapage_offset !\n");
546 return -1;
547 }
548 *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
549 (vdso64_pages << PAGE_SHIFT) -
550 (sym64->st_value - VDSO64_LBASE);
551#endif /* CONFIG_PPC64 */
552
553 sym32 = find_symbol32(v32, "__kernel_datapage_offset");
554 if (sym32 == NULL) {
555 printk(KERN_ERR "vDSO32: Can't find symbol "
556 "__kernel_datapage_offset !\n");
557 return -1;
558 }
559 *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
560 (vdso32_pages << PAGE_SHIFT) -
561 (sym32->st_value - VDSO32_LBASE);
562
563 return 0;
564}
565
566
567static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
568 struct lib64_elfinfo *v64)
569{
570 void *start32;
571 unsigned long size32;
572
573#ifdef CONFIG_PPC64
574 void *start64;
575 unsigned long size64;
576
577 start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
578 if (start64)
579 do_feature_fixups(cur_cpu_spec->cpu_features,
580 start64, start64 + size64);
581
582 start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64);
583 if (start64)
584 do_feature_fixups(cur_cpu_spec->mmu_features,
585 start64, start64 + size64);
586
587 start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
588 if (start64)
589 do_feature_fixups(powerpc_firmware_features,
590 start64, start64 + size64);
591
592 start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64);
593 if (start64)
594 do_lwsync_fixups(cur_cpu_spec->cpu_features,
595 start64, start64 + size64);
596#endif /* CONFIG_PPC64 */
597
598 start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
599 if (start32)
600 do_feature_fixups(cur_cpu_spec->cpu_features,
601 start32, start32 + size32);
602
603 start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32);
604 if (start32)
605 do_feature_fixups(cur_cpu_spec->mmu_features,
606 start32, start32 + size32);
607
608#ifdef CONFIG_PPC64
609 start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
610 if (start32)
611 do_feature_fixups(powerpc_firmware_features,
612 start32, start32 + size32);
613#endif /* CONFIG_PPC64 */
614
615 start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32);
616 if (start32)
617 do_lwsync_fixups(cur_cpu_spec->cpu_features,
618 start32, start32 + size32);
619
620 return 0;
621}
622
623static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
624 struct lib64_elfinfo *v64)
625{
626 int i;
627
628 for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
629 struct vdso_patch_def *patch = &vdso_patches[i];
630 int match = (cur_cpu_spec->cpu_features & patch->ftr_mask)
631 == patch->ftr_value;
632 if (!match)
633 continue;
634
635 DBG("replacing %s with %s...\n", patch->gen_name,
636 patch->fix_name ? "NONE" : patch->fix_name);
637
638 /*
639 * Patch the 32 bits and 64 bits symbols. Note that we do not
640 * patch the "." symbol on 64 bits.
641 * It would be easy to do, but doesn't seem to be necessary,
642 * patching the OPD symbol is enough.
643 */
644 vdso_do_func_patch32(v32, v64, patch->gen_name,
645 patch->fix_name);
646#ifdef CONFIG_PPC64
647 vdso_do_func_patch64(v32, v64, patch->gen_name,
648 patch->fix_name);
649#endif /* CONFIG_PPC64 */
650 }
651
652 return 0;
653}
654
655
656static __init int vdso_setup(void)
657{
658 struct lib32_elfinfo v32;
659 struct lib64_elfinfo v64;
660
661 v32.hdr = vdso32_kbase;
662#ifdef CONFIG_PPC64
663 v64.hdr = vdso64_kbase;
664#endif
665 if (vdso_do_find_sections(&v32, &v64))
666 return -1;
667
668 if (vdso_fixup_datapage(&v32, &v64))
669 return -1;
670
671 if (vdso_fixup_features(&v32, &v64))
672 return -1;
673
674 if (vdso_fixup_alt_funcs(&v32, &v64))
675 return -1;
676
677 vdso_setup_trampolines(&v32, &v64);
678
679 return 0;
680}
681
682/*
683 * Called from setup_arch to initialize the bitmap of available
684 * syscalls in the systemcfg page
685 */
686static void __init vdso_setup_syscall_map(void)
687{
688 unsigned int i;
689 extern unsigned long *sys_call_table;
690 extern unsigned long sys_ni_syscall;
691
692
693 for (i = 0; i < __NR_syscalls; i++) {
694#ifdef CONFIG_PPC64
695 if (sys_call_table[i*2] != sys_ni_syscall)
696 vdso_data->syscall_map_64[i >> 5] |=
697 0x80000000UL >> (i & 0x1f);
698 if (sys_call_table[i*2+1] != sys_ni_syscall)
699 vdso_data->syscall_map_32[i >> 5] |=
700 0x80000000UL >> (i & 0x1f);
701#else /* CONFIG_PPC64 */
702 if (sys_call_table[i] != sys_ni_syscall)
703 vdso_data->syscall_map_32[i >> 5] |=
704 0x80000000UL >> (i & 0x1f);
705#endif /* CONFIG_PPC64 */
706 }
707}
708
709
710static int __init vdso_init(void)
711{
712 int i;
713
714#ifdef CONFIG_PPC64
715 /*
716 * Fill up the "systemcfg" stuff for backward compatibility
717 */
718 strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
719 vdso_data->version.major = SYSTEMCFG_MAJOR;
720 vdso_data->version.minor = SYSTEMCFG_MINOR;
721 vdso_data->processor = mfspr(SPRN_PVR);
722 /*
723 * Fake the old platform number for pSeries and add
724 * in LPAR bit if necessary
725 */
726 vdso_data->platform = 0x100;
727 if (firmware_has_feature(FW_FEATURE_LPAR))
728 vdso_data->platform |= 1;
729 vdso_data->physicalMemorySize = memblock_phys_mem_size();
730 vdso_data->dcache_size = ppc64_caches.dsize;
731 vdso_data->dcache_line_size = ppc64_caches.dline_size;
732 vdso_data->icache_size = ppc64_caches.isize;
733 vdso_data->icache_line_size = ppc64_caches.iline_size;
734
735 /* XXXOJN: Blocks should be added to ppc64_caches and used instead */
736 vdso_data->dcache_block_size = ppc64_caches.dline_size;
737 vdso_data->icache_block_size = ppc64_caches.iline_size;
738 vdso_data->dcache_log_block_size = ppc64_caches.log_dline_size;
739 vdso_data->icache_log_block_size = ppc64_caches.log_iline_size;
740
741 /*
742 * Calculate the size of the 64 bits vDSO
743 */
744 vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
745 DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
746#else
747 vdso_data->dcache_block_size = L1_CACHE_BYTES;
748 vdso_data->dcache_log_block_size = L1_CACHE_SHIFT;
749 vdso_data->icache_block_size = L1_CACHE_BYTES;
750 vdso_data->icache_log_block_size = L1_CACHE_SHIFT;
751#endif /* CONFIG_PPC64 */
752
753
754 /*
755 * Calculate the size of the 32 bits vDSO
756 */
757 vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
758 DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages);
759
760
761 /*
762 * Setup the syscall map in the vDOS
763 */
764 vdso_setup_syscall_map();
765
766 /*
767 * Initialize the vDSO images in memory, that is do necessary
768 * fixups of vDSO symbols, locate trampolines, etc...
769 */
770 if (vdso_setup()) {
771 printk(KERN_ERR "vDSO setup failure, not enabled !\n");
772 vdso32_pages = 0;
773#ifdef CONFIG_PPC64
774 vdso64_pages = 0;
775#endif
776 return 0;
777 }
778
779 /* Make sure pages are in the correct state */
780 vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 2),
781 GFP_KERNEL);
782 BUG_ON(vdso32_pagelist == NULL);
783 for (i = 0; i < vdso32_pages; i++) {
784 struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
785 ClearPageReserved(pg);
786 get_page(pg);
787 vdso32_pagelist[i] = pg;
788 }
789 vdso32_pagelist[i++] = virt_to_page(vdso_data);
790 vdso32_pagelist[i] = NULL;
791
792#ifdef CONFIG_PPC64
793 vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 2),
794 GFP_KERNEL);
795 BUG_ON(vdso64_pagelist == NULL);
796 for (i = 0; i < vdso64_pages; i++) {
797 struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
798 ClearPageReserved(pg);
799 get_page(pg);
800 vdso64_pagelist[i] = pg;
801 }
802 vdso64_pagelist[i++] = virt_to_page(vdso_data);
803 vdso64_pagelist[i] = NULL;
804#endif /* CONFIG_PPC64 */
805
806 get_page(virt_to_page(vdso_data));
807
808 smp_wmb();
809 vdso_ready = 1;
810
811 return 0;
812}
813arch_initcall(vdso_init);
814
815int in_gate_area_no_mm(unsigned long addr)
816{
817 return 0;
818}
819
820int in_gate_area(struct mm_struct *mm, unsigned long addr)
821{
822 return 0;
823}
824
825struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
826{
827 return NULL;
828}
829