v6.13.7
/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>
#include <asm/desc.h>

/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

static int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}
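
/*
 * get_cpu() disables preemption, so the userspace_pid[cpu] lookup and
 * the os_set_thread_area() call above are guaranteed to happen on the
 * same CPU. As the error message suggests, the helper presumably
 * forwards the descriptor to the traced host process via
 * PTRACE_SET_THREAD_AREA.
 */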

/*
 * Find a yet-unused TLS descriptor index, for sys_set_thread_area.
 * XXX: Consider leaving one free slot for glibc usage in the first
 * place. This must be done here (and by changing GDT_ENTRY_TLS_*
 * macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic
 * linking and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * Compare with LDT_empty() or the i386 sys_get_thread_area()
	 * code: this indeed yields an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}

#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * If an entry is no longer present but has not been
		 * flushed, clear it and flush the cleared descriptor to
		 * the host, which empties the host slot as well.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}
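
/*
 * State model implied by the loop above: ->present means the guest has
 * a live descriptor in tls_array, while ->flushed means the host's GDT
 * slot matches that copy. Entries that are neither present nor flushed
 * are pushed to the host as cleared descriptors, so the host slot is
 * emptied as well.
 */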

/*
 * Check whether we need to flush for the new process, i.e. whether any
 * descriptor has not yet been flushed to the host.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Can't test curr->present: we may need to clear a
		 * descriptor which previously had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been
 * flushed. So we mark them as such and the first switch_to will do the
 * job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * It is still correct to do this if the entry wasn't
		 * present on the host: it simply stays as flushed as it
		 * was.
		 */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same
 * ->mm have a common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was
 * discussed for SMP support) this wouldn't be needed.
 *
 * And this will not need to be used if and when we add support for the
 * host SKAS patch.
 */

int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * We have no need whatsoever to switch TLS for kernel threads;
	 * beyond that, it would also result in us calling
	 * os_set_thread_area() with userspace_pid[cpu] == 0, which gives
	 * an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}
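
/*
 * arch_switch_tls() is presumably invoked from UML's context-switch
 * path. O_FORCE matters because every guest thread shares one host
 * process: entries the incoming task considers flushed may have been
 * overwritten by the task that just ran, so they are reloaded
 * unconditionally.
 */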

static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}
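
/*
 * Callers pass flushed == 1 when the host has already been updated
 * (the sys_set_thread_area() path below) and flushed == 0 to defer the
 * host update to the next load_TLS(), e.g. when setting TLS for a task
 * that is not currently running.
 */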

int arch_set_tls(struct task_struct *new, unsigned long tls)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}
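
/*
 * arch_set_tls() is presumably called from copy_thread() when clone()
 * is passed CLONE_SETTLS; on 32-bit the tls argument is then a
 * userspace pointer to a struct user_desc. The entry is recorded with
 * flushed == 0, so the first switch to the new task pushes it to the
 * host.
 */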

static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.", current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values returned to
	 * userspace from the tls_array are 0 (because it's cleared at
	 * boot, see arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}
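
/*
 * Userspace usage sketch (not part of this file): a caller that wants
 * the kernel to pick a slot passes entry_number == -1 and reads the
 * chosen index back. The field values below are illustrative only.
 *
 *	struct user_desc d = {
 *		.entry_number   = -1,
 *		.base_addr      = (unsigned long) tls_block,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *		.useable        = 1,
 *	};
 *	if (syscall(SYS_set_thread_area, &d) == 0)
 *		slot = d.entry_number;
 */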

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load; this differs
 * from i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}
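
/*
 * Note that the read side above answers purely from the guest's cached
 * tls_array via get_tls_entry(); it never queries the host, so
 * userspace sees whatever set_tls_entry() last recorded.
 */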

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
		struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT
 * indexes if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
	} else
		printk(KERN_ERR "  Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");
	return 0;
}

__initcall(__setup_host_supports_tls);

v5.4
/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}

int do_get_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}
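
/*
 * do_get_thread_area() reads a descriptor back from the host. Compare
 * with the v6.13.7 listing above, where this helper is gone and
 * do_set_thread_area() has become static: reads there are served only
 * from the guest-side tls_array.
 */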

/*
 * Find a yet-unused TLS descriptor index, for sys_set_thread_area.
 * XXX: Consider leaving one free slot for glibc usage in the first
 * place. This must be done here (and by changing GDT_ENTRY_TLS_*
 * macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic
 * linking and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * Compare with LDT_empty() or the i386 sys_get_thread_area()
	 * code: this indeed yields an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}

#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * If an entry is no longer present but has not been
		 * flushed, clear it and flush the cleared descriptor to
		 * the host, which empties the host slot as well.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}

/*
 * Check whether we need to flush for the new process, i.e. whether any
 * descriptor has not yet been flushed to the host.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Can't test curr->present: we may need to clear a
		 * descriptor which previously had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been
 * flushed. So we mark them as such and the first switch_to will do the
 * job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * It is still correct to do this if the entry wasn't
		 * present on the host: it simply stays as flushed as it
		 * was.
		 */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same
 * ->mm have a common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was
 * discussed for SMP support) this wouldn't be needed.
 *
 * And this will not need to be used if and when we add support for the
 * host SKAS patch.
 */

int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * We have no need whatsoever to switch TLS for kernel threads;
	 * beyond that, it would also result in us calling
	 * os_set_thread_area() with userspace_pid[cpu] == 0, which gives
	 * an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_SI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}
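
/*
 * Unlike v6.13.7's arch_set_tls(), which receives the clone() TLS
 * argument directly, this version digs the user_desc pointer out of
 * the child's saved registers via UPT_SI() (presumably the saved %si
 * slot).
 */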

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.", current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values returned to
	 * userspace from the tls_array are 0 (because it's cleared at
	 * boot, see arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load; this differs
 * from i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
		struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT
 * indexes if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
	} else
		printk(KERN_ERR "  Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");
	return 0;
}

__initcall(__setup_host_supports_tls);