kernel/kcmp.c, v3.15

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fdtable.h>
#include <linux/string.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kcmp.h>

#include <asm/unistd.h>

/*
 * We don't expose the real in-memory order of objects for security reasons.
 * Still, the comparison results should be suitable for sorting, so we
 * obfuscate the kernel pointer values and compare the obfuscated values instead.
 *
 * The obfuscation is done in two steps. First we xor the kernel pointer with
 * a random value, which moves the pointer to a new position in a reordered space.
 * Secondly we multiply the xor result by a large odd random number to
 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
 *
 * Note also that the obfuscation itself is invisible to userspace and, if
 * needed, it can be changed to an alternate scheme.
 */
static unsigned long cookies[KCMP_TYPES][2] __read_mostly;

static long kptr_obfuscate(long v, int type)
{
	return (v ^ cookies[type][0]) * cookies[type][1];
}

/*
 * 0 - equal, i.e. v1 = v2
 * 1 - less than, i.e. v1 < v2
 * 2 - greater than, i.e. v1 > v2
 * 3 - not equal but ordering unavailable (reserved for future)
 */
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
	long ret;

	ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);

	return (ret < 0) | ((ret > 0) << 1);
}

/* The caller must have pinned the task */
static struct file *
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
{
	struct file *file = NULL;

	task_lock(task);
	rcu_read_lock();

	if (task->files)
		file = fcheck_files(task->files, idx);

	rcu_read_unlock();
	task_unlock(task);

	return file;
}

static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
{
	if (likely(m2 != m1))
		mutex_unlock(m2);
	mutex_unlock(m1);
}

static int kcmp_lock(struct mutex *m1, struct mutex *m2)
{
	int err;

	if (m2 > m1)
		swap(m1, m2);

	err = mutex_lock_killable(m1);
	if (!err && likely(m1 != m2)) {
		err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
		if (err)
			mutex_unlock(m1);
	}

	return err;
}

SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
		unsigned long, idx1, unsigned long, idx2)
{
	struct task_struct *task1, *task2;
	int ret;

	rcu_read_lock();

	/*
	 * Tasks are looked up in caller's PID namespace only.
	 */
	task1 = find_task_by_vpid(pid1);
	task2 = find_task_by_vpid(pid2);
	if (!task1 || !task2)
		goto err_no_task;

	get_task_struct(task1);
	get_task_struct(task2);

	rcu_read_unlock();

	/*
	 * One should have enough rights to inspect task details.
	 */
	ret = kcmp_lock(&task1->signal->cred_guard_mutex,
			&task2->signal->cred_guard_mutex);
	if (ret)
		goto err;
	if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
	    !ptrace_may_access(task2, PTRACE_MODE_READ)) {
		ret = -EPERM;
		goto err_unlock;
	}

	switch (type) {
	case KCMP_FILE: {
		struct file *filp1, *filp2;

		filp1 = get_file_raw_ptr(task1, idx1);
		filp2 = get_file_raw_ptr(task2, idx2);

		if (filp1 && filp2)
			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
		else
			ret = -EBADF;
		break;
	}
	case KCMP_VM:
		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
		break;
	case KCMP_FILES:
		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
		break;
	case KCMP_FS:
		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
		break;
	case KCMP_SIGHAND:
		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
		break;
	case KCMP_IO:
		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
		break;
	case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
		ret = kcmp_ptr(task1->sysvsem.undo_list,
			       task2->sysvsem.undo_list,
			       KCMP_SYSVSEM);
#else
		ret = -EOPNOTSUPP;
#endif
		break;
	default:
		ret = -EINVAL;
		break;
	}

err_unlock:
	kcmp_unlock(&task1->signal->cred_guard_mutex,
		    &task2->signal->cred_guard_mutex);
err:
	put_task_struct(task1);
	put_task_struct(task2);

	return ret;

err_no_task:
	rcu_read_unlock();
	return -ESRCH;
}

static __init int kcmp_cookies_init(void)
{
	int i;

	get_random_bytes(cookies, sizeof(cookies));

	for (i = 0; i < KCMP_TYPES; i++)
		cookies[i][1] |= (~(~0UL >>  1) | 1);

	return 0;
}
arch_initcall(kcmp_cookies_init);
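
For context, here is a minimal userspace sketch of how this syscall is typically exercised. It is not part of kcmp.c: glibc provides no kcmp() wrapper, so the call goes through syscall(2), and the kcmp() helper plus the choice of file descriptors below are illustrative assumptions. A return of 0 means the two objects are the same, 1 or 2 give an (obfuscated) ordering, and a negative value is an error.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

/* Hypothetical wrapper: glibc does not export kcmp(), so use syscall(2). */
static long kcmp(pid_t pid1, pid_t pid2, int type,
		 unsigned long idx1, unsigned long idx2)
{
	return syscall(SYS_kcmp, pid1, pid2, type, idx1, idx2);
}

int main(void)
{
	pid_t pid = getpid();
	int fd = dup(STDOUT_FILENO);	/* shares the same struct file as fd 1 */

	/* Expected to print 0: both descriptors point at the same struct file. */
	printf("KCMP_FILE(1, %d) = %ld\n", fd, kcmp(pid, pid, KCMP_FILE, 1, fd));

	/* A task's mm compared with itself is trivially equal as well. */
	printf("KCMP_VM = %ld\n", kcmp(pid, pid, KCMP_VM, 0, 0));
	return 0;
}
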
kernel/kcmp.c, v5.4

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fdtable.h>
#include <linux/string.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kcmp.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/eventpoll.h>
#include <linux/file.h>

#include <asm/unistd.h>

/*
 * We don't expose the real in-memory order of objects for security reasons.
 * Still, the comparison results should be suitable for sorting, so we
 * obfuscate the kernel pointer values and compare the obfuscated values instead.
 *
 * The obfuscation is done in two steps. First we xor the kernel pointer with
 * a random value, which moves the pointer to a new position in a reordered space.
 * Secondly we multiply the xor result by a large odd random number to
 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
 *
 * Note also that the obfuscation itself is invisible to userspace and, if
 * needed, it can be changed to an alternate scheme.
 */
static unsigned long cookies[KCMP_TYPES][2] __read_mostly;

static long kptr_obfuscate(long v, int type)
{
	return (v ^ cookies[type][0]) * cookies[type][1];
}

/*
 * 0 - equal, i.e. v1 = v2
 * 1 - less than, i.e. v1 < v2
 * 2 - greater than, i.e. v1 > v2
 * 3 - not equal but ordering unavailable (reserved for future)
 */
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
	long t1, t2;

	t1 = kptr_obfuscate((long)v1, type);
	t2 = kptr_obfuscate((long)v2, type);

	return (t1 < t2) | ((t1 > t2) << 1);
}

/* The caller must have pinned the task */
static struct file *
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
{
	struct file *file = NULL;

	task_lock(task);
	rcu_read_lock();

	if (task->files)
		file = fcheck_files(task->files, idx);

	rcu_read_unlock();
	task_unlock(task);

	return file;
}

static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
{
	if (likely(m2 != m1))
		mutex_unlock(m2);
	mutex_unlock(m1);
}

static int kcmp_lock(struct mutex *m1, struct mutex *m2)
{
	int err;

	if (m2 > m1)
		swap(m1, m2);

	err = mutex_lock_killable(m1);
	if (!err && likely(m1 != m2)) {
		err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
		if (err)
			mutex_unlock(m1);
	}

	return err;
}

#ifdef CONFIG_EPOLL
static int kcmp_epoll_target(struct task_struct *task1,
			     struct task_struct *task2,
			     unsigned long idx1,
			     struct kcmp_epoll_slot __user *uslot)
{
	struct file *filp, *filp_epoll, *filp_tgt;
	struct kcmp_epoll_slot slot;
	struct files_struct *files;

	if (copy_from_user(&slot, uslot, sizeof(slot)))
		return -EFAULT;

	filp = get_file_raw_ptr(task1, idx1);
	if (!filp)
		return -EBADF;

	files = get_files_struct(task2);
	if (!files)
		return -EBADF;

	spin_lock(&files->file_lock);
	filp_epoll = fcheck_files(files, slot.efd);
	if (filp_epoll)
		get_file(filp_epoll);
	else
		filp_tgt = ERR_PTR(-EBADF);
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	if (filp_epoll) {
		filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
		fput(filp_epoll);
	}

	if (IS_ERR(filp_tgt))
		return PTR_ERR(filp_tgt);

	return kcmp_ptr(filp, filp_tgt, KCMP_FILE);
}
#else
static int kcmp_epoll_target(struct task_struct *task1,
			     struct task_struct *task2,
			     unsigned long idx1,
			     struct kcmp_epoll_slot __user *uslot)
{
	return -EOPNOTSUPP;
}
#endif

SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
		unsigned long, idx1, unsigned long, idx2)
{
	struct task_struct *task1, *task2;
	int ret;

	rcu_read_lock();

	/*
	 * Tasks are looked up in caller's PID namespace only.
	 */
	task1 = find_task_by_vpid(pid1);
	task2 = find_task_by_vpid(pid2);
	if (!task1 || !task2)
		goto err_no_task;

	get_task_struct(task1);
	get_task_struct(task2);

	rcu_read_unlock();

	/*
	 * One should have enough rights to inspect task details.
	 */
	ret = kcmp_lock(&task1->signal->cred_guard_mutex,
			&task2->signal->cred_guard_mutex);
	if (ret)
		goto err;
	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
		ret = -EPERM;
		goto err_unlock;
	}

	switch (type) {
	case KCMP_FILE: {
		struct file *filp1, *filp2;

		filp1 = get_file_raw_ptr(task1, idx1);
		filp2 = get_file_raw_ptr(task2, idx2);

		if (filp1 && filp2)
			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
		else
			ret = -EBADF;
		break;
	}
	case KCMP_VM:
		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
		break;
	case KCMP_FILES:
		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
		break;
	case KCMP_FS:
		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
		break;
	case KCMP_SIGHAND:
		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
		break;
	case KCMP_IO:
		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
		break;
	case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
		ret = kcmp_ptr(task1->sysvsem.undo_list,
			       task2->sysvsem.undo_list,
			       KCMP_SYSVSEM);
#else
		ret = -EOPNOTSUPP;
#endif
		break;
	case KCMP_EPOLL_TFD:
		ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
		break;
	default:
		ret = -EINVAL;
		break;
	}

err_unlock:
	kcmp_unlock(&task1->signal->cred_guard_mutex,
		    &task2->signal->cred_guard_mutex);
err:
	put_task_struct(task1);
	put_task_struct(task2);

	return ret;

err_no_task:
	rcu_read_unlock();
	return -ESRCH;
}

static __init int kcmp_cookies_init(void)
{
	int i;

	get_random_bytes(cookies, sizeof(cookies));

	for (i = 0; i < KCMP_TYPES; i++)
		cookies[i][1] |= (~(~0UL >>  1) | 1);

	return 0;
}
arch_initcall(kcmp_cookies_init);
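
The KCMP_EPOLL_TFD case, which does not exist in the v3.15 code above, can be exercised from userspace as in the sketch below. This is illustrative only and not part of the kernel source: the use of stdin and the particular descriptors are assumptions. Here idx2 is reinterpreted as a pointer to struct kcmp_epoll_slot naming the epoll fd, the target fd and its position; a return of 0 means the caller's idx1 descriptor and the file registered in the epoll instance share the same struct file.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/epoll.h>
#include <linux/kcmp.h>

int main(void)
{
	pid_t pid = getpid();
	int epfd = epoll_create1(0);
	int tfd = dup(STDIN_FILENO);	/* same struct file as fd 0 */
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = tfd };
	struct kcmp_epoll_slot slot = {
		.efd  = epfd,	/* epoll instance in the target task */
		.tfd  = tfd,	/* fd registered inside that epoll instance */
		.toff = 0,	/* first occurrence of tfd in the epoll set */
	};
	long ret;

	epoll_ctl(epfd, EPOLL_CTL_ADD, tfd, &ev);

	/* Compare our fd 0 against the file stored in the epoll slot;
	 * 0 is expected since both refer to stdin's struct file. */
	ret = syscall(SYS_kcmp, pid, pid, KCMP_EPOLL_TFD,
		      0, (unsigned long)&slot);
	printf("KCMP_EPOLL_TFD = %ld\n", ret);
	return 0;
}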