#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fdtable.h>
#include <linux/string.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kcmp.h>

#include <asm/unistd.h>

/*
 * We don't expose the real in-memory order of objects for security reasons,
 * but the comparison results should still be suitable for sorting. So we
 * obfuscate kernel pointer values and compare the obfuscated products instead.
 *
 * The obfuscation is done in two steps. First we xor the kernel pointer with
 * a random value, which moves the pointer to a new position in a reordered
 * space. Second, we multiply the xor result by a large odd random number to
 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
 *
 * Note also that the obfuscation itself is invisible to userspace and can be
 * changed to an alternate scheme if needed.
 */
static unsigned long cookies[KCMP_TYPES][2] __read_mostly;

static long kptr_obfuscate(long v, int type)
{
	return (v ^ cookies[type][0]) * cookies[type][1];
}
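
/*
 * As an illustration of the uniqueness argument above (a standalone
 * sketch, not part of the kernel build; the 8-bit cookie values are
 * arbitrary): any odd multiplier is coprime to 2^n, so the xor+multiply
 * mapping never makes two distinct values collide after truncation.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned char seen[256] = { 0 };
 *		unsigned char xor_cookie = 0xa5;
 *		unsigned char mul_cookie = 0x3b;	// odd, hence invertible mod 256
 *		int collisions = 0;
 *
 *		for (int v = 0; v < 256; v++) {
 *			unsigned char o = (v ^ xor_cookie) * mul_cookie;
 *			collisions += seen[o]++;
 *		}
 *		printf("collisions: %d\n", collisions);	// prints 0
 *		return 0;
 *	}
 */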

/*
 * 0 - equal, i.e. v1 = v2
 * 1 - less than, i.e. v1 < v2
 * 2 - greater than, i.e. v1 > v2
 * 3 - not equal but ordering unavailable (reserved for future)
 */
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
	long t1, t2;

	t1 = kptr_obfuscate((long)v1, type);
	t2 = kptr_obfuscate((long)v2, type);

	/* Branchless mapping to the return codes documented above */
	return (t1 < t2) | ((t1 > t2) << 1);
}

/* The caller must have pinned the task */
static struct file *
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
{
	struct file *file = NULL;

	task_lock(task);
	rcu_read_lock();

	if (task->files)
		file = fcheck_files(task->files, idx);

	rcu_read_unlock();
	task_unlock(task);

	return file;
}

static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
{
	if (likely(m2 != m1))
		mutex_unlock(m2);
	mutex_unlock(m1);
}

static int kcmp_lock(struct mutex *m1, struct mutex *m2)
{
	int err;

	if (m2 > m1)
		swap(m1, m2);

	err = mutex_lock_killable(m1);
	if (!err && likely(m1 != m2)) {
		err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
		if (err)
			mutex_unlock(m1);
	}

	return err;
}
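
/*
 * kcmp_lock()/kcmp_unlock() follow the usual address-ordered locking
 * pattern: when the two mutexes differ, the one with the higher address
 * is always taken first (that is what the swap above arranges), so two
 * kcmp() calls racing on the same pair of tasks with swapped arguments
 * cannot deadlock.  When both tasks share one cred_guard_mutex (e.g.
 * threads of the same process), the mutex is taken and released once.
 */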

SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
		unsigned long, idx1, unsigned long, idx2)
{
	struct task_struct *task1, *task2;
	int ret;

	rcu_read_lock();

	/*
	 * Tasks are looked up in caller's PID namespace only.
	 */
	task1 = find_task_by_vpid(pid1);
	task2 = find_task_by_vpid(pid2);
	if (!task1 || !task2)
		goto err_no_task;

	get_task_struct(task1);
	get_task_struct(task2);

	rcu_read_unlock();

	/*
	 * One should have enough rights to inspect task details.
	 */
	ret = kcmp_lock(&task1->signal->cred_guard_mutex,
			&task2->signal->cred_guard_mutex);
	if (ret)
		goto err;
	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
		ret = -EPERM;
		goto err_unlock;
	}

	switch (type) {
	case KCMP_FILE: {
		struct file *filp1, *filp2;

		filp1 = get_file_raw_ptr(task1, idx1);
		filp2 = get_file_raw_ptr(task2, idx2);

		if (filp1 && filp2)
			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
		else
			ret = -EBADF;
		break;
	}
	case KCMP_VM:
		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
		break;
	case KCMP_FILES:
		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
		break;
	case KCMP_FS:
		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
		break;
	case KCMP_SIGHAND:
		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
		break;
	case KCMP_IO:
		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
		break;
	case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
		ret = kcmp_ptr(task1->sysvsem.undo_list,
			       task2->sysvsem.undo_list,
			       KCMP_SYSVSEM);
#else
		ret = -EOPNOTSUPP;
#endif
		break;
	default:
		ret = -EINVAL;
		break;
	}

err_unlock:
	kcmp_unlock(&task1->signal->cred_guard_mutex,
		    &task2->signal->cred_guard_mutex);
err:
	put_task_struct(task1);
	put_task_struct(task2);

	return ret;

err_no_task:
	rcu_read_unlock();
	return -ESRCH;
}

static __init int kcmp_cookies_init(void)
{
	int i;

	get_random_bytes(cookies, sizeof(cookies));

	/* Make sure each multiplier is both large (top bit set) and odd */
	for (i = 0; i < KCMP_TYPES; i++)
		cookies[i][1] |= (~(~0UL >> 1) | 1);

	return 0;
}
arch_initcall(kcmp_cookies_init);
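
/*
 * A minimal userspace sketch of how the syscall above can be exercised
 * (illustration only, not part of this file or the kernel build). It
 * assumes the libc headers expose SYS_kcmp and uses raw syscall(2),
 * since glibc historically provides no kcmp() wrapper. Two descriptors
 * created by dup() share one struct file and compare equal (0), while a
 * freshly opened file compares as ordered-but-different (1 or 2):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/kcmp.h>
 *
 *	int main(void)
 *	{
 *		pid_t self = getpid();
 *		int dupfd = dup(1);			// same struct file as fd 1
 *		int nullfd = open("/dev/null", O_RDONLY); // a distinct struct file
 *
 *		printf("fd 1 vs its dup:   %ld\n",
 *		       syscall(SYS_kcmp, self, self, KCMP_FILE, 1, dupfd));
 *		printf("fd 1 vs /dev/null: %ld\n",
 *		       syscall(SYS_kcmp, self, self, KCMP_FILE, 1, nullfd));
 *		return 0;
 *	}
 */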