1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
3#include <linux/syscalls.h>
4#include <linux/fdtable.h>
5#include <linux/string.h>
6#include <linux/random.h>
7#include <linux/module.h>
8#include <linux/ptrace.h>
9#include <linux/init.h>
10#include <linux/errno.h>
11#include <linux/cache.h>
12#include <linux/bug.h>
13#include <linux/err.h>
14#include <linux/kcmp.h>
15#include <linux/capability.h>
16#include <linux/list.h>
17#include <linux/eventpoll.h>
18#include <linux/file.h>
19
20#include <asm/unistd.h>
21
22/*
23 * We don't expose the real in-memory order of objects for security reasons.
24 * But still the comparison results should be suitable for sorting. So we
 * obfuscate kernel pointer values and compare the products instead.
26 *
27 * The obfuscation is done in two steps. First we xor the kernel pointer with
28 * a random value, which puts pointer into a new position in a reordered space.
29 * Secondly we multiply the xor production with a large odd random number to
30 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
33 *
34 * Note also that the obfuscation itself is invisible to userspace and if needed
35 * it can be changed to an alternate scheme.
36 */
37static unsigned long cookies[KCMP_TYPES][2] __read_mostly;
38
39static long kptr_obfuscate(long v, int type)
40{
41 return (v ^ cookies[type][0]) * cookies[type][1];
42}
43
44/*
45 * 0 - equal, i.e. v1 = v2
46 * 1 - less than, i.e. v1 < v2
47 * 2 - greater than, i.e. v1 > v2
48 * 3 - not equal but ordering unavailable (reserved for future)
49 */
50static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
51{
52 long t1, t2;
53
54 t1 = kptr_obfuscate((long)v1, type);
55 t2 = kptr_obfuscate((long)v2, type);
56
57 return (t1 < t2) | ((t1 > t2) << 1);
58}
59
60/* The caller must have pinned the task */
61static struct file *
62get_file_raw_ptr(struct task_struct *task, unsigned int idx)
63{
64 struct file *file;
65
66 file = fget_task(task, idx);
67 if (file)
68 fput(file);
69
70 return file;
71}
72
73static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2)
74{
75 if (likely(l2 != l1))
76 up_read(l2);
77 up_read(l1);
78}
79
80static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2)
81{
82 int err;
83
84 if (l2 > l1)
85 swap(l1, l2);
86
87 err = down_read_killable(l1);
88 if (!err && likely(l1 != l2)) {
89 err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING);
90 if (err)
91 up_read(l1);
92 }
93
94 return err;
95}
96
97#ifdef CONFIG_EPOLL
98static int kcmp_epoll_target(struct task_struct *task1,
99 struct task_struct *task2,
100 unsigned long idx1,
101 struct kcmp_epoll_slot __user *uslot)
102{
103 struct file *filp, *filp_epoll, *filp_tgt;
104 struct kcmp_epoll_slot slot;
105
106 if (copy_from_user(&slot, uslot, sizeof(slot)))
107 return -EFAULT;
108
109 filp = get_file_raw_ptr(task1, idx1);
110 if (!filp)
111 return -EBADF;
112
113 filp_epoll = fget_task(task2, slot.efd);
114 if (!filp_epoll)
115 return -EBADF;
116
117 filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
118 fput(filp_epoll);
119
120 if (IS_ERR(filp_tgt))
121 return PTR_ERR(filp_tgt);
122
123 return kcmp_ptr(filp, filp_tgt, KCMP_FILE);
124}
125#else
126static int kcmp_epoll_target(struct task_struct *task1,
127 struct task_struct *task2,
128 unsigned long idx1,
129 struct kcmp_epoll_slot __user *uslot)
130{
131 return -EOPNOTSUPP;
132}
133#endif
134
135SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
136 unsigned long, idx1, unsigned long, idx2)
137{
138 struct task_struct *task1, *task2;
139 int ret;
140
141 rcu_read_lock();
142
143 /*
144 * Tasks are looked up in caller's PID namespace only.
145 */
146 task1 = find_task_by_vpid(pid1);
147 task2 = find_task_by_vpid(pid2);
148 if (!task1 || !task2)
149 goto err_no_task;
150
151 get_task_struct(task1);
152 get_task_struct(task2);
153
154 rcu_read_unlock();
155
156 /*
157 * One should have enough rights to inspect task details.
158 */
159 ret = kcmp_lock(&task1->signal->exec_update_lock,
160 &task2->signal->exec_update_lock);
161 if (ret)
162 goto err;
163 if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
164 !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
165 ret = -EPERM;
166 goto err_unlock;
167 }
168
169 switch (type) {
170 case KCMP_FILE: {
171 struct file *filp1, *filp2;
172
173 filp1 = get_file_raw_ptr(task1, idx1);
174 filp2 = get_file_raw_ptr(task2, idx2);
175
176 if (filp1 && filp2)
177 ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
178 else
179 ret = -EBADF;
180 break;
181 }
182 case KCMP_VM:
183 ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
184 break;
185 case KCMP_FILES:
186 ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
187 break;
188 case KCMP_FS:
189 ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
190 break;
191 case KCMP_SIGHAND:
192 ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
193 break;
194 case KCMP_IO:
195 ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
196 break;
197 case KCMP_SYSVSEM:
198#ifdef CONFIG_SYSVIPC
199 ret = kcmp_ptr(task1->sysvsem.undo_list,
200 task2->sysvsem.undo_list,
201 KCMP_SYSVSEM);
202#else
203 ret = -EOPNOTSUPP;
204#endif
205 break;
206 case KCMP_EPOLL_TFD:
207 ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
208 break;
209 default:
210 ret = -EINVAL;
211 break;
212 }
213
214err_unlock:
215 kcmp_unlock(&task1->signal->exec_update_lock,
216 &task2->signal->exec_update_lock);
217err:
218 put_task_struct(task1);
219 put_task_struct(task2);
220
221 return ret;
222
223err_no_task:
224 rcu_read_unlock();
225 return -ESRCH;
226}
227
228static __init int kcmp_cookies_init(void)
229{
230 int i;
231
232 get_random_bytes(cookies, sizeof(cookies));
233
234 for (i = 0; i < KCMP_TYPES; i++)
235 cookies[i][1] |= (~(~0UL >> 1) | 1);
236
237 return 0;
238}
239arch_initcall(kcmp_cookies_init);
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
3#include <linux/syscalls.h>
4#include <linux/fdtable.h>
5#include <linux/string.h>
6#include <linux/random.h>
7#include <linux/module.h>
8#include <linux/ptrace.h>
9#include <linux/init.h>
10#include <linux/errno.h>
11#include <linux/cache.h>
12#include <linux/bug.h>
13#include <linux/err.h>
14#include <linux/kcmp.h>
15#include <linux/capability.h>
16#include <linux/list.h>
17#include <linux/eventpoll.h>
18#include <linux/file.h>
19
20#include <asm/unistd.h>
21
22/*
23 * We don't expose the real in-memory order of objects for security reasons.
24 * But still the comparison results should be suitable for sorting. So we
 * obfuscate kernel pointer values and compare the products instead.
26 *
27 * The obfuscation is done in two steps. First we xor the kernel pointer with
28 * a random value, which puts pointer into a new position in a reordered space.
29 * Secondly we multiply the xor production with a large odd random number to
30 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
33 *
34 * Note also that the obfuscation itself is invisible to userspace and if needed
35 * it can be changed to an alternate scheme.
36 */
37static unsigned long cookies[KCMP_TYPES][2] __read_mostly;
38
39static long kptr_obfuscate(long v, int type)
40{
41 return (v ^ cookies[type][0]) * cookies[type][1];
42}
43
44/*
45 * 0 - equal, i.e. v1 = v2
46 * 1 - less than, i.e. v1 < v2
47 * 2 - greater than, i.e. v1 > v2
48 * 3 - not equal but ordering unavailable (reserved for future)
49 */
50static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
51{
52 long t1, t2;
53
54 t1 = kptr_obfuscate((long)v1, type);
55 t2 = kptr_obfuscate((long)v2, type);
56
57 return (t1 < t2) | ((t1 > t2) << 1);
58}
59
60/* The caller must have pinned the task */
61static struct file *
62get_file_raw_ptr(struct task_struct *task, unsigned int idx)
63{
64 struct file *file;
65
66 rcu_read_lock();
67 file = task_lookup_fdget_rcu(task, idx);
68 rcu_read_unlock();
69 if (file)
70 fput(file);
71
72 return file;
73}
74
75static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2)
76{
77 if (likely(l2 != l1))
78 up_read(l2);
79 up_read(l1);
80}
81
82static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2)
83{
84 int err;
85
86 if (l2 > l1)
87 swap(l1, l2);
88
89 err = down_read_killable(l1);
90 if (!err && likely(l1 != l2)) {
91 err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING);
92 if (err)
93 up_read(l1);
94 }
95
96 return err;
97}
98
99#ifdef CONFIG_EPOLL
100static int kcmp_epoll_target(struct task_struct *task1,
101 struct task_struct *task2,
102 unsigned long idx1,
103 struct kcmp_epoll_slot __user *uslot)
104{
105 struct file *filp, *filp_epoll, *filp_tgt;
106 struct kcmp_epoll_slot slot;
107
108 if (copy_from_user(&slot, uslot, sizeof(slot)))
109 return -EFAULT;
110
111 filp = get_file_raw_ptr(task1, idx1);
112 if (!filp)
113 return -EBADF;
114
115 filp_epoll = fget_task(task2, slot.efd);
116 if (!filp_epoll)
117 return -EBADF;
118
119 filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
120 fput(filp_epoll);
121
122 if (IS_ERR(filp_tgt))
123 return PTR_ERR(filp_tgt);
124
125 return kcmp_ptr(filp, filp_tgt, KCMP_FILE);
126}
127#else
128static int kcmp_epoll_target(struct task_struct *task1,
129 struct task_struct *task2,
130 unsigned long idx1,
131 struct kcmp_epoll_slot __user *uslot)
132{
133 return -EOPNOTSUPP;
134}
135#endif
136
137SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
138 unsigned long, idx1, unsigned long, idx2)
139{
140 struct task_struct *task1, *task2;
141 int ret;
142
143 rcu_read_lock();
144
145 /*
146 * Tasks are looked up in caller's PID namespace only.
147 */
148 task1 = find_task_by_vpid(pid1);
149 task2 = find_task_by_vpid(pid2);
150 if (!task1 || !task2)
151 goto err_no_task;
152
153 get_task_struct(task1);
154 get_task_struct(task2);
155
156 rcu_read_unlock();
157
158 /*
159 * One should have enough rights to inspect task details.
160 */
161 ret = kcmp_lock(&task1->signal->exec_update_lock,
162 &task2->signal->exec_update_lock);
163 if (ret)
164 goto err;
165 if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
166 !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
167 ret = -EPERM;
168 goto err_unlock;
169 }
170
171 switch (type) {
172 case KCMP_FILE: {
173 struct file *filp1, *filp2;
174
175 filp1 = get_file_raw_ptr(task1, idx1);
176 filp2 = get_file_raw_ptr(task2, idx2);
177
178 if (filp1 && filp2)
179 ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
180 else
181 ret = -EBADF;
182 break;
183 }
184 case KCMP_VM:
185 ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
186 break;
187 case KCMP_FILES:
188 ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
189 break;
190 case KCMP_FS:
191 ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
192 break;
193 case KCMP_SIGHAND:
194 ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
195 break;
196 case KCMP_IO:
197 ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
198 break;
199 case KCMP_SYSVSEM:
200#ifdef CONFIG_SYSVIPC
201 ret = kcmp_ptr(task1->sysvsem.undo_list,
202 task2->sysvsem.undo_list,
203 KCMP_SYSVSEM);
204#else
205 ret = -EOPNOTSUPP;
206#endif
207 break;
208 case KCMP_EPOLL_TFD:
209 ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
210 break;
211 default:
212 ret = -EINVAL;
213 break;
214 }
215
216err_unlock:
217 kcmp_unlock(&task1->signal->exec_update_lock,
218 &task2->signal->exec_update_lock);
219err:
220 put_task_struct(task1);
221 put_task_struct(task2);
222
223 return ret;
224
225err_no_task:
226 rcu_read_unlock();
227 return -ESRCH;
228}
229
230static __init int kcmp_cookies_init(void)
231{
232 int i;
233
234 get_random_bytes(cookies, sizeof(cookies));
235
236 for (i = 0; i < KCMP_TYPES; i++)
237 cookies[i][1] |= (~(~0UL >> 1) | 1);
238
239 return 0;
240}
241arch_initcall(kcmp_cookies_init);