// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
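
/*
 * Illustrative sketch (not called anywhere): the nesting order above,
 * expressed with the helpers defined later in this file.
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * Releases happen in the reverse order of acquisition.
 */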

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
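
/*
 * Note (informational, not from the original source): module_param_named()
 * and module_param_call() expose these knobs under
 * /sys/module/binder/parameters/, so a sufficiently privileged shell can,
 * for example, enable transaction tracing with something like:
 *
 *	echo 0x200 > /sys/module/binder/parameters/debug_mask
 *
 * where 0x200 is BINDER_DEBUG_TRANSACTION (1U << 9) above.
 */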

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
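
/*
 * Illustrative use of the accessors above (not from the original source):
 * every binder object in a buffer begins with a struct binder_object_header,
 * and container_of() recovers the enclosing object once hdr->type has been
 * checked, e.g.:
 *
 *	struct binder_object_header *hdr = ...;
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fp = to_binder_fd_object(hdr);
 *		// fp->fd is the sender's file descriptor
 *	}
 */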

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
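
/*
 * Informational sketch (an assumption about the reader side, which lives
 * outside this section): a consumer of the log would pair the smp_wmb()
 * above with a read barrier, checking debug_id_done both before and after
 * copying an entry to detect a concurrent rewrite, e.g.:
 *
 *	unsigned int done = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	copy = *e;
 *	// the copy is only trustworthy if 'done' was non-zero and
 *	// still matches e->debug_id_done afterwards
 */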

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
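
/*
 * Informational sketch (not from the original source): the looper bits
 * above track a thread's lifecycle as driven by userspace commands from
 * <uapi/linux/android/binder.h>, roughly:
 *
 *	BC_ENTER_LOOPER    -> BINDER_LOOPER_STATE_ENTERED    (main thread)
 *	BC_REGISTER_LOOPER -> BINDER_LOOPER_STATE_REGISTERED (spawned thread)
 *	BC_EXIT_LOOPER     -> BINDER_LOOPER_STATE_EXITED
 *
 * POLL is set when a thread waits via poll()/epoll rather than a blocking
 * read; INVALID records a protocol violation (e.g. registering a looper
 * twice); WAITING marks a thread currently idle waiting for work.
 */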

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without the wakeup, such a
	 * thread risks waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
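
/*
 * Illustrative caller pattern (not from the original source), combining
 * binder_select_thread_ilocked() with binder_wakeup_thread_ilocked()
 * defined below, as the kernel-doc above prescribes:
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	...queue the work...
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *	binder_inner_proc_unlock(proc);
 *
 * Selecting first and waking only after the work is queued ensures the
 * woken thread finds the work when it re-checks its todo lists.
 */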

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
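
/*
 * Worked example (informational, assuming the standard rlimit_to_nice()
 * mapping where an RLIMIT_NICE of 1..40 corresponds to nice 19..-20):
 * with a soft RLIMIT_NICE of 20, a request for nice -10 is denied by
 * can_nice(), so the thread is clamped to min_nice == 0. If RLIMIT_NICE
 * is 0, rlimit_to_nice() yields 20, which exceeds MAX_NICE (19), and the
 * "RLIMIT_NICE not set" user error above is reported.
 */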

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
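
/*
 * Design note (informational): binder_new_node() follows the common
 * "allocate outside the lock, detect a losing race inside the lock"
 * pattern. kzalloc(GFP_KERNEL) may sleep, so it cannot be called with
 * the inner spinlock held; the allocation is therefore done first, and
 * if another thread inserted a matching node in the meantime,
 * binder_init_node_ilocked() returns the existing node and the
 * speculative allocation is freed. binder_inc_ref_for_node() below uses
 * the same pattern for binder_ref.
 */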

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
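
/*
 * Worked example (informational): the descriptor scan above assigns the
 * smallest unused handle, with 0 reserved for the context manager. If a
 * proc already holds refs with descs {0, 1, 2, 5}, a new ref for an
 * ordinary node starts at desc 1, is bumped past 1 and 2 to 3, and the
 * scan stops at 5 (5 > 3), so the new ref gets desc 3. Walking
 * refs_by_desc in ascending order makes this a single O(n) pass.
 */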

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if the ref was not found or if a
 * strong ref was required but the ref is only weak
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling binder_cleanup_ref_olocked()
		 * on the new reference with strong=0, while the caller
		 * still holds a tmp_ref on the node, will not free the
		 * node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * tmp_ref is atomic, so it may be updated without the inner
	 * lock as long as it cannot reach zero or thread->is_dead is
	 * false; the lock is taken here so the final decrement and
	 * the is_dead check are performed atomically with respect to
	 * thread release.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
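
/*
 * Illustrative caller pattern (not from the original source):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...operate on target_thread under the inner lock...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 *
 * binder_send_failed_reply() below follows exactly this shape.
 */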

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a
 *		valid binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid
 *		offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* No object has been verified yet, so no fixup is allowed */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}

/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file pinned for the deferred close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and drops the final fput() reference on the
 * file that was pinned for the deferred close.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = file_close_fd(fd);
	if (twcb->file) {
		// pin it until binder_do_fd_close(); see comments there
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
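
/*
 * Illustrative sketch (not part of the driver): the life of one deferred
 * close scheduled above, in call order.
 *
 *	binder_deferred_fd_close(fd)
 *	    file_close_fd(fd)          // detach fd from this task's fd table
 *	    get_file(file)             // pin struct file for the callback
 *	    filp_close(file, files)    // flush now, in ioctl context
 *	    task_work_add(...)         // queue binder_do_fd_close()
 *	...binder_ioctl() returns to userspace...
 *	binder_do_fd_close(twork)      // runs via task_work on this task
 *	    fput(file)                 // drop the pinned reference
 *	    kfree(twcb)
 *
 * The two-step dance exists because dropping the last file reference
 * from inside binder_ioctl() could tear down binder's own fd while it
 * is still in use; see the kernel-doc above.
 */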

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t off_end_offset,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)off_end_offset);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));

	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated, so
			 * there is nothing to close here; the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset = parent->buffer - buffer->user_data +
				fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

/* Clean up all the objects in the buffer */
static inline void binder_release_entire_buffer(struct binder_proc *proc,
						struct binder_thread *thread,
						struct binder_buffer *buffer,
						bool is_failure)
{
	binder_size_t off_end_offset;

	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset += buffer->offsets_size;

	binder_transaction_buffer_release(proc, thread, buffer,
					  off_end_offset, is_failure);
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_txn_fd_fixup *fixup;
	struct file *file;
	int ret = 0;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	/*
	 * Add fixup record for this transaction. The allocation
	 * of the fd in the target needs to be done from a
	 * target thread.
	 */
	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
	if (!fixup) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	fixup->file = file;
	fixup->offset = fd_offset;
	fixup->target_fd = -1;
	trace_binder_transaction_fd_send(t, fd, fixup->offset);
	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);

	return ret;

err_alloc:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
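
/*
 * Illustrative sketch (not part of the driver): what one queued
 * binder_txn_fd_fixup record from the function above represents.
 *
 *	fixup->file      = file pinned in the sender via fget()
 *	fixup->offset    = where the target-side fd value must be
 *	                   written in the target's transaction buffer
 *	fixup->target_fd = -1   // not allocated yet
 *
 * The fd itself is deliberately *not* allocated here: fd allocation has
 * to happen in target-process context, so the record is queued on
 * t->fd_fixups and replayed when a target thread picks the transaction
 * up. If the transaction fails instead, tearing it down fput()s the
 * pinned files.
 */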

/**
 * struct binder_ptr_fixup - data to be fixed-up in target buffer
 * @offset:	offset in target buffer to fixup
 * @skip_size:	bytes to skip in copy (fixup will be written later)
 * @fixup_data:	data to write at fixup offset
 * @node:	list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_ptr_fixup {
	binder_size_t offset;
	size_t skip_size;
	binder_uintptr_t fixup_data;
	struct list_head node;
};

/**
 * struct binder_sg_copy - scatter-gather data to be copied
 * @offset:		offset in target buffer
 * @sender_uaddr:	user address in source buffer
 * @length:		bytes to copy
 * @node:		list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
 * locking is necessary.
 *
 * The list is ordered by @offset.
 */
struct binder_sg_copy {
	binder_size_t offset;
	const void __user *sender_uaddr;
	size_t length;
	struct list_head node;
};
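
/*
 * Illustrative sketch (not part of the driver): one scatter-gather
 * buffer containing an embedded pointer, as described by the two lists
 * above. All offsets and addresses are hypothetical.
 *
 *	sgc list: { offset = 0x40, sender_uaddr = 0x7f..., length = 0x30 }
 *	pf list:  { offset = 0x48, fixup_data = <target-space address>,
 *	            skip_size = 0 }
 *
 * The deferred copy will transfer bytes [0x40, 0x48) from the sender,
 * write the 8-byte fixup_data at 0x48 in place of the sender's raw
 * pointer, then resume copying at 0x50. The target process therefore
 * never observes an untranslated sender-side pointer.
 */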

/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed in local context.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *tmppf;
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * We copy up to the fixup (pointed to by pf)
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		BUG_ON(pf->skip_size == 0);
		list_del(&pf->node);
		kfree(pf);
	}
	BUG_ON(!list_empty(sgc_head));

	return ret > 0 ? -EINVAL : ret;
}
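
/*
 * Illustrative sketch (not part of the driver): a fixup with a nonzero
 * skip_size, as consumed by the loop above. Values are hypothetical.
 *
 *	pf list: { offset = 0x80, skip_size = 2 * sizeof(u32) }
 *
 * This marks a BINDER_TYPE_FDA region of two fds: the copy loop leaves
 * the bytes at [0x80, 0x88) in the target buffer alone (bytes_copied is
 * simply advanced past them) because the translated fds are written
 * later, from target context, by the fd-fixup machinery. After the sgc
 * list drains, a leftover entry with skip_size == 0 would mean a
 * pointer fixup was never applied, which is why the cleanup loop does
 * BUG_ON(pf->skip_size == 0).
 */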

/**
 * binder_cleanup_deferred_txn_lists() - free specified lists
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Called to clean up @sgc_head and @pf_head if there is an
 * error.
 */
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
					      struct list_head *pf_head)
{
	struct binder_sg_copy *sgc, *tmpsgc;
	struct binder_ptr_fixup *pf, *tmppf;

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		list_del(&sgc->node);
		kfree(sgc);
	}
	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
		list_del(&pf->node);
		kfree(pf);
	}
}

/**
 * binder_defer_copy() - queue a scatter-gather buffer for copy
 * @sgc_head:		list_head of scatter-gather copy list
 * @offset:		binder buffer offset in target process
 * @sender_uaddr:	user address in source process
 * @length:		bytes to copy
 *
 * Specify a scatter-gather block to be copied. The actual copy must
 * be deferred until all the needed fixups are identified and queued.
 * Then the copy and fixups are done together so un-translated values
 * from the source are never visible in the target buffer.
 *
 * We are guaranteed that repeated calls to this function will have
 * monotonically increasing @offset values so the list will naturally
 * be ordered.
 *
 * Return: 0=success, else -errno
 */
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
			     const void __user *sender_uaddr, size_t length)
{
	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);

	if (!bc)
		return -ENOMEM;

	bc->offset = offset;
	bc->sender_uaddr = sender_uaddr;
	bc->length = length;
	INIT_LIST_HEAD(&bc->node);

	/*
	 * We are guaranteed that the deferred copies are in-order
	 * so just add to the tail.
	 */
	list_add_tail(&bc->node, sgc_head);

	return 0;
}

/**
 * binder_add_fixup() - queue a fixup to be applied to sg copy
 * @pf_head:	list_head of binder ptr fixup list
 * @offset:	binder buffer offset in target process
 * @fixup:	bytes to be copied for fixup
 * @skip_size:	bytes to skip when copying (fixup will be applied later)
 *
 * Add the specified fixup to a list ordered by @offset. When copying
 * the scatter-gather buffers, the fixup will be copied instead of
 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
 * will be applied later (in target process context), so we just skip
 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
 * value in @fixup.
 *
 * This function is called *mostly* in @offset order, but there are
 * exceptions. Since out-of-order inserts are relatively uncommon,
 * we insert the new element by searching backward from the tail of
 * the list.
 *
 * Return: 0=success, else -errno
 */
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
			    binder_uintptr_t fixup, size_t skip_size)
{
	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	struct binder_ptr_fixup *tmppf;

	if (!pf)
		return -ENOMEM;

	pf->offset = offset;
	pf->fixup_data = fixup;
	pf->skip_size = skip_size;
	INIT_LIST_HEAD(&pf->node);

	/* Fixups are *mostly* added in-order, but there are some
	 * exceptions. Look backwards through list for insertion point.
	 */
	list_for_each_entry_reverse(tmppf, pf_head, node) {
		if (tmppf->offset < pf->offset) {
			list_add(&pf->node, &tmppf->node);
			return 0;
		}
	}
	/*
	 * if we get here, then the new offset is the lowest so
	 * insert at the head
	 */
	list_add(&pf->node, pf_head);
	return 0;
}
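
/*
 * Illustrative sketch (not part of the driver): the backward search in
 * binder_add_fixup() with hypothetical offsets. Starting list (ordered):
 *
 *	pf_head -> { 0x10 } -> { 0x40 } -> { 0x70 }
 *
 * binder_add_fixup(pf_head, 0x58, ...) walks backward from the tail:
 * 0x70 is not < 0x58 so it is skipped, 0x40 < 0x58 matches, and the new
 * node is linked right after it:
 *
 *	pf_head -> { 0x10 } -> { 0x40 } -> { 0x58 } -> { 0x70 }
 *
 * Because inserts are *mostly* already in order, the common case
 * terminates at the tail after a single comparison.
 */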

static int binder_translate_fd_array(struct list_head *pf_head,
				     struct binder_fd_array_object *fda,
				     const void __user *sender_ubuffer,
				     struct binder_buffer_object *parent,
				     struct binder_buffer_object *sender_uparent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size;
	binder_size_t fda_offset;
	const void __user *sender_ufda_base;
	struct binder_proc *proc = thread->proc;
	int ret;

	if (fda->num_fds == 0)
		return 0;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * the source data for binder_buffer_object is visible
	 * to user-space and the @buffer element is the user
	 * pointer to the buffer_object containing the fd_array.
	 * Convert the address to an offset relative to
	 * the base of the transaction buffer.
	 */
	fda_offset = parent->buffer - t->buffer->user_data +
		fda->parent_offset;
	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
		fda->parent_offset;

	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
	if (ret)
		return ret;

	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		u32 fd;
		binder_size_t offset = fda_offset + fdi * sizeof(fd);
		binder_size_t sender_uoffset = fdi * sizeof(fd);

		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
		if (!ret)
			ret = binder_translate_fd(fd, offset, t, thread,
						  in_reply_to);
		if (ret)
			return ret > 0 ? -EINVAL : ret;
	}
	return 0;
}

static int binder_fixup_parent(struct list_head *pf_head,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;

	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}

/**
 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
 * @t1: the pending async txn in the frozen process
 * @t2: the new async txn to supersede the outdated pending one
 *
 * Return: true if t2 can supersede t1
 *         false if t2 cannot supersede t1
 */
static bool binder_can_update_transaction(struct binder_transaction *t1,
					  struct binder_transaction *t2)
{
	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
		return false;
	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
		return true;
	return false;
}
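
/*
 * Illustrative sketch (not part of the driver): when two transactions
 * match under binder_can_update_transaction(). Both must carry
 * TF_ONE_WAY | TF_UPDATE_TXN and must agree on target task, code,
 * flags, sender pid, and target node identity:
 *
 *	t1: { flags = TF_ONE_WAY | TF_UPDATE_TXN, code = SET_VOLUME,
 *	      to_proc = P, node = N }	// queued while P is frozen
 *	t2: { flags = TF_ONE_WAY | TF_UPDATE_TXN, code = SET_VOLUME,
 *	      to_proc = P, node = N }	// carries newer state
 *
 * Here t2 may replace t1 on the async todo list, so a frozen process
 * wakes to only the latest of a stream of equivalent state updates.
 * SET_VOLUME is a made-up userspace transaction code used purely for
 * illustration.
 */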

/**
 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
 * @t:		 new async transaction
 * @target_list: list to find outdated transaction
 *
 * Return: the outdated transaction if found
 *         NULL if no outdated transaction can be found
 *
 * Requires the proc->inner_lock to be held.
 */
static struct binder_transaction *
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
					 struct list_head *target_list)
{
	struct binder_work *w;

	list_for_each_entry(w, target_list, entry) {
		struct binder_transaction *t_queued;

		if (w->type != BINDER_WORK_TRANSACTION)
			continue;
		t_queued = container_of(w, struct binder_transaction, work);
		if (binder_can_update_transaction(t_queued, t))
			return t_queued;
	}
	return NULL;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen and
 *			the sync transaction was rejected
 *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
 *			and the async transaction was successfully queued
 */
static int binder_proc_transaction(struct binder_transaction *t,
				   struct binder_proc *proc,
				   struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;
	struct binder_transaction *t_outdated = NULL;
	bool frozen = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		frozen = true;
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((frozen && !oneway) || proc->is_dead ||
	    (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		if ((t->flags & TF_UPDATE_TXN) && frozen) {
			t_outdated = binder_find_outdated_transaction_ilocked(t,
									      &node->async_todo);
			if (t_outdated) {
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "txn %d supersedes %d\n",
					     t->debug_id, t_outdated->debug_id);
				list_del_init(&t_outdated->work.entry);
				proc->outstanding_txns--;
			}
		}
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	/*
	 * To reduce potential contention, free the outdated transaction and
	 * buffer after releasing the locks.
	 */
	if (t_outdated) {
		struct binder_buffer *buffer = t_outdated->buffer;

		t_outdated->buffer = NULL;
		buffer->transaction = NULL;
		trace_binder_transaction_update_buffer_release(buffer);
		binder_release_entire_buffer(proc, NULL, buffer, false);
		binder_alloc_free_buf(&proc->alloc, buffer);
		kfree(t_outdated);
		binder_stats_deleted(BINDER_STAT_TRANSACTION);
	}

	if (oneway && frozen)
		return BR_TRANSACTION_PENDING_FROZEN;

	return 0;
}
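
/*
 * Illustrative sketch (not part of the driver): how a caller is
 * expected to map binder_proc_transaction() results, per the kernel-doc
 * above.
 *
 *	switch (binder_proc_transaction(t, target_proc, target_thread)) {
 *	case 0:                             // queued and (maybe) woken
 *		break;
 *	case BR_TRANSACTION_PENDING_FROZEN: // async txn queued on a
 *		break;                      // frozen proc; not an error
 *	case BR_FROZEN_REPLY:               // sync txn rejected: frozen
 *	case BR_DEAD_REPLY:                 // target proc/thread dead
 *		return_error = ...;         // unwind and fail the sender
 *	}
 */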

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL
 * (indicating that the target proc has died), in which case @error is set
 * to BR_DEAD_REPLY. Also sets @procp if valid.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

static void binder_set_txn_from_error(struct binder_transaction *t, int id,
				      uint32_t command, int32_t param)
{
	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);

	if (!from) {
		/* annotation for sparse */
		__release(&from->proc->inner_lock);
		return;
	}

	/* don't override existing errors */
	if (from->ee.command == BR_OK)
		binder_set_extended_error(&from->ee, id, command, param);
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	binder_size_t user_offset = 0;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	ktime_t t_start_time = ktime_get();
	char *secctx = NULL;
	u32 secctx_sz = 0;
	struct list_head sgc_head;
	struct list_head pf_head;
	const void __user *user_buffer = (const void __user *)
				(uintptr_t)tr->data.ptr.buffer;
	INIT_LIST_HEAD(&sgc_head);
	INIT_LIST_HEAD(&pf_head);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);

	binder_inner_proc_lock(proc);
	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
	binder_inner_proc_unlock(proc);

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		binder_set_nice(in_reply_to->saved_priority);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			binder_txn_error("%d:%d reply target not found\n",
				thread->pid, proc->pid);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
						  proc->pid, thread->pid, tr->target.handle);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			binder_txn_error("%d:%d cannot find target node\n",
				thread->pid, proc->pid);
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (WARN_ON(proc == target_proc)) {
			binder_txn_error("%d:%d self transactions not allowed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (security_binder_transaction(proc->cred,
						target_proc->cred) < 0) {
			binder_txn_error("%d:%d transaction credentials failed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);

		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		binder_txn_error("%d:%d cannot allocate transaction\n",
			thread->pid, proc->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		binder_txn_error("%d:%d cannot allocate work for transaction\n",
			thread->pid, proc->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;
	t->start_time = t_start_time;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->from_pid = proc->pid;
	t->from_tid = thread->pid;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_cred_getsecid(proc->cred, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			binder_txn_error("%d:%d failed to get security context\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		char *s;

		ret = PTR_ERR(t->buffer);
		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
			: (ret == -ENOSPC) ? ": no space left"
			: (ret == -ENOMEM) ? ": memory allocation failed"
			: "";
		binder_txn_error("cannot allocate buffer%s", s);

		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
	trace_binder_transaction_alloc_buf(t->buffer);

	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;
		binder_size_t copy_size;

		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			binder_txn_error("%d:%d copy offset from buffer failed\n",
				thread->pid, proc->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		/*
		 * Copy the source user buffer up to the next object
		 * that will be processed.
		 */
		copy_size = object_offset - user_offset;
		if (copy_size && (user_offset > object_offset ||
				binder_alloc_copy_user_to_buffer(
					&target_proc->alloc,
					t->buffer, user_offset,
					user_buffer + user_offset,
					copy_size))) {
			binder_user_error("%d:%d got transaction with invalid data ptr\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EFAULT;
			return_error_line = __LINE__;
			goto err_copy_data_failed;
		}
		object_size = binder_get_object(target_proc, user_buffer,
				t->buffer, object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		/*
		 * Set offset to the next buffer fragment to be
		 * copied
		 */
		user_offset = object_offset + object_size;

3376 hdr = &object.hdr;
3377 off_min = object_offset + object_size;
3378 switch (hdr->type) {
3379 case BINDER_TYPE_BINDER:
3380 case BINDER_TYPE_WEAK_BINDER: {
3381 struct flat_binder_object *fp;
3382
3383 fp = to_flat_binder_object(hdr);
3384 ret = binder_translate_binder(fp, t, thread);
3385
3386 if (ret < 0 ||
3387 binder_alloc_copy_to_buffer(&target_proc->alloc,
3388 t->buffer,
3389 object_offset,
3390 fp, sizeof(*fp))) {
3391 binder_txn_error("%d:%d translate binder failed\n",
3392 thread->pid, proc->pid);
3393 return_error = BR_FAILED_REPLY;
3394 return_error_param = ret;
3395 return_error_line = __LINE__;
3396 goto err_translate_failed;
3397 }
3398 } break;
3399 case BINDER_TYPE_HANDLE:
3400 case BINDER_TYPE_WEAK_HANDLE: {
3401 struct flat_binder_object *fp;
3402
3403 fp = to_flat_binder_object(hdr);
3404 ret = binder_translate_handle(fp, t, thread);
3405 if (ret < 0 ||
3406 binder_alloc_copy_to_buffer(&target_proc->alloc,
3407 t->buffer,
3408 object_offset,
3409 fp, sizeof(*fp))) {
3410 binder_txn_error("%d:%d translate handle failed\n",
3411 thread->pid, proc->pid);
3412 return_error = BR_FAILED_REPLY;
3413 return_error_param = ret;
3414 return_error_line = __LINE__;
3415 goto err_translate_failed;
3416 }
3417 } break;
3418
3419 case BINDER_TYPE_FD: {
3420 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3421 binder_size_t fd_offset = object_offset +
3422 (uintptr_t)&fp->fd - (uintptr_t)fp;
3423 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3424 thread, in_reply_to);
3425
3426 fp->pad_binder = 0;
3427 if (ret < 0 ||
3428 binder_alloc_copy_to_buffer(&target_proc->alloc,
3429 t->buffer,
3430 object_offset,
3431 fp, sizeof(*fp))) {
3432 binder_txn_error("%d:%d translate fd failed\n",
3433 thread->pid, proc->pid);
3434 return_error = BR_FAILED_REPLY;
3435 return_error_param = ret;
3436 return_error_line = __LINE__;
3437 goto err_translate_failed;
3438 }
3439 } break;
3440 case BINDER_TYPE_FDA: {
3441 struct binder_object ptr_object;
3442 binder_size_t parent_offset;
3443 struct binder_object user_object;
3444 size_t user_parent_size;
3445 struct binder_fd_array_object *fda =
3446 to_binder_fd_array_object(hdr);
3447 size_t num_valid = (buffer_offset - off_start_offset) /
3448 sizeof(binder_size_t);
3449 struct binder_buffer_object *parent =
3450 binder_validate_ptr(target_proc, t->buffer,
3451 &ptr_object, fda->parent,
3452 off_start_offset,
3453 &parent_offset,
3454 num_valid);
3455 if (!parent) {
3456 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3457 proc->pid, thread->pid);
3458 return_error = BR_FAILED_REPLY;
3459 return_error_param = -EINVAL;
3460 return_error_line = __LINE__;
3461 goto err_bad_parent;
3462 }
3463 if (!binder_validate_fixup(target_proc, t->buffer,
3464 off_start_offset,
3465 parent_offset,
3466 fda->parent_offset,
3467 last_fixup_obj_off,
3468 last_fixup_min_off)) {
3469 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3470 proc->pid, thread->pid);
3471 return_error = BR_FAILED_REPLY;
3472 return_error_param = -EINVAL;
3473 return_error_line = __LINE__;
3474 goto err_bad_parent;
3475 }
3476 /*
3477 * We need to read the user version of the parent
3478 * object to get the original user offset
3479 */
3480 user_parent_size =
3481 binder_get_object(proc, user_buffer, t->buffer,
3482 parent_offset, &user_object);
3483 if (user_parent_size != sizeof(user_object.bbo)) {
3484 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3485 proc->pid, thread->pid,
3486 user_parent_size,
3487 sizeof(user_object.bbo));
3488 return_error = BR_FAILED_REPLY;
3489 return_error_param = -EINVAL;
3490 return_error_line = __LINE__;
3491 goto err_bad_parent;
3492 }
3493 ret = binder_translate_fd_array(&pf_head, fda,
3494 user_buffer, parent,
3495 &user_object.bbo, t,
3496 thread, in_reply_to);
3497 if (!ret)
3498 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3499 t->buffer,
3500 object_offset,
3501 fda, sizeof(*fda));
3502 if (ret) {
3503 binder_txn_error("%d:%d translate fd array failed\n",
3504 thread->pid, proc->pid);
3505 return_error = BR_FAILED_REPLY;
3506 return_error_param = ret > 0 ? -EINVAL : ret;
3507 return_error_line = __LINE__;
3508 goto err_translate_failed;
3509 }
3510 last_fixup_obj_off = parent_offset;
3511 last_fixup_min_off =
3512 fda->parent_offset + sizeof(u32) * fda->num_fds;
3513 } break;
3514 case BINDER_TYPE_PTR: {
3515 struct binder_buffer_object *bp =
3516 to_binder_buffer_object(hdr);
3517 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3518 size_t num_valid;
3519
3520 if (bp->length > buf_left) {
3521 binder_user_error("%d:%d got transaction with too large buffer\n",
3522 proc->pid, thread->pid);
3523 return_error = BR_FAILED_REPLY;
3524 return_error_param = -EINVAL;
3525 return_error_line = __LINE__;
3526 goto err_bad_offset;
3527 }
3528 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3529 (const void __user *)(uintptr_t)bp->buffer,
3530 bp->length);
3531 if (ret) {
3532 binder_txn_error("%d:%d deferred copy failed\n",
3533 thread->pid, proc->pid);
3534 return_error = BR_FAILED_REPLY;
3535 return_error_param = ret;
3536 return_error_line = __LINE__;
3537 goto err_translate_failed;
3538 }
3539 /* Fixup buffer pointer to target proc address space */
3540 bp->buffer = t->buffer->user_data + sg_buf_offset;
3541 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3542
3543 num_valid = (buffer_offset - off_start_offset) /
3544 sizeof(binder_size_t);
3545 ret = binder_fixup_parent(&pf_head, t,
3546 thread, bp,
3547 off_start_offset,
3548 num_valid,
3549 last_fixup_obj_off,
3550 last_fixup_min_off);
3551 if (ret < 0 ||
3552 binder_alloc_copy_to_buffer(&target_proc->alloc,
3553 t->buffer,
3554 object_offset,
3555 bp, sizeof(*bp))) {
3556 binder_txn_error("%d:%d failed to fixup parent\n",
3557 thread->pid, proc->pid);
3558 return_error = BR_FAILED_REPLY;
3559 return_error_param = ret;
3560 return_error_line = __LINE__;
3561 goto err_translate_failed;
3562 }
3563 last_fixup_obj_off = object_offset;
3564 last_fixup_min_off = 0;
3565 } break;
3566 default:
3567 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3568 proc->pid, thread->pid, hdr->type);
3569 return_error = BR_FAILED_REPLY;
3570 return_error_param = -EINVAL;
3571 return_error_line = __LINE__;
3572 goto err_bad_object_type;
3573 }
3574 }
3575 /* Done processing objects, copy the rest of the buffer */
3576 if (binder_alloc_copy_user_to_buffer(
3577 &target_proc->alloc,
3578 t->buffer, user_offset,
3579 user_buffer + user_offset,
3580 tr->data_size - user_offset)) {
3581 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3582 proc->pid, thread->pid);
3583 return_error = BR_FAILED_REPLY;
3584 return_error_param = -EFAULT;
3585 return_error_line = __LINE__;
3586 goto err_copy_data_failed;
3587 }
3588
3589 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3590 &sgc_head, &pf_head);
3591 if (ret) {
3592 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3593 proc->pid, thread->pid);
3594 return_error = BR_FAILED_REPLY;
3595 return_error_param = ret;
3596 return_error_line = __LINE__;
3597 goto err_copy_data_failed;
3598 }
3599 if (t->buffer->oneway_spam_suspect)
3600 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3601 else
3602 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3603 t->work.type = BINDER_WORK_TRANSACTION;
3604
3605 if (reply) {
3606 binder_enqueue_thread_work(thread, tcomplete);
3607 binder_inner_proc_lock(target_proc);
3608 if (target_thread->is_dead) {
3609 return_error = BR_DEAD_REPLY;
3610 binder_inner_proc_unlock(target_proc);
3611 goto err_dead_proc_or_thread;
3612 }
3613 BUG_ON(t->buffer->async_transaction != 0);
3614 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3615 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3616 target_proc->outstanding_txns++;
3617 binder_inner_proc_unlock(target_proc);
3618 wake_up_interruptible_sync(&target_thread->wait);
3619 binder_free_transaction(in_reply_to);
3620 } else if (!(t->flags & TF_ONE_WAY)) {
3621 BUG_ON(t->buffer->async_transaction != 0);
3622 binder_inner_proc_lock(proc);
3623 /*
3624 * Defer the TRANSACTION_COMPLETE so we don't return to
3625 * userspace immediately; this lets the target process start
3626 * processing this transaction right away, reducing latency.
3627 * We will then return the TRANSACTION_COMPLETE when the
3628 * target replies (or there is an error).
3629 */
3630 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3631 t->need_reply = 1;
3632 t->from_parent = thread->transaction_stack;
3633 thread->transaction_stack = t;
3634 binder_inner_proc_unlock(proc);
3635 return_error = binder_proc_transaction(t,
3636 target_proc, target_thread);
3637 if (return_error) {
3638 binder_inner_proc_lock(proc);
3639 binder_pop_transaction_ilocked(thread, t);
3640 binder_inner_proc_unlock(proc);
3641 goto err_dead_proc_or_thread;
3642 }
3643 } else {
3644 BUG_ON(target_node == NULL);
3645 BUG_ON(t->buffer->async_transaction != 1);
3646 return_error = binder_proc_transaction(t, target_proc, NULL);
3647 /*
3648 * Let the caller know when an async transaction reaches a frozen
3649 * process and is put in a pending queue, waiting for the target
3650 * process to be unfrozen.
3651 */
3652 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3653 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3654 binder_enqueue_thread_work(thread, tcomplete);
3655 if (return_error &&
3656 return_error != BR_TRANSACTION_PENDING_FROZEN)
3657 goto err_dead_proc_or_thread;
3658 }
3659 if (target_thread)
3660 binder_thread_dec_tmpref(target_thread);
3661 binder_proc_dec_tmpref(target_proc);
3662 if (target_node)
3663 binder_dec_node_tmpref(target_node);
3664 /*
3665 * write barrier to synchronize with initialization
3666 * of log entry
3667 */
3668 smp_wmb();
3669 WRITE_ONCE(e->debug_id_done, t_debug_id);
3670 return;
3671
3672err_dead_proc_or_thread:
3673 binder_txn_error("%d:%d dead process or thread\n",
3674 thread->pid, proc->pid);
3675 return_error_line = __LINE__;
3676 binder_dequeue_work(proc, tcomplete);
3677err_translate_failed:
3678err_bad_object_type:
3679err_bad_offset:
3680err_bad_parent:
3681err_copy_data_failed:
3682 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3683 binder_free_txn_fixups(t);
3684 trace_binder_transaction_failed_buffer_release(t->buffer);
3685 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3686 buffer_offset, true);
3687 if (target_node)
3688 binder_dec_node_tmpref(target_node);
3689 target_node = NULL;
3690 t->buffer->transaction = NULL;
3691 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3692err_binder_alloc_buf_failed:
3693err_bad_extra_size:
3694 if (secctx)
3695 security_release_secctx(secctx, secctx_sz);
3696err_get_secctx_failed:
3697 kfree(tcomplete);
3698 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3699err_alloc_tcomplete_failed:
3700 if (trace_binder_txn_latency_free_enabled())
3701 binder_txn_latency_free(t);
3702 kfree(t);
3703 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3704err_alloc_t_failed:
3705err_bad_todo_list:
3706err_bad_call_stack:
3707err_empty_call_stack:
3708err_dead_binder:
3709err_invalid_target_handle:
3710 if (target_node) {
3711 binder_dec_node(target_node, 1, 0);
3712 binder_dec_node_tmpref(target_node);
3713 }
3714
3715 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3716 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3717 proc->pid, thread->pid, reply ? "reply" :
3718 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3719 target_proc ? target_proc->pid : 0,
3720 target_thread ? target_thread->pid : 0,
3721 t_debug_id, return_error, return_error_param,
3722 (u64)tr->data_size, (u64)tr->offsets_size,
3723 return_error_line);
3724
3725 if (target_thread)
3726 binder_thread_dec_tmpref(target_thread);
3727 if (target_proc)
3728 binder_proc_dec_tmpref(target_proc);
3729
3730 {
3731 struct binder_transaction_log_entry *fe;
3732
3733 e->return_error = return_error;
3734 e->return_error_param = return_error_param;
3735 e->return_error_line = return_error_line;
3736 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3737 *fe = *e;
3738 /*
3739 * write barrier to synchronize with initialization
3740 * of log entry
3741 */
3742 smp_wmb();
3743 WRITE_ONCE(e->debug_id_done, t_debug_id);
3744 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3745 }
3746
3747 BUG_ON(thread->return_error.cmd != BR_OK);
3748 if (in_reply_to) {
3749 binder_set_txn_from_error(in_reply_to, t_debug_id,
3750 return_error, return_error_param);
3751 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3752 binder_enqueue_thread_work(thread, &thread->return_error.work);
3753 binder_send_failed_reply(in_reply_to, return_error);
3754 } else {
3755 binder_inner_proc_lock(proc);
3756 binder_set_extended_error(&thread->ee, t_debug_id,
3757 return_error, return_error_param);
3758 binder_inner_proc_unlock(proc);
3759 thread->return_error.cmd = return_error;
3760 binder_enqueue_thread_work(thread, &thread->return_error.work);
3761 }
3762}
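/*
 * Example (userspace sketch, not kernel code): how a sender observes the
 * completion protocol implemented above. A minimal sketch assuming an open
 * binder fd and the UAPI <linux/android/binder.h>; error handling omitted.
 *
 *	struct {
 *		uint32_t cmd;				// BC_TRANSACTION
 *		struct binder_transaction_data tr;	// target.handle, code, data...
 *	} __attribute__((packed)) wb = { .cmd = BC_TRANSACTION };
 *	uint32_t rb[32];
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wb),
 *		.write_buffer = (binder_uintptr_t)&wb,
 *		.read_size = sizeof(rb),
 *		.read_buffer = (binder_uintptr_t)rb,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// Two-way call: BR_TRANSACTION_COMPLETE is deferred (see above) and
 *	// arrives together with the BR_REPLY. One-way call: it is returned
 *	// right away, or as BR_TRANSACTION_PENDING_FROZEN if the target was
 *	// frozen and the work queued.
 */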
3763
3764/**
3765 * binder_free_buf() - free the specified buffer
3766 * @proc: binder proc that owns buffer
3767 * @thread: thread performing the buffer release
3768 * @buffer: buffer to be freed
3769 * @is_failure: true if unable to send the transaction
3770 *
3771 * If the buffer is for an async transaction, enqueue the next async
3772 * transaction from the node, then clean up the buffer and free it.
3773 */
3775static void
3776binder_free_buf(struct binder_proc *proc,
3777 struct binder_thread *thread,
3778 struct binder_buffer *buffer, bool is_failure)
3779{
3780 binder_inner_proc_lock(proc);
3781 if (buffer->transaction) {
3782 buffer->transaction->buffer = NULL;
3783 buffer->transaction = NULL;
3784 }
3785 binder_inner_proc_unlock(proc);
3786 if (buffer->async_transaction && buffer->target_node) {
3787 struct binder_node *buf_node;
3788 struct binder_work *w;
3789
3790 buf_node = buffer->target_node;
3791 binder_node_inner_lock(buf_node);
3792 BUG_ON(!buf_node->has_async_transaction);
3793 BUG_ON(buf_node->proc != proc);
3794 w = binder_dequeue_work_head_ilocked(
3795 &buf_node->async_todo);
3796 if (!w) {
3797 buf_node->has_async_transaction = false;
3798 } else {
3799 binder_enqueue_work_ilocked(
3800 w, &proc->todo);
3801 binder_wakeup_proc_ilocked(proc);
3802 }
3803 binder_node_inner_unlock(buf_node);
3804 }
3805 trace_binder_transaction_buffer_release(buffer);
3806 binder_release_entire_buffer(proc, thread, buffer, is_failure);
3807 binder_alloc_free_buf(&proc->alloc, buffer);
3808}
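/*
 * Example (userspace sketch): recipients return buffers with
 * BC_FREE_BUFFER once they are done with the data delivered by
 * BR_TRANSACTION/BR_REPLY. For async transactions this is also what
 * releases the next entry queued on node->async_todo above. Minimal
 * sketch, error handling omitted:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t buffer;
 *	} __attribute__((packed)) wb = {
 *		.cmd = BC_FREE_BUFFER,
 *		.buffer = tr.data.ptr.buffer,	// from the received transaction
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wb),
 *		.write_buffer = (binder_uintptr_t)&wb,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */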
3809
3810static int binder_thread_write(struct binder_proc *proc,
3811 struct binder_thread *thread,
3812 binder_uintptr_t binder_buffer, size_t size,
3813 binder_size_t *consumed)
3814{
3815 uint32_t cmd;
3816 struct binder_context *context = proc->context;
3817 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3818 void __user *ptr = buffer + *consumed;
3819 void __user *end = buffer + size;
3820
3821 while (ptr < end && thread->return_error.cmd == BR_OK) {
3822 int ret;
3823
3824 if (get_user(cmd, (uint32_t __user *)ptr))
3825 return -EFAULT;
3826 ptr += sizeof(uint32_t);
3827 trace_binder_command(cmd);
3828 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3829 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3830 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3831 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3832 }
3833 switch (cmd) {
3834 case BC_INCREFS:
3835 case BC_ACQUIRE:
3836 case BC_RELEASE:
3837 case BC_DECREFS: {
3838 uint32_t target;
3839 const char *debug_string;
3840 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3841 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3842 struct binder_ref_data rdata;
3843
3844 if (get_user(target, (uint32_t __user *)ptr))
3845 return -EFAULT;
3846
3847 ptr += sizeof(uint32_t);
3848 ret = -1;
3849 if (increment && !target) {
3850 struct binder_node *ctx_mgr_node;
3851
3852 mutex_lock(&context->context_mgr_node_lock);
3853 ctx_mgr_node = context->binder_context_mgr_node;
3854 if (ctx_mgr_node) {
3855 if (ctx_mgr_node->proc == proc) {
3856 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3857 proc->pid, thread->pid);
3858 mutex_unlock(&context->context_mgr_node_lock);
3859 return -EINVAL;
3860 }
3861 ret = binder_inc_ref_for_node(
3862 proc, ctx_mgr_node,
3863 strong, NULL, &rdata);
3864 }
3865 mutex_unlock(&context->context_mgr_node_lock);
3866 }
3867 if (ret)
3868 ret = binder_update_ref_for_handle(
3869 proc, target, increment, strong,
3870 &rdata);
3871 if (!ret && rdata.desc != target) {
3872 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3873 proc->pid, thread->pid,
3874 target, rdata.desc);
3875 }
3876 switch (cmd) {
3877 case BC_INCREFS:
3878 debug_string = "IncRefs";
3879 break;
3880 case BC_ACQUIRE:
3881 debug_string = "Acquire";
3882 break;
3883 case BC_RELEASE:
3884 debug_string = "Release";
3885 break;
3886 case BC_DECREFS:
3887 default:
3888 debug_string = "DecRefs";
3889 break;
3890 }
3891 if (ret) {
3892 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3893 proc->pid, thread->pid, debug_string,
3894 strong, target, ret);
3895 break;
3896 }
3897 binder_debug(BINDER_DEBUG_USER_REFS,
3898 "%d:%d %s ref %d desc %d s %d w %d\n",
3899 proc->pid, thread->pid, debug_string,
3900 rdata.debug_id, rdata.desc, rdata.strong,
3901 rdata.weak);
3902 break;
3903 }
3904 case BC_INCREFS_DONE:
3905 case BC_ACQUIRE_DONE: {
3906 binder_uintptr_t node_ptr;
3907 binder_uintptr_t cookie;
3908 struct binder_node *node;
3909 bool free_node;
3910
3911 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3912 return -EFAULT;
3913 ptr += sizeof(binder_uintptr_t);
3914 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3915 return -EFAULT;
3916 ptr += sizeof(binder_uintptr_t);
3917 node = binder_get_node(proc, node_ptr);
3918 if (node == NULL) {
3919 binder_user_error("%d:%d %s u%016llx no match\n",
3920 proc->pid, thread->pid,
3921 cmd == BC_INCREFS_DONE ?
3922 "BC_INCREFS_DONE" :
3923 "BC_ACQUIRE_DONE",
3924 (u64)node_ptr);
3925 break;
3926 }
3927 if (cookie != node->cookie) {
3928 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3929 proc->pid, thread->pid,
3930 cmd == BC_INCREFS_DONE ?
3931 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3932 (u64)node_ptr, node->debug_id,
3933 (u64)cookie, (u64)node->cookie);
3934 binder_put_node(node);
3935 break;
3936 }
3937 binder_node_inner_lock(node);
3938 if (cmd == BC_ACQUIRE_DONE) {
3939 if (node->pending_strong_ref == 0) {
3940 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3941 proc->pid, thread->pid,
3942 node->debug_id);
3943 binder_node_inner_unlock(node);
3944 binder_put_node(node);
3945 break;
3946 }
3947 node->pending_strong_ref = 0;
3948 } else {
3949 if (node->pending_weak_ref == 0) {
3950 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3951 proc->pid, thread->pid,
3952 node->debug_id);
3953 binder_node_inner_unlock(node);
3954 binder_put_node(node);
3955 break;
3956 }
3957 node->pending_weak_ref = 0;
3958 }
3959 free_node = binder_dec_node_nilocked(node,
3960 cmd == BC_ACQUIRE_DONE, 0);
3961 WARN_ON(free_node);
3962 binder_debug(BINDER_DEBUG_USER_REFS,
3963 "%d:%d %s node %d ls %d lw %d tr %d\n",
3964 proc->pid, thread->pid,
3965 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3966 node->debug_id, node->local_strong_refs,
3967 node->local_weak_refs, node->tmp_refs);
3968 binder_node_inner_unlock(node);
3969 binder_put_node(node);
3970 break;
3971 }
3972 case BC_ATTEMPT_ACQUIRE:
3973 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3974 return -EINVAL;
3975 case BC_ACQUIRE_RESULT:
3976 pr_err("BC_ACQUIRE_RESULT not supported\n");
3977 return -EINVAL;
3978
3979 case BC_FREE_BUFFER: {
3980 binder_uintptr_t data_ptr;
3981 struct binder_buffer *buffer;
3982
3983 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3984 return -EFAULT;
3985 ptr += sizeof(binder_uintptr_t);
3986
3987 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3988 data_ptr);
3989 if (IS_ERR_OR_NULL(buffer)) {
3990 if (PTR_ERR(buffer) == -EPERM) {
3991 binder_user_error(
3992 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3993 proc->pid, thread->pid,
3994 (u64)data_ptr);
3995 } else {
3996 binder_user_error(
3997 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3998 proc->pid, thread->pid,
3999 (u64)data_ptr);
4000 }
4001 break;
4002 }
4003 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4004 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4005 proc->pid, thread->pid, (u64)data_ptr,
4006 buffer->debug_id,
4007 buffer->transaction ? "active" : "finished");
4008 binder_free_buf(proc, thread, buffer, false);
4009 break;
4010 }
4011
4012 case BC_TRANSACTION_SG:
4013 case BC_REPLY_SG: {
4014 struct binder_transaction_data_sg tr;
4015
4016 if (copy_from_user(&tr, ptr, sizeof(tr)))
4017 return -EFAULT;
4018 ptr += sizeof(tr);
4019 binder_transaction(proc, thread, &tr.transaction_data,
4020 cmd == BC_REPLY_SG, tr.buffers_size);
4021 break;
4022 }
4023 case BC_TRANSACTION:
4024 case BC_REPLY: {
4025 struct binder_transaction_data tr;
4026
4027 if (copy_from_user(&tr, ptr, sizeof(tr)))
4028 return -EFAULT;
4029 ptr += sizeof(tr);
4030 binder_transaction(proc, thread, &tr,
4031 cmd == BC_REPLY, 0);
4032 break;
4033 }
4034
4035 case BC_REGISTER_LOOPER:
4036 binder_debug(BINDER_DEBUG_THREADS,
4037 "%d:%d BC_REGISTER_LOOPER\n",
4038 proc->pid, thread->pid);
4039 binder_inner_proc_lock(proc);
4040 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4041 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4042 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4043 proc->pid, thread->pid);
4044 } else if (proc->requested_threads == 0) {
4045 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4046 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4047 proc->pid, thread->pid);
4048 } else {
4049 proc->requested_threads--;
4050 proc->requested_threads_started++;
4051 }
4052 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4053 binder_inner_proc_unlock(proc);
4054 break;
4055 case BC_ENTER_LOOPER:
4056 binder_debug(BINDER_DEBUG_THREADS,
4057 "%d:%d BC_ENTER_LOOPER\n",
4058 proc->pid, thread->pid);
4059 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4060 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4061 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4062 proc->pid, thread->pid);
4063 }
4064 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4065 break;
4066 case BC_EXIT_LOOPER:
4067 binder_debug(BINDER_DEBUG_THREADS,
4068 "%d:%d BC_EXIT_LOOPER\n",
4069 proc->pid, thread->pid);
4070 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4071 break;
4072
4073 case BC_REQUEST_DEATH_NOTIFICATION:
4074 case BC_CLEAR_DEATH_NOTIFICATION: {
4075 uint32_t target;
4076 binder_uintptr_t cookie;
4077 struct binder_ref *ref;
4078 struct binder_ref_death *death = NULL;
4079
4080 if (get_user(target, (uint32_t __user *)ptr))
4081 return -EFAULT;
4082 ptr += sizeof(uint32_t);
4083 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4084 return -EFAULT;
4085 ptr += sizeof(binder_uintptr_t);
4086 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4087 /*
4088 * Allocate memory for death notification
4089 * before taking lock
4090 */
4091 death = kzalloc(sizeof(*death), GFP_KERNEL);
4092 if (death == NULL) {
4093 WARN_ON(thread->return_error.cmd !=
4094 BR_OK);
4095 thread->return_error.cmd = BR_ERROR;
4096 binder_enqueue_thread_work(
4097 thread,
4098 &thread->return_error.work);
4099 binder_debug(
4100 BINDER_DEBUG_FAILED_TRANSACTION,
4101 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4102 proc->pid, thread->pid);
4103 break;
4104 }
4105 }
4106 binder_proc_lock(proc);
4107 ref = binder_get_ref_olocked(proc, target, false);
4108 if (ref == NULL) {
4109 binder_user_error("%d:%d %s invalid ref %d\n",
4110 proc->pid, thread->pid,
4111 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4112 "BC_REQUEST_DEATH_NOTIFICATION" :
4113 "BC_CLEAR_DEATH_NOTIFICATION",
4114 target);
4115 binder_proc_unlock(proc);
4116 kfree(death);
4117 break;
4118 }
4119
4120 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4121 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4122 proc->pid, thread->pid,
4123 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4124 "BC_REQUEST_DEATH_NOTIFICATION" :
4125 "BC_CLEAR_DEATH_NOTIFICATION",
4126 (u64)cookie, ref->data.debug_id,
4127 ref->data.desc, ref->data.strong,
4128 ref->data.weak, ref->node->debug_id);
4129
4130 binder_node_lock(ref->node);
4131 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4132 if (ref->death) {
4133 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4134 proc->pid, thread->pid);
4135 binder_node_unlock(ref->node);
4136 binder_proc_unlock(proc);
4137 kfree(death);
4138 break;
4139 }
4140 binder_stats_created(BINDER_STAT_DEATH);
4141 INIT_LIST_HEAD(&death->work.entry);
4142 death->cookie = cookie;
4143 ref->death = death;
4144 if (ref->node->proc == NULL) {
4145 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4146
4147 binder_inner_proc_lock(proc);
4148 binder_enqueue_work_ilocked(
4149 &ref->death->work, &proc->todo);
4150 binder_wakeup_proc_ilocked(proc);
4151 binder_inner_proc_unlock(proc);
4152 }
4153 } else {
4154 if (ref->death == NULL) {
4155 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4156 proc->pid, thread->pid);
4157 binder_node_unlock(ref->node);
4158 binder_proc_unlock(proc);
4159 break;
4160 }
4161 death = ref->death;
4162 if (death->cookie != cookie) {
4163 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4164 proc->pid, thread->pid,
4165 (u64)death->cookie,
4166 (u64)cookie);
4167 binder_node_unlock(ref->node);
4168 binder_proc_unlock(proc);
4169 break;
4170 }
4171 ref->death = NULL;
4172 binder_inner_proc_lock(proc);
4173 if (list_empty(&death->work.entry)) {
4174 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4175 if (thread->looper &
4176 (BINDER_LOOPER_STATE_REGISTERED |
4177 BINDER_LOOPER_STATE_ENTERED))
4178 binder_enqueue_thread_work_ilocked(
4179 thread,
4180 &death->work);
4181 else {
4182 binder_enqueue_work_ilocked(
4183 &death->work,
4184 &proc->todo);
4185 binder_wakeup_proc_ilocked(
4186 proc);
4187 }
4188 } else {
4189 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4190 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4191 }
4192 binder_inner_proc_unlock(proc);
4193 }
4194 binder_node_unlock(ref->node);
4195 binder_proc_unlock(proc);
4196 } break;
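/*
 * Example (userspace sketch): the death notification handshake handled
 * by the case above; struct binder_handle_cookie comes from the binder
 * UAPI header, and "handle"/"proxy" stand in for caller state. Error
 * handling omitted:
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_handle_cookie hc;
 *	} __attribute__((packed)) wb = {
 *		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
 *		.hc = { .handle = handle, .cookie = (binder_uintptr_t)proxy },
 *	};
 *	// ...submit via BINDER_WRITE_READ. When the node's owner dies, a
 *	// BR_DEAD_BINDER carrying the same cookie is read back, and the
 *	// client must acknowledge it with BC_DEAD_BINDER_DONE <cookie> so
 *	// the work item can leave proc->delivered_death (next case below).
 */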
4197 case BC_DEAD_BINDER_DONE: {
4198 struct binder_work *w;
4199 binder_uintptr_t cookie;
4200 struct binder_ref_death *death = NULL;
4201
4202 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4203 return -EFAULT;
4204
4205 ptr += sizeof(cookie);
4206 binder_inner_proc_lock(proc);
4207 list_for_each_entry(w, &proc->delivered_death,
4208 entry) {
4209 struct binder_ref_death *tmp_death =
4210 container_of(w,
4211 struct binder_ref_death,
4212 work);
4213
4214 if (tmp_death->cookie == cookie) {
4215 death = tmp_death;
4216 break;
4217 }
4218 }
4219 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4220 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4221 proc->pid, thread->pid, (u64)cookie,
4222 death);
4223 if (death == NULL) {
4224 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4225 proc->pid, thread->pid, (u64)cookie);
4226 binder_inner_proc_unlock(proc);
4227 break;
4228 }
4229 binder_dequeue_work_ilocked(&death->work);
4230 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4231 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4232 if (thread->looper &
4233 (BINDER_LOOPER_STATE_REGISTERED |
4234 BINDER_LOOPER_STATE_ENTERED))
4235 binder_enqueue_thread_work_ilocked(
4236 thread, &death->work);
4237 else {
4238 binder_enqueue_work_ilocked(
4239 &death->work,
4240 &proc->todo);
4241 binder_wakeup_proc_ilocked(proc);
4242 }
4243 }
4244 binder_inner_proc_unlock(proc);
4245 } break;
4246
4247 default:
4248 pr_err("%d:%d unknown command %u\n",
4249 proc->pid, thread->pid, cmd);
4250 return -EINVAL;
4251 }
4252 *consumed = ptr - buffer;
4253 }
4254 return 0;
4255}
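/*
 * Example (userspace sketch): the write buffer parsed above is a packed
 * stream of (u32 command, payload) records; several commands may be
 * batched per ioctl. E.g. a release-then-free sequence ("desc" and
 * "buffer_ptr" are assumed caller state):
 *
 *	struct {
 *		uint32_t cmd1; uint32_t handle;		// BC_RELEASE <desc>
 *		uint32_t cmd2; binder_uintptr_t buf;	// BC_FREE_BUFFER <ptr>
 *	} __attribute__((packed)) stream = {
 *		BC_RELEASE, desc, BC_FREE_BUFFER, buffer_ptr,
 *	};
 *	// submitted through the write half of BINDER_WRITE_READ; *consumed
 *	// reports how far the kernel got if parsing stops early.
 */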
4256
4257static void binder_stat_br(struct binder_proc *proc,
4258 struct binder_thread *thread, uint32_t cmd)
4259{
4260 trace_binder_return(cmd);
4261 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4262 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4263 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4264 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4265 }
4266}
4267
4268static int binder_put_node_cmd(struct binder_proc *proc,
4269 struct binder_thread *thread,
4270 void __user **ptrp,
4271 binder_uintptr_t node_ptr,
4272 binder_uintptr_t node_cookie,
4273 int node_debug_id,
4274 uint32_t cmd, const char *cmd_name)
4275{
4276 void __user *ptr = *ptrp;
4277
4278 if (put_user(cmd, (uint32_t __user *)ptr))
4279 return -EFAULT;
4280 ptr += sizeof(uint32_t);
4281
4282 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4283 return -EFAULT;
4284 ptr += sizeof(binder_uintptr_t);
4285
4286 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4287 return -EFAULT;
4288 ptr += sizeof(binder_uintptr_t);
4289
4290 binder_stat_br(proc, thread, cmd);
4291 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4292 proc->pid, thread->pid, cmd_name, node_debug_id,
4293 (u64)node_ptr, (u64)node_cookie);
4294
4295 *ptrp = ptr;
4296 return 0;
4297}
4298
4299static int binder_wait_for_work(struct binder_thread *thread,
4300 bool do_proc_work)
4301{
4302 DEFINE_WAIT(wait);
4303 struct binder_proc *proc = thread->proc;
4304 int ret = 0;
4305
4306 binder_inner_proc_lock(proc);
4307 for (;;) {
4308 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4309 if (binder_has_work_ilocked(thread, do_proc_work))
4310 break;
4311 if (do_proc_work)
4312 list_add(&thread->waiting_thread_node,
4313 &proc->waiting_threads);
4314 binder_inner_proc_unlock(proc);
4315 schedule();
4316 binder_inner_proc_lock(proc);
4317 list_del_init(&thread->waiting_thread_node);
4318 if (signal_pending(current)) {
4319 ret = -EINTR;
4320 break;
4321 }
4322 }
4323 finish_wait(&thread->wait, &wait);
4324 binder_inner_proc_unlock(proc);
4325
4326 return ret;
4327}
4328
4329/**
4330 * binder_apply_fd_fixups() - finish fd translation
4331 * @proc: binder_proc associated with @t->buffer
4332 * @t: binder transaction with list of fd fixups
4333 *
4334 * Now that we are in the context of the transaction target
4335 * process, we can allocate and install fds. Process the
4336 * list of fds to translate, fix up the buffer with the
4337 * new fds first, and only then install the files.
4338 *
4339 * If we fail to allocate an fd, skip the install and release
4340 * any fds that have already been allocated.
4341 */
4342static int binder_apply_fd_fixups(struct binder_proc *proc,
4343 struct binder_transaction *t)
4344{
4345 struct binder_txn_fd_fixup *fixup, *tmp;
4346 int ret = 0;
4347
4348 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4349 int fd = get_unused_fd_flags(O_CLOEXEC);
4350
4351 if (fd < 0) {
4352 binder_debug(BINDER_DEBUG_TRANSACTION,
4353 "failed fd fixup txn %d fd %d\n",
4354 t->debug_id, fd);
4355 ret = -ENOMEM;
4356 goto err;
4357 }
4358 binder_debug(BINDER_DEBUG_TRANSACTION,
4359 "fd fixup txn %d fd %d\n",
4360 t->debug_id, fd);
4361 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4362 fixup->target_fd = fd;
4363 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4364 fixup->offset, &fd,
4365 sizeof(u32))) {
4366 ret = -EINVAL;
4367 goto err;
4368 }
4369 }
4370 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4371 fd_install(fixup->target_fd, fixup->file);
4372 list_del(&fixup->fixup_entry);
4373 kfree(fixup);
4374 }
4375
4376 return ret;
4377
4378err:
4379 binder_free_txn_fixups(t);
4380 return ret;
4381}
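/*
 * Example (userspace sketch): once the fixups above have run, the
 * recipient finds a ready-to-use fd in the delivered buffer wherever
 * the sender placed a BINDER_TYPE_FD object ("obj_offset" below stands
 * in for an entry from the transaction's offsets array):
 *
 *	struct binder_fd_object *fdo =
 *		(struct binder_fd_object *)(tr.data.ptr.buffer + obj_offset);
 *	int fd = fdo->fd;	// already installed in this fd table
 *
 * If any fd allocation fails mid-list, nothing is installed and the
 * transaction is failed instead (the two-pass loop above).
 */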
4382
4383static int binder_thread_read(struct binder_proc *proc,
4384 struct binder_thread *thread,
4385 binder_uintptr_t binder_buffer, size_t size,
4386 binder_size_t *consumed, int non_block)
4387{
4388 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4389 void __user *ptr = buffer + *consumed;
4390 void __user *end = buffer + size;
4391
4392 int ret = 0;
4393 int wait_for_proc_work;
4394
4395 if (*consumed == 0) {
4396 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4397 return -EFAULT;
4398 ptr += sizeof(uint32_t);
4399 }
4400
4401retry:
4402 binder_inner_proc_lock(proc);
4403 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4404 binder_inner_proc_unlock(proc);
4405
4406 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4407
4408 trace_binder_wait_for_work(wait_for_proc_work,
4409 !!thread->transaction_stack,
4410 !binder_worklist_empty(proc, &thread->todo));
4411 if (wait_for_proc_work) {
4412 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4413 BINDER_LOOPER_STATE_ENTERED))) {
4414 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4415 proc->pid, thread->pid, thread->looper);
4416 wait_event_interruptible(binder_user_error_wait,
4417 binder_stop_on_user_error < 2);
4418 }
4419 binder_set_nice(proc->default_priority);
4420 }
4421
4422 if (non_block) {
4423 if (!binder_has_work(thread, wait_for_proc_work))
4424 ret = -EAGAIN;
4425 } else {
4426 ret = binder_wait_for_work(thread, wait_for_proc_work);
4427 }
4428
4429 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4430
4431 if (ret)
4432 return ret;
4433
4434 while (1) {
4435 uint32_t cmd;
4436 struct binder_transaction_data_secctx tr;
4437 struct binder_transaction_data *trd = &tr.transaction_data;
4438 struct binder_work *w = NULL;
4439 struct list_head *list = NULL;
4440 struct binder_transaction *t = NULL;
4441 struct binder_thread *t_from;
4442 size_t trsize = sizeof(*trd);
4443
4444 binder_inner_proc_lock(proc);
4445 if (!binder_worklist_empty_ilocked(&thread->todo))
4446 list = &thread->todo;
4447 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4448 wait_for_proc_work)
4449 list = &proc->todo;
4450 else {
4451 binder_inner_proc_unlock(proc);
4452
4453 /* nothing added beyond the initial BR_NOOP; wait for more work */
4454 if (ptr - buffer == 4 && !thread->looper_need_return)
4455 goto retry;
4456 break;
4457 }
4458
4459 if (end - ptr < sizeof(tr) + 4) {
4460 binder_inner_proc_unlock(proc);
4461 break;
4462 }
4463 w = binder_dequeue_work_head_ilocked(list);
4464 if (binder_worklist_empty_ilocked(&thread->todo))
4465 thread->process_todo = false;
4466
4467 switch (w->type) {
4468 case BINDER_WORK_TRANSACTION: {
4469 binder_inner_proc_unlock(proc);
4470 t = container_of(w, struct binder_transaction, work);
4471 } break;
4472 case BINDER_WORK_RETURN_ERROR: {
4473 struct binder_error *e = container_of(
4474 w, struct binder_error, work);
4475
4476 WARN_ON(e->cmd == BR_OK);
4477 binder_inner_proc_unlock(proc);
4478 if (put_user(e->cmd, (uint32_t __user *)ptr))
4479 return -EFAULT;
4480 cmd = e->cmd;
4481 e->cmd = BR_OK;
4482 ptr += sizeof(uint32_t);
4483
4484 binder_stat_br(proc, thread, cmd);
4485 } break;
4486 case BINDER_WORK_TRANSACTION_COMPLETE:
4487 case BINDER_WORK_TRANSACTION_PENDING:
4488 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4489 if (proc->oneway_spam_detection_enabled &&
4490 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4491 cmd = BR_ONEWAY_SPAM_SUSPECT;
4492 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4493 cmd = BR_TRANSACTION_PENDING_FROZEN;
4494 else
4495 cmd = BR_TRANSACTION_COMPLETE;
4496 binder_inner_proc_unlock(proc);
4497 kfree(w);
4498 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4499 if (put_user(cmd, (uint32_t __user *)ptr))
4500 return -EFAULT;
4501 ptr += sizeof(uint32_t);
4502
4503 binder_stat_br(proc, thread, cmd);
4504 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4505 "%d:%d BR_TRANSACTION_COMPLETE\n",
4506 proc->pid, thread->pid);
4507 } break;
4508 case BINDER_WORK_NODE: {
4509 struct binder_node *node = container_of(w, struct binder_node, work);
4510 int strong, weak;
4511 binder_uintptr_t node_ptr = node->ptr;
4512 binder_uintptr_t node_cookie = node->cookie;
4513 int node_debug_id = node->debug_id;
4514 int has_weak_ref;
4515 int has_strong_ref;
4516 void __user *orig_ptr = ptr;
4517
4518 BUG_ON(proc != node->proc);
4519 strong = node->internal_strong_refs ||
4520 node->local_strong_refs;
4521 weak = !hlist_empty(&node->refs) ||
4522 node->local_weak_refs ||
4523 node->tmp_refs || strong;
4524 has_strong_ref = node->has_strong_ref;
4525 has_weak_ref = node->has_weak_ref;
4526
4527 if (weak && !has_weak_ref) {
4528 node->has_weak_ref = 1;
4529 node->pending_weak_ref = 1;
4530 node->local_weak_refs++;
4531 }
4532 if (strong && !has_strong_ref) {
4533 node->has_strong_ref = 1;
4534 node->pending_strong_ref = 1;
4535 node->local_strong_refs++;
4536 }
4537 if (!strong && has_strong_ref)
4538 node->has_strong_ref = 0;
4539 if (!weak && has_weak_ref)
4540 node->has_weak_ref = 0;
4541 if (!weak && !strong) {
4542 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4543 "%d:%d node %d u%016llx c%016llx deleted\n",
4544 proc->pid, thread->pid,
4545 node_debug_id,
4546 (u64)node_ptr,
4547 (u64)node_cookie);
4548 rb_erase(&node->rb_node, &proc->nodes);
4549 binder_inner_proc_unlock(proc);
4550 binder_node_lock(node);
4551 /*
4552 * Acquire the node lock before freeing the
4553 * node to serialize with other threads that
4554 * may have been holding the node lock while
4555 * decrementing this node (avoids race where
4556 * this thread frees while the other thread
4557 * is unlocking the node after the final
4558 * decrement)
4559 */
4560 binder_node_unlock(node);
4561 binder_free_node(node);
4562 } else
4563 binder_inner_proc_unlock(proc);
4564
4565 if (weak && !has_weak_ref)
4566 ret = binder_put_node_cmd(
4567 proc, thread, &ptr, node_ptr,
4568 node_cookie, node_debug_id,
4569 BR_INCREFS, "BR_INCREFS");
4570 if (!ret && strong && !has_strong_ref)
4571 ret = binder_put_node_cmd(
4572 proc, thread, &ptr, node_ptr,
4573 node_cookie, node_debug_id,
4574 BR_ACQUIRE, "BR_ACQUIRE");
4575 if (!ret && !strong && has_strong_ref)
4576 ret = binder_put_node_cmd(
4577 proc, thread, &ptr, node_ptr,
4578 node_cookie, node_debug_id,
4579 BR_RELEASE, "BR_RELEASE");
4580 if (!ret && !weak && has_weak_ref)
4581 ret = binder_put_node_cmd(
4582 proc, thread, &ptr, node_ptr,
4583 node_cookie, node_debug_id,
4584 BR_DECREFS, "BR_DECREFS");
4585 if (orig_ptr == ptr)
4586 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4587 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4588 proc->pid, thread->pid,
4589 node_debug_id,
4590 (u64)node_ptr,
4591 (u64)node_cookie);
4592 if (ret)
4593 return ret;
4594 } break;
4595 case BINDER_WORK_DEAD_BINDER:
4596 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4597 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4598 struct binder_ref_death *death;
4599 uint32_t cmd;
4600 binder_uintptr_t cookie;
4601
4602 death = container_of(w, struct binder_ref_death, work);
4603 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4604 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4605 else
4606 cmd = BR_DEAD_BINDER;
4607 cookie = death->cookie;
4608
4609 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4610 "%d:%d %s %016llx\n",
4611 proc->pid, thread->pid,
4612 cmd == BR_DEAD_BINDER ?
4613 "BR_DEAD_BINDER" :
4614 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4615 (u64)cookie);
4616 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4617 binder_inner_proc_unlock(proc);
4618 kfree(death);
4619 binder_stats_deleted(BINDER_STAT_DEATH);
4620 } else {
4621 binder_enqueue_work_ilocked(
4622 w, &proc->delivered_death);
4623 binder_inner_proc_unlock(proc);
4624 }
4625 if (put_user(cmd, (uint32_t __user *)ptr))
4626 return -EFAULT;
4627 ptr += sizeof(uint32_t);
4628 if (put_user(cookie,
4629 (binder_uintptr_t __user *)ptr))
4630 return -EFAULT;
4631 ptr += sizeof(binder_uintptr_t);
4632 binder_stat_br(proc, thread, cmd);
4633 if (cmd == BR_DEAD_BINDER)
4634 goto done; /* DEAD_BINDER notifications can cause transactions */
4635 } break;
4636 default:
4637 binder_inner_proc_unlock(proc);
4638 pr_err("%d:%d: bad work type %d\n",
4639 proc->pid, thread->pid, w->type);
4640 break;
4641 }
4642
4643 if (!t)
4644 continue;
4645
4646 BUG_ON(t->buffer == NULL);
4647 if (t->buffer->target_node) {
4648 struct binder_node *target_node = t->buffer->target_node;
4649
4650 trd->target.ptr = target_node->ptr;
4651 trd->cookie = target_node->cookie;
4652 t->saved_priority = task_nice(current);
4653 if (t->priority < target_node->min_priority &&
4654 !(t->flags & TF_ONE_WAY))
4655 binder_set_nice(t->priority);
4656 else if (!(t->flags & TF_ONE_WAY) ||
4657 t->saved_priority > target_node->min_priority)
4658 binder_set_nice(target_node->min_priority);
4659 cmd = BR_TRANSACTION;
4660 } else {
4661 trd->target.ptr = 0;
4662 trd->cookie = 0;
4663 cmd = BR_REPLY;
4664 }
4665 trd->code = t->code;
4666 trd->flags = t->flags;
4667 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4668
4669 t_from = binder_get_txn_from(t);
4670 if (t_from) {
4671 struct task_struct *sender = t_from->proc->tsk;
4672
4673 trd->sender_pid =
4674 task_tgid_nr_ns(sender,
4675 task_active_pid_ns(current));
4676 } else {
4677 trd->sender_pid = 0;
4678 }
4679
4680 ret = binder_apply_fd_fixups(proc, t);
4681 if (ret) {
4682 struct binder_buffer *buffer = t->buffer;
4683 bool oneway = !!(t->flags & TF_ONE_WAY);
4684 int tid = t->debug_id;
4685
4686 if (t_from)
4687 binder_thread_dec_tmpref(t_from);
4688 buffer->transaction = NULL;
4689 binder_cleanup_transaction(t, "fd fixups failed",
4690 BR_FAILED_REPLY);
4691 binder_free_buf(proc, thread, buffer, true);
4692 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4693 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4694 proc->pid, thread->pid,
4695 oneway ? "async " :
4696 (cmd == BR_REPLY ? "reply " : ""),
4697 tid, BR_FAILED_REPLY, ret, __LINE__);
4698 if (cmd == BR_REPLY) {
4699 cmd = BR_FAILED_REPLY;
4700 if (put_user(cmd, (uint32_t __user *)ptr))
4701 return -EFAULT;
4702 ptr += sizeof(uint32_t);
4703 binder_stat_br(proc, thread, cmd);
4704 break;
4705 }
4706 continue;
4707 }
4708 trd->data_size = t->buffer->data_size;
4709 trd->offsets_size = t->buffer->offsets_size;
4710 trd->data.ptr.buffer = t->buffer->user_data;
4711 trd->data.ptr.offsets = trd->data.ptr.buffer +
4712 ALIGN(t->buffer->data_size,
4713 sizeof(void *));
4714
4715 tr.secctx = t->security_ctx;
4716 if (t->security_ctx) {
4717 cmd = BR_TRANSACTION_SEC_CTX;
4718 trsize = sizeof(tr);
4719 }
4720 if (put_user(cmd, (uint32_t __user *)ptr)) {
4721 if (t_from)
4722 binder_thread_dec_tmpref(t_from);
4723
4724 binder_cleanup_transaction(t, "put_user failed",
4725 BR_FAILED_REPLY);
4726
4727 return -EFAULT;
4728 }
4729 ptr += sizeof(uint32_t);
4730 if (copy_to_user(ptr, &tr, trsize)) {
4731 if (t_from)
4732 binder_thread_dec_tmpref(t_from);
4733
4734 binder_cleanup_transaction(t, "copy_to_user failed",
4735 BR_FAILED_REPLY);
4736
4737 return -EFAULT;
4738 }
4739 ptr += trsize;
4740
4741 trace_binder_transaction_received(t);
4742 binder_stat_br(proc, thread, cmd);
4743 binder_debug(BINDER_DEBUG_TRANSACTION,
4744 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4745 proc->pid, thread->pid,
4746 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4747 (cmd == BR_TRANSACTION_SEC_CTX) ?
4748 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4749 t->debug_id, t_from ? t_from->proc->pid : 0,
4750 t_from ? t_from->pid : 0, cmd,
4751 t->buffer->data_size, t->buffer->offsets_size,
4752 (u64)trd->data.ptr.buffer,
4753 (u64)trd->data.ptr.offsets);
4754
4755 if (t_from)
4756 binder_thread_dec_tmpref(t_from);
4757 t->buffer->allow_user_free = 1;
4758 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4759 binder_inner_proc_lock(thread->proc);
4760 t->to_parent = thread->transaction_stack;
4761 t->to_thread = thread;
4762 thread->transaction_stack = t;
4763 binder_inner_proc_unlock(thread->proc);
4764 } else {
4765 binder_free_transaction(t);
4766 }
4767 break;
4768 }
4769
4770done:
4771
4772 *consumed = ptr - buffer;
4773 binder_inner_proc_lock(proc);
4774 if (proc->requested_threads == 0 &&
4775 list_empty(&thread->proc->waiting_threads) &&
4776 proc->requested_threads_started < proc->max_threads &&
4777 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4778 BINDER_LOOPER_STATE_ENTERED))
4779 /* the user-space code fails to spawn a new thread if we leave this out */) {
4780 proc->requested_threads++;
4781 binder_inner_proc_unlock(proc);
4782 binder_debug(BINDER_DEBUG_THREADS,
4783 "%d:%d BR_SPAWN_LOOPER\n",
4784 proc->pid, thread->pid);
4785 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4786 return -EFAULT;
4787 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4788 } else
4789 binder_inner_proc_unlock(proc);
4790 return 0;
4791}
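/*
 * Example (userspace sketch): the BR_SPAWN_LOOPER hint emitted above is
 * what grows the thread pool. Each spawned thread registers itself and
 * then blocks reading work (libbinder does the equivalent):
 *
 *	uint32_t cmd = BC_REGISTER_LOOPER;  // BC_ENTER_LOOPER for main thread
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	for (;;) {
 *		// BINDER_WRITE_READ with read_size > 0; parse the BR_*
 *		// stream, honoring any further BR_SPAWN_LOOPER requests.
 *	}
 */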
4792
4793static void binder_release_work(struct binder_proc *proc,
4794 struct list_head *list)
4795{
4796 struct binder_work *w;
4797 enum binder_work_type wtype;
4798
4799 while (1) {
4800 binder_inner_proc_lock(proc);
4801 w = binder_dequeue_work_head_ilocked(list);
4802 wtype = w ? w->type : 0;
4803 binder_inner_proc_unlock(proc);
4804 if (!w)
4805 return;
4806
4807 switch (wtype) {
4808 case BINDER_WORK_TRANSACTION: {
4809 struct binder_transaction *t;
4810
4811 t = container_of(w, struct binder_transaction, work);
4812
4813 binder_cleanup_transaction(t, "process died.",
4814 BR_DEAD_REPLY);
4815 } break;
4816 case BINDER_WORK_RETURN_ERROR: {
4817 struct binder_error *e = container_of(
4818 w, struct binder_error, work);
4819
4820 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4821 "undelivered TRANSACTION_ERROR: %u\n",
4822 e->cmd);
4823 } break;
4824 case BINDER_WORK_TRANSACTION_PENDING:
4825 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4826 case BINDER_WORK_TRANSACTION_COMPLETE: {
4827 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4828 "undelivered TRANSACTION_COMPLETE\n");
4829 kfree(w);
4830 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4831 } break;
4832 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4833 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4834 struct binder_ref_death *death;
4835
4836 death = container_of(w, struct binder_ref_death, work);
4837 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4838 "undelivered death notification, %016llx\n",
4839 (u64)death->cookie);
4840 kfree(death);
4841 binder_stats_deleted(BINDER_STAT_DEATH);
4842 } break;
4843 case BINDER_WORK_NODE:
4844 break;
4845 default:
4846 pr_err("unexpected work type, %d, not freed\n",
4847 wtype);
4848 break;
4849 }
4850 }
4852}
4853
4854static struct binder_thread *binder_get_thread_ilocked(
4855 struct binder_proc *proc, struct binder_thread *new_thread)
4856{
4857 struct binder_thread *thread = NULL;
4858 struct rb_node *parent = NULL;
4859 struct rb_node **p = &proc->threads.rb_node;
4860
4861 while (*p) {
4862 parent = *p;
4863 thread = rb_entry(parent, struct binder_thread, rb_node);
4864
4865 if (current->pid < thread->pid)
4866 p = &(*p)->rb_left;
4867 else if (current->pid > thread->pid)
4868 p = &(*p)->rb_right;
4869 else
4870 return thread;
4871 }
4872 if (!new_thread)
4873 return NULL;
4874 thread = new_thread;
4875 binder_stats_created(BINDER_STAT_THREAD);
4876 thread->proc = proc;
4877 thread->pid = current->pid;
4878 atomic_set(&thread->tmp_ref, 0);
4879 init_waitqueue_head(&thread->wait);
4880 INIT_LIST_HEAD(&thread->todo);
4881 rb_link_node(&thread->rb_node, parent, p);
4882 rb_insert_color(&thread->rb_node, &proc->threads);
4883 thread->looper_need_return = true;
4884 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4885 thread->return_error.cmd = BR_OK;
4886 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4887 thread->reply_error.cmd = BR_OK;
4888 thread->ee.command = BR_OK;
4889 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4890 return thread;
4891}
4892
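/*
 * binder_get_thread() below uses the usual allocate-outside-the-lock
 * pattern: look up under proc->inner_lock; on a miss, drop the spinlock,
 * kzalloc() a candidate thread, then retry the lookup passing the
 * candidate so it is either inserted or freed if another thread raced
 * the allocation in first.
 */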
4893static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4894{
4895 struct binder_thread *thread;
4896 struct binder_thread *new_thread;
4897
4898 binder_inner_proc_lock(proc);
4899 thread = binder_get_thread_ilocked(proc, NULL);
4900 binder_inner_proc_unlock(proc);
4901 if (!thread) {
4902 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4903 if (new_thread == NULL)
4904 return NULL;
4905 binder_inner_proc_lock(proc);
4906 thread = binder_get_thread_ilocked(proc, new_thread);
4907 binder_inner_proc_unlock(proc);
4908 if (thread != new_thread)
4909 kfree(new_thread);
4910 }
4911 return thread;
4912}
4913
4914static void binder_free_proc(struct binder_proc *proc)
4915{
4916 struct binder_device *device;
4917
4918 BUG_ON(!list_empty(&proc->todo));
4919 BUG_ON(!list_empty(&proc->delivered_death));
4920 if (proc->outstanding_txns)
4921 pr_warn("%s: Unexpected outstanding_txns %d\n",
4922 __func__, proc->outstanding_txns);
4923 device = container_of(proc->context, struct binder_device, context);
4924 if (refcount_dec_and_test(&device->ref)) {
4925 kfree(proc->context->name);
4926 kfree(device);
4927 }
4928 binder_alloc_deferred_release(&proc->alloc);
4929 put_task_struct(proc->tsk);
4930 put_cred(proc->cred);
4931 binder_stats_deleted(BINDER_STAT_PROC);
4932 kfree(proc);
4933}
4934
4935static void binder_free_thread(struct binder_thread *thread)
4936{
4937 BUG_ON(!list_empty(&thread->todo));
4938 binder_stats_deleted(BINDER_STAT_THREAD);
4939 binder_proc_dec_tmpref(thread->proc);
4940 kfree(thread);
4941}
4942
4943static int binder_thread_release(struct binder_proc *proc,
4944 struct binder_thread *thread)
4945{
4946 struct binder_transaction *t;
4947 struct binder_transaction *send_reply = NULL;
4948 int active_transactions = 0;
4949 struct binder_transaction *last_t = NULL;
4950
4951 binder_inner_proc_lock(thread->proc);
4952 /*
4953 * take a ref on the proc so it survives
4954 * after we remove this thread from proc->threads.
4955 * The corresponding dec is when we actually
4956 * free the thread in binder_free_thread()
4957 */
4958 proc->tmp_ref++;
4959 /*
4960 * take a ref on this thread to ensure it
4961 * survives while we are releasing it
4962 */
4963 atomic_inc(&thread->tmp_ref);
4964 rb_erase(&thread->rb_node, &proc->threads);
4965 t = thread->transaction_stack;
4966 if (t) {
4967 spin_lock(&t->lock);
4968 if (t->to_thread == thread)
4969 send_reply = t;
4970 } else {
4971 __acquire(&t->lock);
4972 }
4973 thread->is_dead = true;
4974
4975 while (t) {
4976 last_t = t;
4977 active_transactions++;
4978 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4979 "release %d:%d transaction %d %s, still active\n",
4980 proc->pid, thread->pid,
4981 t->debug_id,
4982 (t->to_thread == thread) ? "in" : "out");
4983
4984 if (t->to_thread == thread) {
4985 thread->proc->outstanding_txns--;
4986 t->to_proc = NULL;
4987 t->to_thread = NULL;
4988 if (t->buffer) {
4989 t->buffer->transaction = NULL;
4990 t->buffer = NULL;
4991 }
4992 t = t->to_parent;
4993 } else if (t->from == thread) {
4994 t->from = NULL;
4995 t = t->from_parent;
4996 } else
4997 BUG();
4998 spin_unlock(&last_t->lock);
4999 if (t)
5000 spin_lock(&t->lock);
5001 else
5002 __acquire(&t->lock);
5003 }
5004 /* annotation for sparse, lock not acquired in last iteration above */
5005 __release(&t->lock);
5006
5007 /*
5008 * If this thread used poll, make sure we remove the waitqueue from any
5009 * poll data structures holding it.
5010 */
5011 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5012 wake_up_pollfree(&thread->wait);
5013
5014 binder_inner_proc_unlock(thread->proc);
5015
5016 /*
5017 * This is needed to avoid races between wake_up_pollfree() above and
5018 * someone else removing the last entry from the queue for other reasons
5019 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5020 * descriptor being closed). Such other users hold an RCU read lock, so
5021 * we can be sure they're done after we call synchronize_rcu().
5022 */
5023 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5024 synchronize_rcu();
5025
5026 if (send_reply)
5027 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5028 binder_release_work(proc, &thread->todo);
5029 binder_thread_dec_tmpref(thread);
5030 return active_transactions;
5031}
5032
5033static __poll_t binder_poll(struct file *filp,
5034 struct poll_table_struct *wait)
5035{
5036 struct binder_proc *proc = filp->private_data;
5037 struct binder_thread *thread = NULL;
5038 bool wait_for_proc_work;
5039
5040 thread = binder_get_thread(proc);
5041 if (!thread)
5042 return EPOLLERR;
5043
5044 binder_inner_proc_lock(thread->proc);
5045 thread->looper |= BINDER_LOOPER_STATE_POLL;
5046 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5047
5048 binder_inner_proc_unlock(thread->proc);
5049
5050 poll_wait(filp, &thread->wait, wait);
5051
5052 if (binder_has_work(thread, wait_for_proc_work))
5053 return EPOLLIN;
5054
5055 return 0;
5056}
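/*
 * Example (userspace sketch): binder fds support poll() for threads that
 * multiplex binder with other fds. EPOLLIN only means work may be
 * available; the thread still fetches it with BINDER_WRITE_READ:
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// issue BINDER_WRITE_READ (fd opened with O_NONBLOCK to get
 *		// the non-blocking read path) and parse the BR_* commands
 *	}
 */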
5057
5058static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5059 struct binder_thread *thread)
5060{
5061 int ret = 0;
5062 struct binder_proc *proc = filp->private_data;
5063 void __user *ubuf = (void __user *)arg;
5064 struct binder_write_read bwr;
5065
5066 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5067 ret = -EFAULT;
5068 goto out;
5069 }
5070 binder_debug(BINDER_DEBUG_READ_WRITE,
5071 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5072 proc->pid, thread->pid,
5073 (u64)bwr.write_size, (u64)bwr.write_buffer,
5074 (u64)bwr.read_size, (u64)bwr.read_buffer);
5075
5076 if (bwr.write_size > 0) {
5077 ret = binder_thread_write(proc, thread,
5078 bwr.write_buffer,
5079 bwr.write_size,
5080 &bwr.write_consumed);
5081 trace_binder_write_done(ret);
5082 if (ret < 0) {
5083 bwr.read_consumed = 0;
5084 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5085 ret = -EFAULT;
5086 goto out;
5087 }
5088 }
5089 if (bwr.read_size > 0) {
5090 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5091 bwr.read_size,
5092 &bwr.read_consumed,
5093 filp->f_flags & O_NONBLOCK);
5094 trace_binder_read_done(ret);
5095 binder_inner_proc_lock(proc);
5096 if (!binder_worklist_empty_ilocked(&proc->todo))
5097 binder_wakeup_proc_ilocked(proc);
5098 binder_inner_proc_unlock(proc);
5099 if (ret < 0) {
5100 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5101 ret = -EFAULT;
5102 goto out;
5103 }
5104 }
5105 binder_debug(BINDER_DEBUG_READ_WRITE,
5106 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5107 proc->pid, thread->pid,
5108 (u64)bwr.write_consumed, (u64)bwr.write_size,
5109 (u64)bwr.read_consumed, (u64)bwr.read_size);
5110 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5111 ret = -EFAULT;
5112 goto out;
5113 }
5114out:
5115 return ret;
5116}
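/*
 * Example (userspace sketch): one BINDER_WRITE_READ ioctl carries both
 * directions; the kernel updates write_consumed/read_consumed so the
 * caller can tell how much of each buffer was processed ("out_buf",
 * "out_len" and "in_buf" are assumed caller state):
 *
 *	struct binder_write_read bwr = {
 *		.write_size = out_len,				// BC_* commands
 *		.write_buffer = (binder_uintptr_t)out_buf,
 *		.read_size = sizeof(in_buf),			// room for BR_*
 *		.read_buffer = (binder_uintptr_t)in_buf,
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) >= 0) {
 *		// bwr.write_consumed bytes of commands were handled;
 *		// bwr.read_consumed bytes of BR_* data await parsing
 *	}
 */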
5117
5118static int binder_ioctl_set_ctx_mgr(struct file *filp,
5119 struct flat_binder_object *fbo)
5120{
5121 int ret = 0;
5122 struct binder_proc *proc = filp->private_data;
5123 struct binder_context *context = proc->context;
5124 struct binder_node *new_node;
5125 kuid_t curr_euid = current_euid();
5126
5127 mutex_lock(&context->context_mgr_node_lock);
5128 if (context->binder_context_mgr_node) {
5129 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5130 ret = -EBUSY;
5131 goto out;
5132 }
5133 ret = security_binder_set_context_mgr(proc->cred);
5134 if (ret < 0)
5135 goto out;
5136 if (uid_valid(context->binder_context_mgr_uid)) {
5137 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5138 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5139 from_kuid(&init_user_ns, curr_euid),
5140 from_kuid(&init_user_ns,
5141 context->binder_context_mgr_uid));
5142 ret = -EPERM;
5143 goto out;
5144 }
5145 } else {
5146 context->binder_context_mgr_uid = curr_euid;
5147 }
5148 new_node = binder_new_node(proc, fbo);
5149 if (!new_node) {
5150 ret = -ENOMEM;
5151 goto out;
5152 }
5153 binder_node_lock(new_node);
5154 new_node->local_weak_refs++;
5155 new_node->local_strong_refs++;
5156 new_node->has_strong_ref = 1;
5157 new_node->has_weak_ref = 1;
5158 context->binder_context_mgr_node = new_node;
5159 binder_node_unlock(new_node);
5160 binder_put_node(new_node);
5161out:
5162 mutex_unlock(&context->context_mgr_node_lock);
5163 return ret;
5164}
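/*
 * Example (userspace sketch): exactly one process per context (Android's
 * servicemanager) claims handle 0 this way. The _EXT variant can also
 * request security contexts on incoming transactions:
 *
 *	struct flat_binder_object fbo = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX,
 *	};
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
 *	// legacy form without flags: ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);
 */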
5165
5166static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5167 struct binder_node_info_for_ref *info)
5168{
5169 struct binder_node *node;
5170 struct binder_context *context = proc->context;
5171 __u32 handle = info->handle;
5172
5173 if (info->strong_count || info->weak_count || info->reserved1 ||
5174 info->reserved2 || info->reserved3) {
5175 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5176 proc->pid);
5177 return -EINVAL;
5178 }
5179
5180 /* This ioctl may only be used by the context manager */
5181 mutex_lock(&context->context_mgr_node_lock);
5182 if (!context->binder_context_mgr_node ||
5183 context->binder_context_mgr_node->proc != proc) {
5184 mutex_unlock(&context->context_mgr_node_lock);
5185 return -EPERM;
5186 }
5187 mutex_unlock(&context->context_mgr_node_lock);
5188
5189 node = binder_get_node_from_ref(proc, handle, true, NULL);
5190 if (!node)
5191 return -EINVAL;
5192
5193 info->strong_count = node->local_strong_refs +
5194 node->internal_strong_refs;
5195 info->weak_count = node->local_weak_refs;
5196
5197 binder_put_node(node);
5198
5199 return 0;
5200}
5201
5202static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5203 struct binder_node_debug_info *info)
5204{
5205 struct rb_node *n;
5206 binder_uintptr_t ptr = info->ptr;
5207
5208 memset(info, 0, sizeof(*info));
5209
5210 binder_inner_proc_lock(proc);
5211 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5212 struct binder_node *node = rb_entry(n, struct binder_node,
5213 rb_node);
5214 if (node->ptr > ptr) {
5215 info->ptr = node->ptr;
5216 info->cookie = node->cookie;
5217 info->has_strong_ref = node->has_strong_ref;
5218 info->has_weak_ref = node->has_weak_ref;
5219 break;
5220 }
5221 }
5222 binder_inner_proc_unlock(proc);
5223
5224 return 0;
5225}
5226
5227static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5228{
5229 struct rb_node *n;
5230 struct binder_thread *thread;
5231
5232 if (proc->outstanding_txns > 0)
5233 return true;
5234
5235 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5236 thread = rb_entry(n, struct binder_thread, rb_node);
5237 if (thread->transaction_stack)
5238 return true;
5239 }
5240 return false;
5241}
5242
5243static int binder_ioctl_freeze(struct binder_freeze_info *info,
5244 struct binder_proc *target_proc)
5245{
5246 int ret = 0;
5247
5248 if (!info->enable) {
5249 binder_inner_proc_lock(target_proc);
5250 target_proc->sync_recv = false;
5251 target_proc->async_recv = false;
5252 target_proc->is_frozen = false;
5253 binder_inner_proc_unlock(target_proc);
5254 return 0;
5255 }
5256
5257 /*
5258 * Freezing the target: prevent new transactions by setting
5259 * the frozen state. If a timeout is specified, wait for
5260 * outstanding transactions to drain.
5261 */
5262 binder_inner_proc_lock(target_proc);
5263 target_proc->sync_recv = false;
5264 target_proc->async_recv = false;
5265 target_proc->is_frozen = true;
5266 binder_inner_proc_unlock(target_proc);
5267
5268 if (info->timeout_ms > 0)
5269 ret = wait_event_interruptible_timeout(
5270 target_proc->freeze_wait,
5271 (!target_proc->outstanding_txns),
5272 msecs_to_jiffies(info->timeout_ms));
5273
5274 /* Check pending transactions that wait for reply */
5275 if (ret >= 0) {
5276 binder_inner_proc_lock(target_proc);
5277 if (binder_txns_pending_ilocked(target_proc))
5278 ret = -EAGAIN;
5279 binder_inner_proc_unlock(target_proc);
5280 }
5281
5282 if (ret < 0) {
5283 binder_inner_proc_lock(target_proc);
5284 target_proc->is_frozen = false;
5285 binder_inner_proc_unlock(target_proc);
5286 }
5287
5288 return ret;
5289}
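/*
 * Example (userspace sketch): the freeze handshake implemented above.
 * -EAGAIN is the expected outcome when sync transactions are still
 * pending after the timeout; the kernel has already cleared is_frozen
 * in that case, so the caller can simply retry later:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,	// wait up to 100ms for txns to drain
 *	};
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN) {
 *		// target still has pending sync work; retry or give up
 *	}
 */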
5290
5291static int binder_ioctl_get_freezer_info(
5292 struct binder_frozen_status_info *info)
5293{
5294 struct binder_proc *target_proc;
5295 bool found = false;
5296 __u32 txns_pending;
5297
5298 info->sync_recv = 0;
5299 info->async_recv = 0;
5300
5301 mutex_lock(&binder_procs_lock);
5302 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5303 if (target_proc->pid == info->pid) {
5304 found = true;
5305 binder_inner_proc_lock(target_proc);
5306 txns_pending = binder_txns_pending_ilocked(target_proc);
5307 info->sync_recv |= target_proc->sync_recv |
5308 (txns_pending << 1);
5309 info->async_recv |= target_proc->async_recv;
5310 binder_inner_proc_unlock(target_proc);
5311 }
5312 }
5313 mutex_unlock(&binder_procs_lock);
5314
5315 if (!found)
5316 return -EINVAL;
5317
5318 return 0;
5319}
5320
5321static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5322 void __user *ubuf)
5323{
5324 struct binder_extended_error ee;
5325
5326 binder_inner_proc_lock(thread->proc);
5327 ee = thread->ee;
5328 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5329 binder_inner_proc_unlock(thread->proc);
5330
5331 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5332 return -EFAULT;
5333
5334 return 0;
5335}
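/*
 * Example (userspace sketch): after a failed command, the calling thread
 * can pull a one-shot detailed error record (cleared on read, as above):
 *
 *	struct binder_extended_error ee;
 *	ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee);
 *	// ee.id is the failing transaction's debug id, ee.command the
 *	// BR_* code, ee.param the errno-style detail.
 */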
5336
5337static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5338{
5339 int ret;
5340 struct binder_proc *proc = filp->private_data;
5341 struct binder_thread *thread;
5342 void __user *ubuf = (void __user *)arg;
5343
5344 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5345 proc->pid, current->pid, cmd, arg);*/
5346
5347 binder_selftest_alloc(&proc->alloc);
5348
5349 trace_binder_ioctl(cmd, arg);
5350
5351 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5352 if (ret)
5353 goto err_unlocked;
5354
5355 thread = binder_get_thread(proc);
5356 if (thread == NULL) {
5357 ret = -ENOMEM;
5358 goto err;
5359 }
5360
5361 switch (cmd) {
5362 case BINDER_WRITE_READ:
5363 ret = binder_ioctl_write_read(filp, arg, thread);
5364 if (ret)
5365 goto err;
5366 break;
5367 case BINDER_SET_MAX_THREADS: {
5368 int max_threads;
5369
5370 if (copy_from_user(&max_threads, ubuf,
5371 sizeof(max_threads))) {
5372 ret = -EINVAL;
5373 goto err;
5374 }
5375 binder_inner_proc_lock(proc);
5376 proc->max_threads = max_threads;
5377 binder_inner_proc_unlock(proc);
5378 break;
5379 }
5380 case BINDER_SET_CONTEXT_MGR_EXT: {
5381 struct flat_binder_object fbo;
5382
5383 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5384 ret = -EINVAL;
5385 goto err;
5386 }
5387 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5388 if (ret)
5389 goto err;
5390 break;
5391 }
5392 case BINDER_SET_CONTEXT_MGR:
5393 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5394 if (ret)
5395 goto err;
5396 break;
5397 case BINDER_THREAD_EXIT:
5398 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5399 proc->pid, thread->pid);
5400 binder_thread_release(proc, thread);
5401 thread = NULL;
5402 break;
5403 case BINDER_VERSION: {
5404 struct binder_version __user *ver = ubuf;
5405
5406 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5407 &ver->protocol_version)) {
5408 ret = -EINVAL;
5409 goto err;
5410 }
5411 break;
5412 }
5413 case BINDER_GET_NODE_INFO_FOR_REF: {
5414 struct binder_node_info_for_ref info;
5415
5416 if (copy_from_user(&info, ubuf, sizeof(info))) {
5417 ret = -EFAULT;
5418 goto err;
5419 }
5420
5421 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5422 if (ret < 0)
5423 goto err;
5424
5425 if (copy_to_user(ubuf, &info, sizeof(info))) {
5426 ret = -EFAULT;
5427 goto err;
5428 }
5429
5430 break;
5431 }
5432 case BINDER_GET_NODE_DEBUG_INFO: {
5433 struct binder_node_debug_info info;
5434
5435 if (copy_from_user(&info, ubuf, sizeof(info))) {
5436 ret = -EFAULT;
5437 goto err;
5438 }
5439
5440 ret = binder_ioctl_get_node_debug_info(proc, &info);
5441 if (ret < 0)
5442 goto err;
5443
5444 if (copy_to_user(ubuf, &info, sizeof(info))) {
5445 ret = -EFAULT;
5446 goto err;
5447 }
5448 break;
5449 }
5450 case BINDER_FREEZE: {
5451 struct binder_freeze_info info;
5452 struct binder_proc **target_procs = NULL, *target_proc;
5453 int target_procs_count = 0, i = 0;
5454
5455 ret = 0;
5456
5457 if (copy_from_user(&info, ubuf, sizeof(info))) {
5458 ret = -EFAULT;
5459 goto err;
5460 }
5461
5462 mutex_lock(&binder_procs_lock);
5463 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5464 if (target_proc->pid == info.pid)
5465 target_procs_count++;
5466 }
5467
5468 if (target_procs_count == 0) {
5469 mutex_unlock(&binder_procs_lock);
5470 ret = -EINVAL;
5471 goto err;
5472 }
5473
5474 target_procs = kcalloc(target_procs_count,
5475 sizeof(struct binder_proc *),
5476 GFP_KERNEL);
5477
5478 if (!target_procs) {
5479 mutex_unlock(&binder_procs_lock);
5480 ret = -ENOMEM;
5481 goto err;
5482 }
5483
5484 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5485 if (target_proc->pid != info.pid)
5486 continue;
5487
5488 binder_inner_proc_lock(target_proc);
5489 target_proc->tmp_ref++;
5490 binder_inner_proc_unlock(target_proc);
5491
5492 target_procs[i++] = target_proc;
5493 }
5494 mutex_unlock(&binder_procs_lock);
5495
5496 for (i = 0; i < target_procs_count; i++) {
5497 if (ret >= 0)
5498 ret = binder_ioctl_freeze(&info,
5499 target_procs[i]);
5500
5501 binder_proc_dec_tmpref(target_procs[i]);
5502 }
5503
5504 kfree(target_procs);
5505
5506 if (ret < 0)
5507 goto err;
5508 break;
5509 }
5510 case BINDER_GET_FROZEN_INFO: {
5511 struct binder_frozen_status_info info;
5512
5513 if (copy_from_user(&info, ubuf, sizeof(info))) {
5514 ret = -EFAULT;
5515 goto err;
5516 }
5517
5518 ret = binder_ioctl_get_freezer_info(&info);
5519 if (ret < 0)
5520 goto err;
5521
5522 if (copy_to_user(ubuf, &info, sizeof(info))) {
5523 ret = -EFAULT;
5524 goto err;
5525 }
5526 break;
5527 }
5528 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5529 uint32_t enable;
5530
5531 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5532 ret = -EFAULT;
5533 goto err;
5534 }
5535 binder_inner_proc_lock(proc);
5536 proc->oneway_spam_detection_enabled = (bool)enable;
5537 binder_inner_proc_unlock(proc);
5538 break;
5539 }
5540 case BINDER_GET_EXTENDED_ERROR:
5541 ret = binder_ioctl_get_extended_error(thread, ubuf);
5542 if (ret < 0)
5543 goto err;
5544 break;
5545 default:
5546 ret = -EINVAL;
5547 goto err;
5548 }
5549 ret = 0;
5550err:
5551 if (thread)
5552 thread->looper_need_return = false;
5553 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5554 if (ret && ret != -EINTR)
5555 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5556err_unlocked:
5557 trace_binder_ioctl_done(ret);
5558 return ret;
5559}
5560
5561static void binder_vma_open(struct vm_area_struct *vma)
5562{
5563 struct binder_proc *proc = vma->vm_private_data;
5564
5565 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5566 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5567 proc->pid, vma->vm_start, vma->vm_end,
5568 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5569 (unsigned long)pgprot_val(vma->vm_page_prot));
5570}
5571
5572static void binder_vma_close(struct vm_area_struct *vma)
5573{
5574 struct binder_proc *proc = vma->vm_private_data;
5575
5576 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5577 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5578 proc->pid, vma->vm_start, vma->vm_end,
5579 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5580 (unsigned long)pgprot_val(vma->vm_page_prot));
5581 binder_alloc_vma_close(&proc->alloc);
5582}
5583
5584static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5585{
5586 return VM_FAULT_SIGBUS;
5587}
5588
5589static const struct vm_operations_struct binder_vm_ops = {
5590 .open = binder_vma_open,
5591 .close = binder_vma_close,
5592 .fault = binder_vm_fault,
5593};
5594
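/*
 * Set up the binder buffer mapping. Only the process that opened the
 * device may mmap it, writable mappings are rejected
 * (FORBIDDEN_MMAP_FLAGS), and the mapping is never copied across fork
 * (VM_DONTCOPY). The buffer pages themselves are managed by
 * binder_alloc_mmap_handler().
 */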
5595static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5596{
5597 struct binder_proc *proc = filp->private_data;
5598
5599 if (proc->tsk != current->group_leader)
5600 return -EINVAL;
5601
5602 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5603 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5604 __func__, proc->pid, vma->vm_start, vma->vm_end,
5605 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5606 (unsigned long)pgprot_val(vma->vm_page_prot));
5607
5608 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5609 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5610 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5611 return -EPERM;
5612 }
5613 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5614
5615 vma->vm_ops = &binder_vm_ops;
5616 vma->vm_private_data = proc;
5617
5618 return binder_alloc_mmap_handler(&proc->alloc, vma);
5619}
5620
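/*
 * Create a binder_proc for the opening process: pin the group leader
 * and the file's credentials, initialize the allocator, and publish
 * the proc on binder_procs. The debugfs/binderfs log entries are
 * shared between contexts, so they are created only on the first open
 * for a given PID.
 */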
5621static int binder_open(struct inode *nodp, struct file *filp)
5622{
5623 struct binder_proc *proc, *itr;
5624 struct binder_device *binder_dev;
5625 struct binderfs_info *info;
5626 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5627 bool existing_pid = false;
5628
5629 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5630 current->group_leader->pid, current->pid);
5631
5632 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5633 if (proc == NULL)
5634 return -ENOMEM;
5635 spin_lock_init(&proc->inner_lock);
5636 spin_lock_init(&proc->outer_lock);
5637 get_task_struct(current->group_leader);
5638 proc->tsk = current->group_leader;
5639 proc->cred = get_cred(filp->f_cred);
5640 INIT_LIST_HEAD(&proc->todo);
5641 init_waitqueue_head(&proc->freeze_wait);
5642 proc->default_priority = task_nice(current);
5643 /* binderfs stashes devices in i_private */
5644 if (is_binderfs_device(nodp)) {
5645 binder_dev = nodp->i_private;
5646 info = nodp->i_sb->s_fs_info;
5647 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5648 } else {
5649 binder_dev = container_of(filp->private_data,
5650 struct binder_device, miscdev);
5651 }
5652 refcount_inc(&binder_dev->ref);
5653 proc->context = &binder_dev->context;
5654 binder_alloc_init(&proc->alloc);
5655
5656 binder_stats_created(BINDER_STAT_PROC);
5657 proc->pid = current->group_leader->pid;
5658 INIT_LIST_HEAD(&proc->delivered_death);
5659 INIT_LIST_HEAD(&proc->waiting_threads);
5660 filp->private_data = proc;
5661
5662 mutex_lock(&binder_procs_lock);
5663 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5664 if (itr->pid == proc->pid) {
5665 existing_pid = true;
5666 break;
5667 }
5668 }
5669 hlist_add_head(&proc->proc_node, &binder_procs);
5670 mutex_unlock(&binder_procs_lock);
5671
5672 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5673 char strbuf[11];
5674
5675 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5676 /*
5677 * proc debug entries are shared between contexts.
5678		 * Only create for the first PID to avoid debugfs log spamming.
5679		 * The printing code will print all contexts for a given PID
5680		 * anyway, so this is not a problem.
5681 */
5682 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5683 binder_debugfs_dir_entry_proc,
5684 (void *)(unsigned long)proc->pid,
5685 &proc_fops);
5686 }
5687
5688 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5689 char strbuf[11];
5690 struct dentry *binderfs_entry;
5691
5692 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5693 /*
5694		 * Similar to debugfs, the process-specific log file is shared
5695		 * between contexts. Only create it for the first PID. This is
5696		 * OK since, as with debugfs, the log file will contain
5697		 * information on all contexts of a given PID.
5698 */
5699 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5700 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5701 if (!IS_ERR(binderfs_entry)) {
5702 proc->binderfs_entry = binderfs_entry;
5703 } else {
5704 int error;
5705
5706 error = PTR_ERR(binderfs_entry);
5707 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5708 strbuf, error);
5709 }
5710 }
5711
5712 return 0;
5713}
5714
5715static int binder_flush(struct file *filp, fl_owner_t id)
5716{
5717 struct binder_proc *proc = filp->private_data;
5718
5719 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5720
5721 return 0;
5722}
5723
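/*
 * Deferred work for binder_flush(): force every thread of this proc
 * out of the driver by setting looper_need_return and waking any
 * thread sleeping in BINDER_LOOPER_STATE_WAITING.
 */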
5724static void binder_deferred_flush(struct binder_proc *proc)
5725{
5726 struct rb_node *n;
5727 int wake_count = 0;
5728
5729 binder_inner_proc_lock(proc);
5730 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5731 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5732
5733 thread->looper_need_return = true;
5734 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5735 wake_up_interruptible(&thread->wait);
5736 wake_count++;
5737 }
5738 }
5739 binder_inner_proc_unlock(proc);
5740
5741 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5742 "binder_flush: %d woke %d threads\n", proc->pid,
5743 wake_count);
5744}
5745
5746static int binder_release(struct inode *nodp, struct file *filp)
5747{
5748 struct binder_proc *proc = filp->private_data;
5749
5750 debugfs_remove(proc->debugfs_entry);
5751
5752 if (proc->binderfs_entry) {
5753 binderfs_remove_file(proc->binderfs_entry);
5754 proc->binderfs_entry = NULL;
5755 }
5756
5757 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5758
5759 return 0;
5760}
5761
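/*
 * Release a node whose owning process is going away. A node that still
 * has references is moved to binder_dead_nodes, and every ref holder
 * with a registered death notification gets BINDER_WORK_DEAD_BINDER
 * queued on its todo list. Returns the updated incoming ref count for
 * the caller's statistics.
 */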
5762static int binder_node_release(struct binder_node *node, int refs)
5763{
5764 struct binder_ref *ref;
5765 int death = 0;
5766 struct binder_proc *proc = node->proc;
5767
5768 binder_release_work(proc, &node->async_todo);
5769
5770 binder_node_lock(node);
5771 binder_inner_proc_lock(proc);
5772 binder_dequeue_work_ilocked(&node->work);
5773 /*
5774	 * The caller must have taken a temporary ref on the node.
5775 */
5776 BUG_ON(!node->tmp_refs);
5777 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5778 binder_inner_proc_unlock(proc);
5779 binder_node_unlock(node);
5780 binder_free_node(node);
5781
5782 return refs;
5783 }
5784
5785 node->proc = NULL;
5786 node->local_strong_refs = 0;
5787 node->local_weak_refs = 0;
5788 binder_inner_proc_unlock(proc);
5789
5790 spin_lock(&binder_dead_nodes_lock);
5791 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5792 spin_unlock(&binder_dead_nodes_lock);
5793
5794 hlist_for_each_entry(ref, &node->refs, node_entry) {
5795 refs++;
5796 /*
5797 * Need the node lock to synchronize
5798 * with new notification requests and the
5799 * inner lock to synchronize with queued
5800 * death notifications.
5801 */
5802 binder_inner_proc_lock(ref->proc);
5803 if (!ref->death) {
5804 binder_inner_proc_unlock(ref->proc);
5805 continue;
5806 }
5807
5808 death++;
5809
5810 BUG_ON(!list_empty(&ref->death->work.entry));
5811 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5812 binder_enqueue_work_ilocked(&ref->death->work,
5813 &ref->proc->todo);
5814 binder_wakeup_proc_ilocked(ref->proc);
5815 binder_inner_proc_unlock(ref->proc);
5816 }
5817
5818 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5819 "node %d now dead, refs %d, death %d\n",
5820 node->debug_id, refs, death);
5821 binder_node_unlock(node);
5822 binder_put_node(node);
5823
5824 return refs;
5825}
5826
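/*
 * Deferred teardown after the last file reference to a proc is gone:
 * unpublish the proc, drop the context manager node if this proc owned
 * it, then release all threads, nodes, refs and queued work. The proc
 * itself is freed by binder_proc_dec_tmpref() once all temporary
 * references have been dropped.
 */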
5827static void binder_deferred_release(struct binder_proc *proc)
5828{
5829 struct binder_context *context = proc->context;
5830 struct rb_node *n;
5831 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5832
5833 mutex_lock(&binder_procs_lock);
5834 hlist_del(&proc->proc_node);
5835 mutex_unlock(&binder_procs_lock);
5836
5837 mutex_lock(&context->context_mgr_node_lock);
5838 if (context->binder_context_mgr_node &&
5839 context->binder_context_mgr_node->proc == proc) {
5840 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5841 "%s: %d context_mgr_node gone\n",
5842 __func__, proc->pid);
5843 context->binder_context_mgr_node = NULL;
5844 }
5845 mutex_unlock(&context->context_mgr_node_lock);
5846 binder_inner_proc_lock(proc);
5847 /*
5848 * Make sure proc stays alive after we
5849	 * remove all the threads.
5850 */
5851 proc->tmp_ref++;
5852
5853 proc->is_dead = true;
5854 proc->is_frozen = false;
5855 proc->sync_recv = false;
5856 proc->async_recv = false;
5857 threads = 0;
5858 active_transactions = 0;
5859 while ((n = rb_first(&proc->threads))) {
5860 struct binder_thread *thread;
5861
5862 thread = rb_entry(n, struct binder_thread, rb_node);
5863 binder_inner_proc_unlock(proc);
5864 threads++;
5865 active_transactions += binder_thread_release(proc, thread);
5866 binder_inner_proc_lock(proc);
5867 }
5868
5869 nodes = 0;
5870 incoming_refs = 0;
5871 while ((n = rb_first(&proc->nodes))) {
5872 struct binder_node *node;
5873
5874 node = rb_entry(n, struct binder_node, rb_node);
5875 nodes++;
5876 /*
5877 * take a temporary ref on the node before
5878 * calling binder_node_release() which will either
5879 * kfree() the node or call binder_put_node()
5880 */
5881 binder_inc_node_tmpref_ilocked(node);
5882 rb_erase(&node->rb_node, &proc->nodes);
5883 binder_inner_proc_unlock(proc);
5884 incoming_refs = binder_node_release(node, incoming_refs);
5885 binder_inner_proc_lock(proc);
5886 }
5887 binder_inner_proc_unlock(proc);
5888
5889 outgoing_refs = 0;
5890 binder_proc_lock(proc);
5891 while ((n = rb_first(&proc->refs_by_desc))) {
5892 struct binder_ref *ref;
5893
5894 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5895 outgoing_refs++;
5896 binder_cleanup_ref_olocked(ref);
5897 binder_proc_unlock(proc);
5898 binder_free_ref(ref);
5899 binder_proc_lock(proc);
5900 }
5901 binder_proc_unlock(proc);
5902
5903 binder_release_work(proc, &proc->todo);
5904 binder_release_work(proc, &proc->delivered_death);
5905
5906 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5907 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5908 __func__, proc->pid, threads, nodes, incoming_refs,
5909 outgoing_refs, active_transactions);
5910
5911 binder_proc_dec_tmpref(proc);
5912}
5913
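/*
 * Workqueue handler that drains binder_deferred_list, performing the
 * flush and/or release work recorded for each queued proc.
 */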
5914static void binder_deferred_func(struct work_struct *work)
5915{
5916 struct binder_proc *proc;
5917
5918 int defer;
5919
5920 do {
5921 mutex_lock(&binder_deferred_lock);
5922 if (!hlist_empty(&binder_deferred_list)) {
5923 proc = hlist_entry(binder_deferred_list.first,
5924 struct binder_proc, deferred_work_node);
5925 hlist_del_init(&proc->deferred_work_node);
5926 defer = proc->deferred_work;
5927 proc->deferred_work = 0;
5928 } else {
5929 proc = NULL;
5930 defer = 0;
5931 }
5932 mutex_unlock(&binder_deferred_lock);
5933
5934 if (defer & BINDER_DEFERRED_FLUSH)
5935 binder_deferred_flush(proc);
5936
5937 if (defer & BINDER_DEFERRED_RELEASE)
5938 binder_deferred_release(proc); /* frees proc */
5939 } while (proc);
5940}
5941static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5942
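/*
 * Record deferred work bits for @proc and schedule the shared work
 * item unless the proc is already queued. Called from binder_flush()
 * and binder_release().
 */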
5943static void
5944binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5945{
5946 mutex_lock(&binder_deferred_lock);
5947 proc->deferred_work |= defer;
5948 if (hlist_unhashed(&proc->deferred_work_node)) {
5949 hlist_add_head(&proc->deferred_work_node,
5950 &binder_deferred_list);
5951 schedule_work(&binder_deferred_work);
5952 }
5953 mutex_unlock(&binder_deferred_lock);
5954}
5955
5956static void print_binder_transaction_ilocked(struct seq_file *m,
5957 struct binder_proc *proc,
5958 const char *prefix,
5959 struct binder_transaction *t)
5960{
5961 struct binder_proc *to_proc;
5962 struct binder_buffer *buffer = t->buffer;
5963 ktime_t current_time = ktime_get();
5964
5965 spin_lock(&t->lock);
5966 to_proc = t->to_proc;
5967 seq_printf(m,
5968 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
5969 prefix, t->debug_id, t,
5970 t->from_pid,
5971 t->from_tid,
5972 to_proc ? to_proc->pid : 0,
5973 t->to_thread ? t->to_thread->pid : 0,
5974 t->code, t->flags, t->priority, t->need_reply,
5975 ktime_ms_delta(current_time, t->start_time));
5976 spin_unlock(&t->lock);
5977
5978 if (proc != to_proc) {
5979 /*
5980 * Can only safely deref buffer if we are holding the
5981 * correct proc inner lock for this node
5982 */
5983 seq_puts(m, "\n");
5984 return;
5985 }
5986
5987 if (buffer == NULL) {
5988 seq_puts(m, " buffer free\n");
5989 return;
5990 }
5991 if (buffer->target_node)
5992 seq_printf(m, " node %d", buffer->target_node->debug_id);
5993 seq_printf(m, " size %zd:%zd offset %lx\n",
5994 buffer->data_size, buffer->offsets_size,
5995 proc->alloc.buffer - buffer->user_data);
5996}
5997
5998static void print_binder_work_ilocked(struct seq_file *m,
5999 struct binder_proc *proc,
6000 const char *prefix,
6001 const char *transaction_prefix,
6002 struct binder_work *w)
6003{
6004 struct binder_node *node;
6005 struct binder_transaction *t;
6006
6007 switch (w->type) {
6008 case BINDER_WORK_TRANSACTION:
6009 t = container_of(w, struct binder_transaction, work);
6010 print_binder_transaction_ilocked(
6011 m, proc, transaction_prefix, t);
6012 break;
6013 case BINDER_WORK_RETURN_ERROR: {
6014 struct binder_error *e = container_of(
6015 w, struct binder_error, work);
6016
6017 seq_printf(m, "%stransaction error: %u\n",
6018 prefix, e->cmd);
6019 } break;
6020 case BINDER_WORK_TRANSACTION_COMPLETE:
6021 seq_printf(m, "%stransaction complete\n", prefix);
6022 break;
6023 case BINDER_WORK_NODE:
6024 node = container_of(w, struct binder_node, work);
6025 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6026 prefix, node->debug_id,
6027 (u64)node->ptr, (u64)node->cookie);
6028 break;
6029 case BINDER_WORK_DEAD_BINDER:
6030 seq_printf(m, "%shas dead binder\n", prefix);
6031 break;
6032 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6033 seq_printf(m, "%shas cleared dead binder\n", prefix);
6034 break;
6035 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6036 seq_printf(m, "%shas cleared death notification\n", prefix);
6037 break;
6038 default:
6039 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6040 break;
6041 }
6042}
6043
6044static void print_binder_thread_ilocked(struct seq_file *m,
6045 struct binder_thread *thread,
6046 int print_always)
6047{
6048 struct binder_transaction *t;
6049 struct binder_work *w;
6050 size_t start_pos = m->count;
6051 size_t header_pos;
6052
6053 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6054 thread->pid, thread->looper,
6055 thread->looper_need_return,
6056 atomic_read(&thread->tmp_ref));
6057 header_pos = m->count;
6058 t = thread->transaction_stack;
6059 while (t) {
6060 if (t->from == thread) {
6061 print_binder_transaction_ilocked(m, thread->proc,
6062 " outgoing transaction", t);
6063 t = t->from_parent;
6064 } else if (t->to_thread == thread) {
6065 print_binder_transaction_ilocked(m, thread->proc,
6066 " incoming transaction", t);
6067 t = t->to_parent;
6068 } else {
6069 print_binder_transaction_ilocked(m, thread->proc,
6070 " bad transaction", t);
6071 t = NULL;
6072 }
6073 }
6074 list_for_each_entry(w, &thread->todo, entry) {
6075 print_binder_work_ilocked(m, thread->proc, " ",
6076 " pending transaction", w);
6077 }
6078 if (!print_always && m->count == header_pos)
6079 m->count = start_pos;
6080}
6081
6082static void print_binder_node_nilocked(struct seq_file *m,
6083 struct binder_node *node)
6084{
6085 struct binder_ref *ref;
6086 struct binder_work *w;
6087 int count;
6088
6089 count = 0;
6090 hlist_for_each_entry(ref, &node->refs, node_entry)
6091 count++;
6092
6093 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6094 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6095 node->has_strong_ref, node->has_weak_ref,
6096 node->local_strong_refs, node->local_weak_refs,
6097 node->internal_strong_refs, count, node->tmp_refs);
6098 if (count) {
6099 seq_puts(m, " proc");
6100 hlist_for_each_entry(ref, &node->refs, node_entry)
6101 seq_printf(m, " %d", ref->proc->pid);
6102 }
6103 seq_puts(m, "\n");
6104 if (node->proc) {
6105 list_for_each_entry(w, &node->async_todo, entry)
6106 print_binder_work_ilocked(m, node->proc, " ",
6107 " pending async transaction", w);
6108 }
6109}
6110
6111static void print_binder_ref_olocked(struct seq_file *m,
6112 struct binder_ref *ref)
6113{
6114 binder_node_lock(ref->node);
6115 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6116 ref->data.debug_id, ref->data.desc,
6117 ref->node->proc ? "" : "dead ",
6118 ref->node->debug_id, ref->data.strong,
6119 ref->data.weak, ref->death);
6120 binder_node_unlock(ref->node);
6121}
6122
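/*
 * Dump one binder_proc: threads, nodes (all of them when @print_all,
 * otherwise only those with pending async work), refs, allocated
 * buffers and the todo list. A temporary node reference keeps each
 * node alive while the inner lock is dropped to take the node lock.
 */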
6123static void print_binder_proc(struct seq_file *m,
6124 struct binder_proc *proc, int print_all)
6125{
6126 struct binder_work *w;
6127 struct rb_node *n;
6128 size_t start_pos = m->count;
6129 size_t header_pos;
6130 struct binder_node *last_node = NULL;
6131
6132 seq_printf(m, "proc %d\n", proc->pid);
6133 seq_printf(m, "context %s\n", proc->context->name);
6134 header_pos = m->count;
6135
6136 binder_inner_proc_lock(proc);
6137 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6138 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6139 rb_node), print_all);
6140
6141 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6142 struct binder_node *node = rb_entry(n, struct binder_node,
6143 rb_node);
6144 if (!print_all && !node->has_async_transaction)
6145 continue;
6146
6147 /*
6148 * take a temporary reference on the node so it
6149 * survives and isn't removed from the tree
6150 * while we print it.
6151 */
6152 binder_inc_node_tmpref_ilocked(node);
6153 /* Need to drop inner lock to take node lock */
6154 binder_inner_proc_unlock(proc);
6155 if (last_node)
6156 binder_put_node(last_node);
6157 binder_node_inner_lock(node);
6158 print_binder_node_nilocked(m, node);
6159 binder_node_inner_unlock(node);
6160 last_node = node;
6161 binder_inner_proc_lock(proc);
6162 }
6163 binder_inner_proc_unlock(proc);
6164 if (last_node)
6165 binder_put_node(last_node);
6166
6167 if (print_all) {
6168 binder_proc_lock(proc);
6169 for (n = rb_first(&proc->refs_by_desc);
6170 n != NULL;
6171 n = rb_next(n))
6172 print_binder_ref_olocked(m, rb_entry(n,
6173 struct binder_ref,
6174 rb_node_desc));
6175 binder_proc_unlock(proc);
6176 }
6177 binder_alloc_print_allocated(m, &proc->alloc);
6178 binder_inner_proc_lock(proc);
6179 list_for_each_entry(w, &proc->todo, entry)
6180 print_binder_work_ilocked(m, proc, " ",
6181 " pending transaction", w);
6182 list_for_each_entry(w, &proc->delivered_death, entry) {
6183 seq_puts(m, " has delivered dead binder\n");
6184 break;
6185 }
6186 binder_inner_proc_unlock(proc);
6187 if (!print_all && m->count == header_pos)
6188 m->count = start_pos;
6189}
6190
6191static const char * const binder_return_strings[] = {
6192 "BR_ERROR",
6193 "BR_OK",
6194 "BR_TRANSACTION",
6195 "BR_REPLY",
6196 "BR_ACQUIRE_RESULT",
6197 "BR_DEAD_REPLY",
6198 "BR_TRANSACTION_COMPLETE",
6199 "BR_INCREFS",
6200 "BR_ACQUIRE",
6201 "BR_RELEASE",
6202 "BR_DECREFS",
6203 "BR_ATTEMPT_ACQUIRE",
6204 "BR_NOOP",
6205 "BR_SPAWN_LOOPER",
6206 "BR_FINISHED",
6207 "BR_DEAD_BINDER",
6208 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6209 "BR_FAILED_REPLY",
6210 "BR_FROZEN_REPLY",
6211 "BR_ONEWAY_SPAM_SUSPECT",
6212 "BR_TRANSACTION_PENDING_FROZEN"
6213};
6214
6215static const char * const binder_command_strings[] = {
6216 "BC_TRANSACTION",
6217 "BC_REPLY",
6218 "BC_ACQUIRE_RESULT",
6219 "BC_FREE_BUFFER",
6220 "BC_INCREFS",
6221 "BC_ACQUIRE",
6222 "BC_RELEASE",
6223 "BC_DECREFS",
6224 "BC_INCREFS_DONE",
6225 "BC_ACQUIRE_DONE",
6226 "BC_ATTEMPT_ACQUIRE",
6227 "BC_REGISTER_LOOPER",
6228 "BC_ENTER_LOOPER",
6229 "BC_EXIT_LOOPER",
6230 "BC_REQUEST_DEATH_NOTIFICATION",
6231 "BC_CLEAR_DEATH_NOTIFICATION",
6232 "BC_DEAD_BINDER_DONE",
6233 "BC_TRANSACTION_SG",
6234 "BC_REPLY_SG",
6235};
6236
6237static const char * const binder_objstat_strings[] = {
6238 "proc",
6239 "thread",
6240 "node",
6241 "ref",
6242 "death",
6243 "transaction",
6244 "transaction_complete"
6245};
6246
6247static void print_binder_stats(struct seq_file *m, const char *prefix,
6248 struct binder_stats *stats)
6249{
6250 int i;
6251
6252 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6253 ARRAY_SIZE(binder_command_strings));
6254 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6255 int temp = atomic_read(&stats->bc[i]);
6256
6257 if (temp)
6258 seq_printf(m, "%s%s: %d\n", prefix,
6259 binder_command_strings[i], temp);
6260 }
6261
6262 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6263 ARRAY_SIZE(binder_return_strings));
6264 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6265 int temp = atomic_read(&stats->br[i]);
6266
6267 if (temp)
6268 seq_printf(m, "%s%s: %d\n", prefix,
6269 binder_return_strings[i], temp);
6270 }
6271
6272 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6273 ARRAY_SIZE(binder_objstat_strings));
6274 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6275 ARRAY_SIZE(stats->obj_deleted));
6276 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6277 int created = atomic_read(&stats->obj_created[i]);
6278 int deleted = atomic_read(&stats->obj_deleted[i]);
6279
6280 if (created || deleted)
6281 seq_printf(m, "%s%s: active %d total %d\n",
6282 prefix,
6283 binder_objstat_strings[i],
6284 created - deleted,
6285 created);
6286 }
6287}
6288
6289static void print_binder_proc_stats(struct seq_file *m,
6290 struct binder_proc *proc)
6291{
6292 struct binder_work *w;
6293 struct binder_thread *thread;
6294 struct rb_node *n;
6295 int count, strong, weak, ready_threads;
6296 size_t free_async_space =
6297 binder_alloc_get_free_async_space(&proc->alloc);
6298
6299 seq_printf(m, "proc %d\n", proc->pid);
6300 seq_printf(m, "context %s\n", proc->context->name);
6301 count = 0;
6302 ready_threads = 0;
6303 binder_inner_proc_lock(proc);
6304 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6305 count++;
6306
6307 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6308 ready_threads++;
6309
6310 seq_printf(m, " threads: %d\n", count);
6311 seq_printf(m, " requested threads: %d+%d/%d\n"
6312 " ready threads %d\n"
6313 " free async space %zd\n", proc->requested_threads,
6314 proc->requested_threads_started, proc->max_threads,
6315 ready_threads,
6316 free_async_space);
6317 count = 0;
6318 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6319 count++;
6320 binder_inner_proc_unlock(proc);
6321 seq_printf(m, " nodes: %d\n", count);
6322 count = 0;
6323 strong = 0;
6324 weak = 0;
6325 binder_proc_lock(proc);
6326 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6327 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6328 rb_node_desc);
6329 count++;
6330 strong += ref->data.strong;
6331 weak += ref->data.weak;
6332 }
6333 binder_proc_unlock(proc);
6334 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6335
6336 count = binder_alloc_get_allocated_count(&proc->alloc);
6337 seq_printf(m, " buffers: %d\n", count);
6338
6339 binder_alloc_print_pages(m, &proc->alloc);
6340
6341 count = 0;
6342 binder_inner_proc_lock(proc);
6343 list_for_each_entry(w, &proc->todo, entry) {
6344 if (w->type == BINDER_WORK_TRANSACTION)
6345 count++;
6346 }
6347 binder_inner_proc_unlock(proc);
6348 seq_printf(m, " pending transactions: %d\n", count);
6349
6350 print_binder_stats(m, " ", &proc->stats);
6351}
6352
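/*
 * debugfs "state" file: dump the dead-nodes list followed by the full
 * state of every binder_proc.
 */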
6353static int state_show(struct seq_file *m, void *unused)
6354{
6355 struct binder_proc *proc;
6356 struct binder_node *node;
6357 struct binder_node *last_node = NULL;
6358
6359 seq_puts(m, "binder state:\n");
6360
6361 spin_lock(&binder_dead_nodes_lock);
6362 if (!hlist_empty(&binder_dead_nodes))
6363 seq_puts(m, "dead nodes:\n");
6364 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6365 /*
6366 * take a temporary reference on the node so it
6367 * survives and isn't removed from the list
6368 * while we print it.
6369 */
6370 node->tmp_refs++;
6371 spin_unlock(&binder_dead_nodes_lock);
6372 if (last_node)
6373 binder_put_node(last_node);
6374 binder_node_lock(node);
6375 print_binder_node_nilocked(m, node);
6376 binder_node_unlock(node);
6377 last_node = node;
6378 spin_lock(&binder_dead_nodes_lock);
6379 }
6380 spin_unlock(&binder_dead_nodes_lock);
6381 if (last_node)
6382 binder_put_node(last_node);
6383
6384 mutex_lock(&binder_procs_lock);
6385 hlist_for_each_entry(proc, &binder_procs, proc_node)
6386 print_binder_proc(m, proc, 1);
6387 mutex_unlock(&binder_procs_lock);
6388
6389 return 0;
6390}
6391
6392static int stats_show(struct seq_file *m, void *unused)
6393{
6394 struct binder_proc *proc;
6395
6396 seq_puts(m, "binder stats:\n");
6397
6398 print_binder_stats(m, "", &binder_stats);
6399
6400 mutex_lock(&binder_procs_lock);
6401 hlist_for_each_entry(proc, &binder_procs, proc_node)
6402 print_binder_proc_stats(m, proc);
6403 mutex_unlock(&binder_procs_lock);
6404
6405 return 0;
6406}
6407
6408static int transactions_show(struct seq_file *m, void *unused)
6409{
6410 struct binder_proc *proc;
6411
6412 seq_puts(m, "binder transactions:\n");
6413 mutex_lock(&binder_procs_lock);
6414 hlist_for_each_entry(proc, &binder_procs, proc_node)
6415 print_binder_proc(m, proc, 0);
6416 mutex_unlock(&binder_procs_lock);
6417
6418 return 0;
6419}
6420
6421static int proc_show(struct seq_file *m, void *unused)
6422{
6423 struct binder_proc *itr;
6424 int pid = (unsigned long)m->private;
6425
6426 mutex_lock(&binder_procs_lock);
6427 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6428 if (itr->pid == pid) {
6429 seq_puts(m, "binder proc state:\n");
6430 print_binder_proc(m, itr, 1);
6431 }
6432 }
6433 mutex_unlock(&binder_procs_lock);
6434
6435 return 0;
6436}
6437
6438static void print_binder_transaction_log_entry(struct seq_file *m,
6439 struct binder_transaction_log_entry *e)
6440{
6441 int debug_id = READ_ONCE(e->debug_id_done);
6442 /*
6443 * read barrier to guarantee debug_id_done read before
6444 * we print the log values
6445 */
6446 smp_rmb();
6447 seq_printf(m,
6448 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6449 e->debug_id, (e->call_type == 2) ? "reply" :
6450 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6451 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6452 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6453 e->return_error, e->return_error_param,
6454 e->return_error_line);
6455 /*
6456	 * read barrier to guarantee the read of debug_id_done happens
6457	 * after we are done printing the fields of the entry
6458 */
6459 smp_rmb();
6460 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6461 "\n" : " (incomplete)\n");
6462}
6463
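/*
 * Dump the transaction log ring buffer, oldest entry first. Once the
 * log has wrapped (log->full), iteration starts just past the most
 * recent entry; races with writers show up as "(incomplete)" entries.
 */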
6464static int transaction_log_show(struct seq_file *m, void *unused)
6465{
6466 struct binder_transaction_log *log = m->private;
6467 unsigned int log_cur = atomic_read(&log->cur);
6468 unsigned int count;
6469 unsigned int cur;
6470 int i;
6471
6472 count = log_cur + 1;
6473 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6474 0 : count % ARRAY_SIZE(log->entry);
6475 if (count > ARRAY_SIZE(log->entry) || log->full)
6476 count = ARRAY_SIZE(log->entry);
6477 for (i = 0; i < count; i++) {
6478 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6479
6480 print_binder_transaction_log_entry(m, &log->entry[index]);
6481 }
6482 return 0;
6483}
6484
6485const struct file_operations binder_fops = {
6486 .owner = THIS_MODULE,
6487 .poll = binder_poll,
6488 .unlocked_ioctl = binder_ioctl,
6489 .compat_ioctl = compat_ptr_ioctl,
6490 .mmap = binder_mmap,
6491 .open = binder_open,
6492 .flush = binder_flush,
6493 .release = binder_release,
6494};
6495
6496DEFINE_SHOW_ATTRIBUTE(state);
6497DEFINE_SHOW_ATTRIBUTE(stats);
6498DEFINE_SHOW_ATTRIBUTE(transactions);
6499DEFINE_SHOW_ATTRIBUTE(transaction_log);
6500
6501const struct binder_debugfs_entry binder_debugfs_entries[] = {
6502 {
6503 .name = "state",
6504 .mode = 0444,
6505 .fops = &state_fops,
6506 .data = NULL,
6507 },
6508 {
6509 .name = "stats",
6510 .mode = 0444,
6511 .fops = &stats_fops,
6512 .data = NULL,
6513 },
6514 {
6515 .name = "transactions",
6516 .mode = 0444,
6517 .fops = &transactions_fops,
6518 .data = NULL,
6519 },
6520 {
6521 .name = "transaction_log",
6522 .mode = 0444,
6523 .fops = &transaction_log_fops,
6524 .data = &binder_transaction_log,
6525 },
6526 {
6527 .name = "failed_transaction_log",
6528 .mode = 0444,
6529 .fops = &transaction_log_fops,
6530 .data = &binder_transaction_log_failed,
6531 },
6532 {} /* terminator */
6533};
6534
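/*
 * Allocate and register one binder misc device (e.g. "binder",
 * "hwbinder", "vndbinder") and add it to the binder_devices list.
 */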
6535static int __init init_binder_device(const char *name)
6536{
6537 int ret;
6538 struct binder_device *binder_device;
6539
6540 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6541 if (!binder_device)
6542 return -ENOMEM;
6543
6544 binder_device->miscdev.fops = &binder_fops;
6545 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6546 binder_device->miscdev.name = name;
6547
6548 refcount_set(&binder_device->ref, 1);
6549 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6550 binder_device->context.name = name;
6551 mutex_init(&binder_device->context.context_mgr_node_lock);
6552
6553 ret = misc_register(&binder_device->miscdev);
6554 if (ret < 0) {
6555 kfree(binder_device);
6556 return ret;
6557 }
6558
6559 hlist_add_head(&binder_device->hlist, &binder_devices);
6560
6561 return ret;
6562}
6563
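/*
 * Module init: set up the allocator shrinker and debugfs tree,
 * register the devices named in the "devices" module parameter
 * (unless binderfs is enabled), and initialize binderfs.
 */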
6564static int __init binder_init(void)
6565{
6566 int ret;
6567 char *device_name, *device_tmp;
6568 struct binder_device *device;
6569 struct hlist_node *tmp;
6570 char *device_names = NULL;
6571 const struct binder_debugfs_entry *db_entry;
6572
6573 ret = binder_alloc_shrinker_init();
6574 if (ret)
6575 return ret;
6576
6577 atomic_set(&binder_transaction_log.cur, ~0U);
6578 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6579
6580 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6581
6582 binder_for_each_debugfs_entry(db_entry)
6583 debugfs_create_file(db_entry->name,
6584 db_entry->mode,
6585 binder_debugfs_dir_entry_root,
6586 db_entry->data,
6587 db_entry->fops);
6588
6589 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6590 binder_debugfs_dir_entry_root);
6591
6592 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6593 strcmp(binder_devices_param, "") != 0) {
6594 /*
6595 * Copy the module_parameter string, because we don't want to
6596 * tokenize it in-place.
6597 */
6598 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6599 if (!device_names) {
6600 ret = -ENOMEM;
6601 goto err_alloc_device_names_failed;
6602 }
6603
6604 device_tmp = device_names;
6605 while ((device_name = strsep(&device_tmp, ","))) {
6606 ret = init_binder_device(device_name);
6607 if (ret)
6608 goto err_init_binder_device_failed;
6609 }
6610 }
6611
6612 ret = init_binderfs();
6613 if (ret)
6614 goto err_init_binder_device_failed;
6615
6616 return ret;
6617
6618err_init_binder_device_failed:
6619 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6620 misc_deregister(&device->miscdev);
6621 hlist_del(&device->hlist);
6622 kfree(device);
6623 }
6624
6625 kfree(device_names);
6626
6627err_alloc_device_names_failed:
6628 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6629 binder_alloc_shrinker_exit();
6630
6631 return ret;
6632}
6633
6634device_initcall(binder_init);
6635
6636#define CREATE_TRACE_POINTS
6637#include "binder_trace.h"
6638
6639MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-only
2/* binder.c
3 *
4 * Android IPC Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
7 */
8
9/*
10 * Locking overview
11 *
12 * There are 3 main spinlocks which must be acquired in the
13 * order shown:
14 *
15 * 1) proc->outer_lock : protects binder_ref
16 * binder_proc_lock() and binder_proc_unlock() are
17 * used to acq/rel.
18 * 2) node->lock : protects most fields of binder_node.
19 * binder_node_lock() and binder_node_unlock() are
20 * used to acq/rel
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
23 * and all todo lists associated with the binder_proc
24 * (proc->todo, thread->todo, proc->delivered_death and
25 * node->async_todo), as well as thread->transaction_stack
26 * binder_inner_proc_lock() and binder_inner_proc_unlock()
27 * are used to acq/rel
28 *
29 * Any lock under procA must never be nested under any lock at the same
30 * level or below on procB.
31 *
32 * Functions that require a lock held on entry indicate which lock
33 * in the suffix of the function name:
34 *
35 * foo_olocked() : requires node->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
40 * ...
41 */
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45#include <linux/fdtable.h>
46#include <linux/file.h>
47#include <linux/freezer.h>
48#include <linux/fs.h>
49#include <linux/list.h>
50#include <linux/miscdevice.h>
51#include <linux/module.h>
52#include <linux/mutex.h>
53#include <linux/nsproxy.h>
54#include <linux/poll.h>
55#include <linux/debugfs.h>
56#include <linux/rbtree.h>
57#include <linux/sched/signal.h>
58#include <linux/sched/mm.h>
59#include <linux/seq_file.h>
60#include <linux/string.h>
61#include <linux/uaccess.h>
62#include <linux/pid_namespace.h>
63#include <linux/security.h>
64#include <linux/spinlock.h>
65#include <linux/ratelimit.h>
66#include <linux/syscalls.h>
67#include <linux/task_work.h>
68
69#include <uapi/linux/android/binder.h>
70#include <uapi/linux/android/binderfs.h>
71
72#include <asm/cacheflush.h>
73
74#include "binder_alloc.h"
75#include "binder_internal.h"
76#include "binder_trace.h"
77
78static HLIST_HEAD(binder_deferred_list);
79static DEFINE_MUTEX(binder_deferred_lock);
80
81static HLIST_HEAD(binder_devices);
82static HLIST_HEAD(binder_procs);
83static DEFINE_MUTEX(binder_procs_lock);
84
85static HLIST_HEAD(binder_dead_nodes);
86static DEFINE_SPINLOCK(binder_dead_nodes_lock);
87
88static struct dentry *binder_debugfs_dir_entry_root;
89static struct dentry *binder_debugfs_dir_entry_proc;
90static atomic_t binder_last_id;
91
92static int proc_show(struct seq_file *m, void *unused);
93DEFINE_SHOW_ATTRIBUTE(proc);
94
95/* This is only defined in include/asm-arm/sizes.h */
96#ifndef SZ_1K
97#define SZ_1K 0x400
98#endif
99
100#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
101
102enum {
103 BINDER_DEBUG_USER_ERROR = 1U << 0,
104 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
105 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
106 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
107 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
108 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
109 BINDER_DEBUG_READ_WRITE = 1U << 6,
110 BINDER_DEBUG_USER_REFS = 1U << 7,
111 BINDER_DEBUG_THREADS = 1U << 8,
112 BINDER_DEBUG_TRANSACTION = 1U << 9,
113 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
114 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
115 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
116 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
117 BINDER_DEBUG_SPINLOCKS = 1U << 14,
118};
119static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
120 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
121module_param_named(debug_mask, binder_debug_mask, uint, 0644);
122
123char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
124module_param_named(devices, binder_devices_param, charp, 0444);
125
126static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
127static int binder_stop_on_user_error;
128
129static int binder_set_stop_on_user_error(const char *val,
130 const struct kernel_param *kp)
131{
132 int ret;
133
134 ret = param_set_int(val, kp);
135 if (binder_stop_on_user_error < 2)
136 wake_up(&binder_user_error_wait);
137 return ret;
138}
139module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
140 param_get_int, &binder_stop_on_user_error, 0644);
141
142#define binder_debug(mask, x...) \
143 do { \
144 if (binder_debug_mask & mask) \
145 pr_info_ratelimited(x); \
146 } while (0)
147
148#define binder_user_error(x...) \
149 do { \
150 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
151 pr_info_ratelimited(x); \
152 if (binder_stop_on_user_error) \
153 binder_stop_on_user_error = 2; \
154 } while (0)
155
156#define to_flat_binder_object(hdr) \
157 container_of(hdr, struct flat_binder_object, hdr)
158
159#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
160
161#define to_binder_buffer_object(hdr) \
162 container_of(hdr, struct binder_buffer_object, hdr)
163
164#define to_binder_fd_array_object(hdr) \
165 container_of(hdr, struct binder_fd_array_object, hdr)
166
167enum binder_stat_types {
168 BINDER_STAT_PROC,
169 BINDER_STAT_THREAD,
170 BINDER_STAT_NODE,
171 BINDER_STAT_REF,
172 BINDER_STAT_DEATH,
173 BINDER_STAT_TRANSACTION,
174 BINDER_STAT_TRANSACTION_COMPLETE,
175 BINDER_STAT_COUNT
176};
177
178struct binder_stats {
179 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
180 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
181 atomic_t obj_created[BINDER_STAT_COUNT];
182 atomic_t obj_deleted[BINDER_STAT_COUNT];
183};
184
185static struct binder_stats binder_stats;
186
187static inline void binder_stats_deleted(enum binder_stat_types type)
188{
189 atomic_inc(&binder_stats.obj_deleted[type]);
190}
191
192static inline void binder_stats_created(enum binder_stat_types type)
193{
194 atomic_inc(&binder_stats.obj_created[type]);
195}
196
197struct binder_transaction_log binder_transaction_log;
198struct binder_transaction_log binder_transaction_log_failed;
199
200static struct binder_transaction_log_entry *binder_transaction_log_add(
201 struct binder_transaction_log *log)
202{
203 struct binder_transaction_log_entry *e;
204 unsigned int cur = atomic_inc_return(&log->cur);
205
206 if (cur >= ARRAY_SIZE(log->entry))
207 log->full = true;
208 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
209 WRITE_ONCE(e->debug_id_done, 0);
210 /*
211 * write-barrier to synchronize access to e->debug_id_done.
212 * We make sure the initialized 0 value is seen before
213 * memset() other fields are zeroed by memset.
214 */
215 smp_wmb();
216 memset(e, 0, sizeof(*e));
217 return e;
218}
219
220/**
221 * struct binder_work - work enqueued on a worklist
222 * @entry: node enqueued on list
223 * @type: type of work to be performed
224 *
225 * There are separate work lists for proc, thread, and node (async).
226 */
227struct binder_work {
228 struct list_head entry;
229
230 enum {
231 BINDER_WORK_TRANSACTION = 1,
232 BINDER_WORK_TRANSACTION_COMPLETE,
233 BINDER_WORK_RETURN_ERROR,
234 BINDER_WORK_NODE,
235 BINDER_WORK_DEAD_BINDER,
236 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
237 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
238 } type;
239};
240
241struct binder_error {
242 struct binder_work work;
243 uint32_t cmd;
244};
245
246/**
247 * struct binder_node - binder node bookkeeping
248 * @debug_id: unique ID for debugging
249 * (invariant after initialized)
250 * @lock: lock for node fields
251 * @work: worklist element for node work
252 * (protected by @proc->inner_lock)
253 * @rb_node: element for proc->nodes tree
254 * (protected by @proc->inner_lock)
255 * @dead_node: element for binder_dead_nodes list
256 * (protected by binder_dead_nodes_lock)
257 * @proc: binder_proc that owns this node
258 * (invariant after initialized)
259 * @refs: list of references on this node
260 * (protected by @lock)
261 * @internal_strong_refs: used to take strong references when
262 * initiating a transaction
263 * (protected by @proc->inner_lock if @proc
264 * and by @lock)
265 * @local_weak_refs: weak user refs from local process
266 * (protected by @proc->inner_lock if @proc
267 * and by @lock)
268 * @local_strong_refs: strong user refs from local process
269 * (protected by @proc->inner_lock if @proc
270 * and by @lock)
271 * @tmp_refs: temporary kernel refs
272 * (protected by @proc->inner_lock while @proc
273 * is valid, and by binder_dead_nodes_lock
274 * if @proc is NULL. During inc/dec and node release
275 * it is also protected by @lock to provide safety
276 * as the node dies and @proc becomes NULL)
277 * @ptr: userspace pointer for node
278 * (invariant, no lock needed)
279 * @cookie: userspace cookie for node
280 * (invariant, no lock needed)
281 * @has_strong_ref: userspace notified of strong ref
282 * (protected by @proc->inner_lock if @proc
283 * and by @lock)
284 * @pending_strong_ref: userspace has acked notification of strong ref
285 * (protected by @proc->inner_lock if @proc
286 * and by @lock)
287 * @has_weak_ref: userspace notified of weak ref
288 * (protected by @proc->inner_lock if @proc
289 * and by @lock)
290 * @pending_weak_ref: userspace has acked notification of weak ref
291 * (protected by @proc->inner_lock if @proc
292 * and by @lock)
293 * @has_async_transaction: async transaction to node in progress
294 * (protected by @lock)
295 * @accept_fds: file descriptor operations supported for node
296 * (invariant after initialized)
297 * @min_priority: minimum scheduling priority
298 * (invariant after initialized)
299 * @txn_security_ctx: require sender's security context
300 * (invariant after initialized)
301 * @async_todo: list of async work items
302 * (protected by @proc->inner_lock)
303 *
304 * Bookkeeping structure for binder nodes.
305 */
306struct binder_node {
307 int debug_id;
308 spinlock_t lock;
309 struct binder_work work;
310 union {
311 struct rb_node rb_node;
312 struct hlist_node dead_node;
313 };
314 struct binder_proc *proc;
315 struct hlist_head refs;
316 int internal_strong_refs;
317 int local_weak_refs;
318 int local_strong_refs;
319 int tmp_refs;
320 binder_uintptr_t ptr;
321 binder_uintptr_t cookie;
322 struct {
323 /*
324 * bitfield elements protected by
325 * proc inner_lock
326 */
327 u8 has_strong_ref:1;
328 u8 pending_strong_ref:1;
329 u8 has_weak_ref:1;
330 u8 pending_weak_ref:1;
331 };
332 struct {
333 /*
334 * invariant after initialization
335 */
336 u8 accept_fds:1;
337 u8 txn_security_ctx:1;
338 u8 min_priority;
339 };
340 bool has_async_transaction;
341 struct list_head async_todo;
342};
343
344struct binder_ref_death {
345 /**
346 * @work: worklist element for death notifications
347 * (protected by inner_lock of the proc that
348 * this ref belongs to)
349 */
350 struct binder_work work;
351 binder_uintptr_t cookie;
352};
353
354/**
355 * struct binder_ref_data - binder_ref counts and id
356 * @debug_id: unique ID for the ref
357 * @desc: unique userspace handle for ref
358 * @strong: strong ref count (debugging only if not locked)
359 * @weak: weak ref count (debugging only if not locked)
360 *
361 * Structure to hold ref count and ref id information. Since
362 * the actual ref can only be accessed with a lock, this structure
363 * is used to return information about the ref to callers of
364 * ref inc/dec functions.
365 */
366struct binder_ref_data {
367 int debug_id;
368 uint32_t desc;
369 int strong;
370 int weak;
371};
372
373/**
374 * struct binder_ref - struct to track references on nodes
375 * @data: binder_ref_data containing id, handle, and current refcounts
376 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
377 * @rb_node_node: node for lookup by @node in proc's rb_tree
378 * @node_entry: list entry for node->refs list in target node
379 * (protected by @node->lock)
380 * @proc: binder_proc containing ref
381 * @node: binder_node of target node. When cleaning up a
382 * ref for deletion in binder_cleanup_ref, a non-NULL
383 * @node indicates the node must be freed
384 * @death: pointer to death notification (ref_death) if requested
385 * (protected by @node->lock)
386 *
387 * Structure to track references from procA to target node (on procB). This
388 * structure is unsafe to access without holding @proc->outer_lock.
389 */
390struct binder_ref {
391 /* Lookups needed: */
392 /* node + proc => ref (transaction) */
393 /* desc + proc => ref (transaction, inc/dec ref) */
394 /* node => refs + procs (proc exit) */
395 struct binder_ref_data data;
396 struct rb_node rb_node_desc;
397 struct rb_node rb_node_node;
398 struct hlist_node node_entry;
399 struct binder_proc *proc;
400 struct binder_node *node;
401 struct binder_ref_death *death;
402};
403
404enum binder_deferred_state {
405 BINDER_DEFERRED_FLUSH = 0x01,
406 BINDER_DEFERRED_RELEASE = 0x02,
407};
408
409/**
410 * struct binder_proc - binder process bookkeeping
411 * @proc_node: element for binder_procs list
412 * @threads: rbtree of binder_threads in this proc
413 * (protected by @inner_lock)
414 * @nodes: rbtree of binder nodes associated with
415 * this proc ordered by node->ptr
416 * (protected by @inner_lock)
417 * @refs_by_desc: rbtree of refs ordered by ref->desc
418 * (protected by @outer_lock)
419 * @refs_by_node: rbtree of refs ordered by ref->node
420 * (protected by @outer_lock)
421 * @waiting_threads: threads currently waiting for proc work
422 * (protected by @inner_lock)
423 * @pid PID of group_leader of process
424 * (invariant after initialized)
425 * @tsk task_struct for group_leader of process
426 * (invariant after initialized)
427 * @deferred_work_node: element for binder_deferred_list
428 * (protected by binder_deferred_lock)
429 * @deferred_work: bitmap of deferred work to perform
430 * (protected by binder_deferred_lock)
431 * @is_dead: process is dead and awaiting free
432 * when outstanding transactions are cleaned up
433 * (protected by @inner_lock)
434 * @todo: list of work for this process
435 * (protected by @inner_lock)
436 * @stats: per-process binder statistics
437 * (atomics, no lock needed)
438 * @delivered_death: list of delivered death notification
439 * (protected by @inner_lock)
440 * @max_threads: cap on number of binder threads
441 * (protected by @inner_lock)
442 * @requested_threads: number of binder threads requested but not
443 * yet started. In current implementation, can
444 * only be 0 or 1.
445 * (protected by @inner_lock)
446 * @requested_threads_started: number binder threads started
447 * (protected by @inner_lock)
448 * @tmp_ref: temporary reference to indicate proc is in use
449 * (protected by @inner_lock)
450 * @default_priority: default scheduler priority
451 * (invariant after initialized)
452 * @debugfs_entry: debugfs node
453 * @alloc: binder allocator bookkeeping
454 * @context: binder_context for this proc
455 * (invariant after initialized)
456 * @inner_lock: can nest under outer_lock and/or node lock
457 * @outer_lock: no nesting under innor or node lock
458 * Lock order: 1) outer, 2) node, 3) inner
459 * @binderfs_entry: process-specific binderfs log file
460 *
461 * Bookkeeping structure for binder processes
462 */
463struct binder_proc {
464 struct hlist_node proc_node;
465 struct rb_root threads;
466 struct rb_root nodes;
467 struct rb_root refs_by_desc;
468 struct rb_root refs_by_node;
469 struct list_head waiting_threads;
470 int pid;
471 struct task_struct *tsk;
472 struct hlist_node deferred_work_node;
473 int deferred_work;
474 bool is_dead;
475
476 struct list_head todo;
477 struct binder_stats stats;
478 struct list_head delivered_death;
479 int max_threads;
480 int requested_threads;
481 int requested_threads_started;
482 int tmp_ref;
483 long default_priority;
484 struct dentry *debugfs_entry;
485 struct binder_alloc alloc;
486 struct binder_context *context;
487 spinlock_t inner_lock;
488 spinlock_t outer_lock;
489 struct dentry *binderfs_entry;
490};
491
492enum {
493 BINDER_LOOPER_STATE_REGISTERED = 0x01,
494 BINDER_LOOPER_STATE_ENTERED = 0x02,
495 BINDER_LOOPER_STATE_EXITED = 0x04,
496 BINDER_LOOPER_STATE_INVALID = 0x08,
497 BINDER_LOOPER_STATE_WAITING = 0x10,
498 BINDER_LOOPER_STATE_POLL = 0x20,
499};
500
501/**
502 * struct binder_thread - binder thread bookkeeping
503 * @proc: binder process for this thread
504 * (invariant after initialization)
505 * @rb_node: element for proc->threads rbtree
506 * (protected by @proc->inner_lock)
507 * @waiting_thread_node: element for @proc->waiting_threads list
508 * (protected by @proc->inner_lock)
509 * @pid: PID for this thread
510 * (invariant after initialization)
511 * @looper: bitmap of looping state
512 * (only accessed by this thread)
513 * @looper_needs_return: looping thread needs to exit driver
514 * (no lock needed)
515 * @transaction_stack: stack of in-progress transactions for this thread
516 * (protected by @proc->inner_lock)
517 * @todo: list of work to do for this thread
518 * (protected by @proc->inner_lock)
519 * @process_todo: whether work in @todo should be processed
520 * (protected by @proc->inner_lock)
521 * @return_error: transaction errors reported by this thread
522 * (only accessed by this thread)
523 * @reply_error: transaction errors reported by target thread
524 * (protected by @proc->inner_lock)
525 * @wait: wait queue for thread work
526 * @stats: per-thread statistics
527 * (atomics, no lock needed)
528 * @tmp_ref: temporary reference to indicate thread is in use
529 * (atomic since @proc->inner_lock cannot
530 * always be acquired)
531 * @is_dead: thread is dead and awaiting free
532 * when outstanding transactions are cleaned up
533 * (protected by @proc->inner_lock)
534 *
535 * Bookkeeping structure for binder threads.
536 */
537struct binder_thread {
538 struct binder_proc *proc;
539 struct rb_node rb_node;
540 struct list_head waiting_thread_node;
541 int pid;
542 int looper; /* only modified by this thread */
543 bool looper_need_return; /* can be written by other thread */
544 struct binder_transaction *transaction_stack;
545 struct list_head todo;
546 bool process_todo;
547 struct binder_error return_error;
548 struct binder_error reply_error;
549 wait_queue_head_t wait;
550 struct binder_stats stats;
551 atomic_t tmp_ref;
552 bool is_dead;
553};
554
555/**
556 * struct binder_txn_fd_fixup - transaction fd fixup list element
557 * @fixup_entry: list entry
558 * @file: struct file to be associated with new fd
559 * @offset: offset in buffer data to this fixup
560 *
561 * List element for fd fixups in a transaction. Since file
562 * descriptors need to be allocated in the context of the
563 * target process, we pass each fd to be processed in this
564 * struct.
565 */
566struct binder_txn_fd_fixup {
567 struct list_head fixup_entry;
568 struct file *file;
569 size_t offset;
570};
571
572struct binder_transaction {
573 int debug_id;
574 struct binder_work work;
575 struct binder_thread *from;
576 struct binder_transaction *from_parent;
577 struct binder_proc *to_proc;
578 struct binder_thread *to_thread;
579 struct binder_transaction *to_parent;
580 unsigned need_reply:1;
581 /* unsigned is_dead:1; */ /* not used at the moment */
582
583 struct binder_buffer *buffer;
584 unsigned int code;
585 unsigned int flags;
586 long priority;
587 long saved_priority;
588 kuid_t sender_euid;
589 struct list_head fd_fixups;
590 binder_uintptr_t security_ctx;
591 /**
592 * @lock: protects @from, @to_proc, and @to_thread
593 *
594 * @from, @to_proc, and @to_thread can be set to NULL
595 * during thread teardown
596 */
597 spinlock_t lock;
598};
599
600/**
601 * struct binder_object - union of flat binder object types
602 * @hdr: generic object header
603 * @fbo: binder object (nodes and refs)
604 * @fdo: file descriptor object
605 * @bbo: binder buffer pointer
606 * @fdao: file descriptor array
607 *
608 * Used for type-independent object copies
609 */
610struct binder_object {
611 union {
612 struct binder_object_header hdr;
613 struct flat_binder_object fbo;
614 struct binder_fd_object fdo;
615 struct binder_buffer_object bbo;
616 struct binder_fd_array_object fdao;
617 };
618};
619
620/**
621 * binder_proc_lock() - Acquire outer lock for given binder_proc
622 * @proc: struct binder_proc to acquire
623 *
624 * Acquires proc->outer_lock. Used to protect binder_ref
625 * structures associated with the given proc.
626 */
627#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
628static void
629_binder_proc_lock(struct binder_proc *proc, int line)
630 __acquires(&proc->outer_lock)
631{
632 binder_debug(BINDER_DEBUG_SPINLOCKS,
633 "%s: line=%d\n", __func__, line);
634 spin_lock(&proc->outer_lock);
635}
636
637/**
638 * binder_proc_unlock() - Release spinlock for given binder_proc
639 * @proc:	struct binder_proc to release
640 *
641 * Release lock acquired via binder_proc_lock()
642 */
643#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
644static void
645_binder_proc_unlock(struct binder_proc *proc, int line)
646 __releases(&proc->outer_lock)
647{
648 binder_debug(BINDER_DEBUG_SPINLOCKS,
649 "%s: line=%d\n", __func__, line);
650 spin_unlock(&proc->outer_lock);
651}
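
/*
 * Minimal pairing sketch: the outer lock brackets binder_ref lookups,
 * e.g. the *_olocked ref helpers defined later in this file:
 *
 *	binder_proc_lock(proc);
 *	ref = binder_get_ref_olocked(proc, desc, true);
 *	...
 *	binder_proc_unlock(proc);
 */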
652
653/**
654 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
655 * @proc: struct binder_proc to acquire
656 *
657 * Acquires proc->inner_lock. Used to protect todo lists
658 */
659#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
660static void
661_binder_inner_proc_lock(struct binder_proc *proc, int line)
662 __acquires(&proc->inner_lock)
663{
664 binder_debug(BINDER_DEBUG_SPINLOCKS,
665 "%s: line=%d\n", __func__, line);
666 spin_lock(&proc->inner_lock);
667}
668
669/**
670 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
671 * @proc:	struct binder_proc to release
672 *
673 * Release lock acquired via binder_inner_proc_lock()
674 */
675#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
676static void
677_binder_inner_proc_unlock(struct binder_proc *proc, int line)
678 __releases(&proc->inner_lock)
679{
680 binder_debug(BINDER_DEBUG_SPINLOCKS,
681 "%s: line=%d\n", __func__, line);
682 spin_unlock(&proc->inner_lock);
683}
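
/*
 * Minimal pairing sketch (binder_worklist_empty() below does exactly
 * this): bracket every *_ilocked helper with the inner lock:
 *
 *	binder_inner_proc_lock(proc);
 *	empty = binder_worklist_empty_ilocked(&proc->todo);
 *	binder_inner_proc_unlock(proc);
 */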
684
685/**
686 * binder_node_lock() - Acquire spinlock for given binder_node
687 * @node: struct binder_node to acquire
688 *
689 * Acquires node->lock. Used to protect binder_node fields
690 */
691#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
692static void
693_binder_node_lock(struct binder_node *node, int line)
694 __acquires(&node->lock)
695{
696 binder_debug(BINDER_DEBUG_SPINLOCKS,
697 "%s: line=%d\n", __func__, line);
698 spin_lock(&node->lock);
699}
700
701/**
702 * binder_node_unlock() - Release spinlock for given binder_node
703 * @node:	struct binder_node to release
704 *
705 * Release lock acquired via binder_node_lock()
706 */
707#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
708static void
709_binder_node_unlock(struct binder_node *node, int line)
710 __releases(&node->lock)
711{
712 binder_debug(BINDER_DEBUG_SPINLOCKS,
713 "%s: line=%d\n", __func__, line);
714 spin_unlock(&node->lock);
715}
716
717/**
718 * binder_node_inner_lock() - Acquire node and inner locks
719 * @node: struct binder_node to acquire
720 *
721 * Acquires node->lock. If node->proc is non-NULL, also acquires
722 * node->proc->inner_lock. Used to protect binder_node fields
723 */
724#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
725static void
726_binder_node_inner_lock(struct binder_node *node, int line)
727 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
728{
729 binder_debug(BINDER_DEBUG_SPINLOCKS,
730 "%s: line=%d\n", __func__, line);
731 spin_lock(&node->lock);
732 if (node->proc)
733 binder_inner_proc_lock(node->proc);
734 else
735 /* annotation for sparse */
736 __acquire(&node->proc->inner_lock);
737}
738
739/**
740 * binder_node_inner_unlock() - Release node and inner locks
741 * @node:	struct binder_node to release
742 *
743 * Release locks acquired via binder_node_inner_lock()
744 */
745#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
746static void
747_binder_node_inner_unlock(struct binder_node *node, int line)
748 __releases(&node->lock) __releases(&node->proc->inner_lock)
749{
750 struct binder_proc *proc = node->proc;
751
752 binder_debug(BINDER_DEBUG_SPINLOCKS,
753 "%s: line=%d\n", __func__, line);
754 if (proc)
755 binder_inner_proc_unlock(proc);
756 else
757 /* annotation for sparse */
758 __release(&node->proc->inner_lock);
759 spin_unlock(&node->lock);
760}
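
/*
 * Minimal pairing sketch (mirrors binder_inc_node() below): take the
 * node and inner locks together, call the _nilocked variant, then
 * release both:
 *
 *	binder_node_inner_lock(node);
 *	ret = binder_inc_node_nilocked(node, strong, internal, list);
 *	binder_node_inner_unlock(node);
 */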
761
762static bool binder_worklist_empty_ilocked(struct list_head *list)
763{
764 return list_empty(list);
765}
766
767/**
768 * binder_worklist_empty() - Check if no items on the work list
769 * @proc: binder_proc associated with list
770 * @list: list to check
771 *
772 * Return: true if there are no items on list, else false
773 */
774static bool binder_worklist_empty(struct binder_proc *proc,
775 struct list_head *list)
776{
777 bool ret;
778
779 binder_inner_proc_lock(proc);
780 ret = binder_worklist_empty_ilocked(list);
781 binder_inner_proc_unlock(proc);
782 return ret;
783}
784
785/**
786 * binder_enqueue_work_ilocked() - Add an item to the work list
787 * @work: struct binder_work to add to list
788 * @target_list: list to add work to
789 *
790 * Adds the work to the specified list. Asserts that work
791 * is not already on a list.
792 *
793 * Requires the proc->inner_lock to be held.
794 */
795static void
796binder_enqueue_work_ilocked(struct binder_work *work,
797 struct list_head *target_list)
798{
799 BUG_ON(target_list == NULL);
800 BUG_ON(work->entry.next && !list_empty(&work->entry));
801 list_add_tail(&work->entry, target_list);
802}
803
804/**
805 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
806 * @thread: thread to queue work to
807 * @work: struct binder_work to add to list
808 *
809 * Adds the work to the todo list of the thread. Doesn't set the process_todo
810 * flag, which means that (if it wasn't already set) the thread will go to
811 * sleep without handling this work when it calls read.
812 *
813 * Requires the proc->inner_lock to be held.
814 */
815static void
816binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
817 struct binder_work *work)
818{
819 WARN_ON(!list_empty(&thread->waiting_thread_node));
820 binder_enqueue_work_ilocked(work, &thread->todo);
821}
822
823/**
824 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
825 * @thread: thread to queue work to
826 * @work: struct binder_work to add to list
827 *
828 * Adds the work to the todo list of the thread, and enables processing
829 * of the todo queue.
830 *
831 * Requires the proc->inner_lock to be held.
832 */
833static void
834binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
835 struct binder_work *work)
836{
837 WARN_ON(!list_empty(&thread->waiting_thread_node));
838 binder_enqueue_work_ilocked(work, &thread->todo);
839 thread->process_todo = true;
840}
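
/*
 * Contrast of the two enqueue variants (illustrative sketch; w1 and
 * w2 stand for arbitrary work items). The first sets
 * thread->process_todo so a sleeping reader is woken; the deferred
 * variant leaves it untouched, so the work waits for the next wakeup:
 *
 *	binder_enqueue_thread_work_ilocked(thread, &w1);
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &w2);
 */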
841
842/**
843 * binder_enqueue_thread_work() - Add an item to the thread work list
844 * @thread: thread to queue work to
845 * @work: struct binder_work to add to list
846 *
847 * Adds the work to the todo list of the thread, and enables processing
848 * of the todo queue.
849 */
850static void
851binder_enqueue_thread_work(struct binder_thread *thread,
852 struct binder_work *work)
853{
854 binder_inner_proc_lock(thread->proc);
855 binder_enqueue_thread_work_ilocked(thread, work);
856 binder_inner_proc_unlock(thread->proc);
857}
858
859static void
860binder_dequeue_work_ilocked(struct binder_work *work)
861{
862 list_del_init(&work->entry);
863}
864
865/**
866 * binder_dequeue_work() - Removes an item from the work list
867 * @proc: binder_proc associated with list
868 * @work: struct binder_work to remove from list
869 *
870 * Removes the specified work item from whatever list it is on.
871 * Can safely be called if work is not on any list.
872 */
873static void
874binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
875{
876 binder_inner_proc_lock(proc);
877 binder_dequeue_work_ilocked(work);
878 binder_inner_proc_unlock(proc);
879}
880
881static struct binder_work *binder_dequeue_work_head_ilocked(
882 struct list_head *list)
883{
884 struct binder_work *w;
885
886 w = list_first_entry_or_null(list, struct binder_work, entry);
887 if (w)
888 list_del_init(&w->entry);
889 return w;
890}
891
892/**
893 * binder_dequeue_work_head() - Dequeues the item at head of list
894 * @proc: binder_proc associated with list
895 * @list: list to dequeue head
896 *
897 * Removes the head of the list if there are items on the list
898 *
899 * Return: pointer to dequeued binder_work, or NULL if list was empty
900 */
901static struct binder_work *binder_dequeue_work_head(
902 struct binder_proc *proc,
903 struct list_head *list)
904{
905 struct binder_work *w;
906
907 binder_inner_proc_lock(proc);
908 w = binder_dequeue_work_head_ilocked(list);
909 binder_inner_proc_unlock(proc);
910 return w;
911}
912
913static void
914binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
915static void binder_free_thread(struct binder_thread *thread);
916static void binder_free_proc(struct binder_proc *proc);
917static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
918
919static bool binder_has_work_ilocked(struct binder_thread *thread,
920 bool do_proc_work)
921{
922 return thread->process_todo ||
923 thread->looper_need_return ||
924 (do_proc_work &&
925 !binder_worklist_empty_ilocked(&thread->proc->todo));
926}
927
928static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
929{
930 bool has_work;
931
932 binder_inner_proc_lock(thread->proc);
933 has_work = binder_has_work_ilocked(thread, do_proc_work);
934 binder_inner_proc_unlock(thread->proc);
935
936 return has_work;
937}
938
939static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
940{
941 return !thread->transaction_stack &&
942 binder_worklist_empty_ilocked(&thread->todo) &&
943 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
944 BINDER_LOOPER_STATE_REGISTERED));
945}
946
947static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
948 bool sync)
949{
950 struct rb_node *n;
951 struct binder_thread *thread;
952
953 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
954 thread = rb_entry(n, struct binder_thread, rb_node);
955 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
956 binder_available_for_proc_work_ilocked(thread)) {
957 if (sync)
958 wake_up_interruptible_sync(&thread->wait);
959 else
960 wake_up_interruptible(&thread->wait);
961 }
962 }
963}
964
965/**
966 * binder_select_thread_ilocked() - selects a thread for doing proc work.
967 * @proc: process to select a thread from
968 *
969 * Note that calling this function moves the thread off the waiting_threads
970 * list, so it can only be woken up by the caller of this function, or a
971 * signal. Therefore, callers *should* always wake up the thread this function
972 * returns.
973 *
974 * Return: If there's a thread currently waiting for process work,
975 * returns that thread. Otherwise returns NULL.
976 */
977static struct binder_thread *
978binder_select_thread_ilocked(struct binder_proc *proc)
979{
980 struct binder_thread *thread;
981
982 assert_spin_locked(&proc->inner_lock);
983 thread = list_first_entry_or_null(&proc->waiting_threads,
984 struct binder_thread,
985 waiting_thread_node);
986
987 if (thread)
988 list_del_init(&thread->waiting_thread_node);
989
990 return thread;
991}
992
993/**
994 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
995 * @proc: process to wake up a thread in
996 * @thread: specific thread to wake-up (may be NULL)
997 * @sync: whether to do a synchronous wake-up
998 *
999 * This function wakes up a thread in the @proc process.
1000 * The caller may provide a specific thread to wake-up in
1001 * the @thread parameter. If @thread is NULL, this function
1002 * will wake up threads that have called poll().
1003 *
1004 * Note that for this function to work as expected, callers
1005 * should first call binder_select_thread() to find a thread
1006 * to handle the work (if they don't have a thread already),
1007 * and pass the result into the @thread parameter.
1008 */
1009static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1010 struct binder_thread *thread,
1011 bool sync)
1012{
1013 assert_spin_locked(&proc->inner_lock);
1014
1015 if (thread) {
1016 if (sync)
1017 wake_up_interruptible_sync(&thread->wait);
1018 else
1019 wake_up_interruptible(&thread->wait);
1020 return;
1021 }
1022
1023 /* Didn't find a thread waiting for proc work; this can happen
1024 * in two scenarios:
1025 * 1. All threads are busy handling transactions
1026 * In that case, one of those threads should call back into
1027 * the kernel driver soon and pick up this work.
1028 * 2. Threads are using the (e)poll interface, in which case
1029 * they may be blocked on the waitqueue without having been
1030 * added to waiting_threads. For this case, we just iterate
1031 * over all threads not handling transaction work, and
1032 * wake them all up. We wake all because we don't know whether
1033 * a thread that called into (e)poll is handling non-binder
1034 * work currently.
1035 */
1036 binder_wakeup_poll_threads_ilocked(proc, sync);
1037}
1038
1039static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1040{
1041 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1042
1043 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1044}
1045
1046static void binder_set_nice(long nice)
1047{
1048 long min_nice;
1049
1050 if (can_nice(current, nice)) {
1051 set_user_nice(current, nice);
1052 return;
1053 }
1054 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1055 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1056		     "%d: nice value %ld not allowed, use %ld instead\n",
1057 current->pid, nice, min_nice);
1058 set_user_nice(current, min_nice);
1059 if (min_nice <= MAX_NICE)
1060 return;
1061 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1062}
1063
1064static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1065 binder_uintptr_t ptr)
1066{
1067 struct rb_node *n = proc->nodes.rb_node;
1068 struct binder_node *node;
1069
1070 assert_spin_locked(&proc->inner_lock);
1071
1072 while (n) {
1073 node = rb_entry(n, struct binder_node, rb_node);
1074
1075 if (ptr < node->ptr)
1076 n = n->rb_left;
1077 else if (ptr > node->ptr)
1078 n = n->rb_right;
1079 else {
1080 /*
1081 * take an implicit weak reference
1082 * to ensure node stays alive until
1083 * call to binder_put_node()
1084 */
1085 binder_inc_node_tmpref_ilocked(node);
1086 return node;
1087 }
1088 }
1089 return NULL;
1090}
1091
1092static struct binder_node *binder_get_node(struct binder_proc *proc,
1093 binder_uintptr_t ptr)
1094{
1095 struct binder_node *node;
1096
1097 binder_inner_proc_lock(proc);
1098 node = binder_get_node_ilocked(proc, ptr);
1099 binder_inner_proc_unlock(proc);
1100 return node;
1101}
1102
1103static struct binder_node *binder_init_node_ilocked(
1104 struct binder_proc *proc,
1105 struct binder_node *new_node,
1106 struct flat_binder_object *fp)
1107{
1108 struct rb_node **p = &proc->nodes.rb_node;
1109 struct rb_node *parent = NULL;
1110 struct binder_node *node;
1111 binder_uintptr_t ptr = fp ? fp->binder : 0;
1112 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1113 __u32 flags = fp ? fp->flags : 0;
1114
1115 assert_spin_locked(&proc->inner_lock);
1116
1117 while (*p) {
1118
1119 parent = *p;
1120 node = rb_entry(parent, struct binder_node, rb_node);
1121
1122 if (ptr < node->ptr)
1123 p = &(*p)->rb_left;
1124 else if (ptr > node->ptr)
1125 p = &(*p)->rb_right;
1126 else {
1127 /*
1128 * A matching node is already in
1129 * the rb tree. Abandon the init
1130 * and return it.
1131 */
1132 binder_inc_node_tmpref_ilocked(node);
1133 return node;
1134 }
1135 }
1136 node = new_node;
1137 binder_stats_created(BINDER_STAT_NODE);
1138 node->tmp_refs++;
1139 rb_link_node(&node->rb_node, parent, p);
1140 rb_insert_color(&node->rb_node, &proc->nodes);
1141 node->debug_id = atomic_inc_return(&binder_last_id);
1142 node->proc = proc;
1143 node->ptr = ptr;
1144 node->cookie = cookie;
1145 node->work.type = BINDER_WORK_NODE;
1146 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1147 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1148 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1149 spin_lock_init(&node->lock);
1150 INIT_LIST_HEAD(&node->work.entry);
1151 INIT_LIST_HEAD(&node->async_todo);
1152 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1153 "%d:%d node %d u%016llx c%016llx created\n",
1154 proc->pid, current->pid, node->debug_id,
1155 (u64)node->ptr, (u64)node->cookie);
1156
1157 return node;
1158}
1159
1160static struct binder_node *binder_new_node(struct binder_proc *proc,
1161 struct flat_binder_object *fp)
1162{
1163 struct binder_node *node;
1164 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1165
1166 if (!new_node)
1167 return NULL;
1168 binder_inner_proc_lock(proc);
1169 node = binder_init_node_ilocked(proc, new_node, fp);
1170 binder_inner_proc_unlock(proc);
1171 if (node != new_node)
1172 /*
1173 * The node was already added by another thread
1174 */
1175 kfree(new_node);
1176
1177 return node;
1178}
1179
1180static void binder_free_node(struct binder_node *node)
1181{
1182 kfree(node);
1183 binder_stats_deleted(BINDER_STAT_NODE);
1184}
1185
1186static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1187 int internal,
1188 struct list_head *target_list)
1189{
1190 struct binder_proc *proc = node->proc;
1191
1192 assert_spin_locked(&node->lock);
1193 if (proc)
1194 assert_spin_locked(&proc->inner_lock);
1195 if (strong) {
1196 if (internal) {
1197 if (target_list == NULL &&
1198 node->internal_strong_refs == 0 &&
1199 !(node->proc &&
1200 node == node->proc->context->binder_context_mgr_node &&
1201 node->has_strong_ref)) {
1202 pr_err("invalid inc strong node for %d\n",
1203 node->debug_id);
1204 return -EINVAL;
1205 }
1206 node->internal_strong_refs++;
1207 } else
1208 node->local_strong_refs++;
1209 if (!node->has_strong_ref && target_list) {
1210 struct binder_thread *thread = container_of(target_list,
1211 struct binder_thread, todo);
1212 binder_dequeue_work_ilocked(&node->work);
1213 BUG_ON(&thread->todo != target_list);
1214 binder_enqueue_deferred_thread_work_ilocked(thread,
1215 &node->work);
1216 }
1217 } else {
1218 if (!internal)
1219 node->local_weak_refs++;
1220 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1221 if (target_list == NULL) {
1222 pr_err("invalid inc weak node for %d\n",
1223 node->debug_id);
1224 return -EINVAL;
1225 }
1226 /*
1227 * See comment above
1228 */
1229 binder_enqueue_work_ilocked(&node->work, target_list);
1230 }
1231 }
1232 return 0;
1233}
1234
1235static int binder_inc_node(struct binder_node *node, int strong, int internal,
1236 struct list_head *target_list)
1237{
1238 int ret;
1239
1240 binder_node_inner_lock(node);
1241 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1242 binder_node_inner_unlock(node);
1243
1244 return ret;
1245}
1246
1247static bool binder_dec_node_nilocked(struct binder_node *node,
1248 int strong, int internal)
1249{
1250 struct binder_proc *proc = node->proc;
1251
1252 assert_spin_locked(&node->lock);
1253 if (proc)
1254 assert_spin_locked(&proc->inner_lock);
1255 if (strong) {
1256 if (internal)
1257 node->internal_strong_refs--;
1258 else
1259 node->local_strong_refs--;
1260 if (node->local_strong_refs || node->internal_strong_refs)
1261 return false;
1262 } else {
1263 if (!internal)
1264 node->local_weak_refs--;
1265 if (node->local_weak_refs || node->tmp_refs ||
1266 !hlist_empty(&node->refs))
1267 return false;
1268 }
1269
1270 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1271 if (list_empty(&node->work.entry)) {
1272 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1273 binder_wakeup_proc_ilocked(proc);
1274 }
1275 } else {
1276 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1277 !node->local_weak_refs && !node->tmp_refs) {
1278 if (proc) {
1279 binder_dequeue_work_ilocked(&node->work);
1280 rb_erase(&node->rb_node, &proc->nodes);
1281 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1282 "refless node %d deleted\n",
1283 node->debug_id);
1284 } else {
1285 BUG_ON(!list_empty(&node->work.entry));
1286 spin_lock(&binder_dead_nodes_lock);
1287 /*
1288 * tmp_refs could have changed so
1289 * check it again
1290 */
1291 if (node->tmp_refs) {
1292 spin_unlock(&binder_dead_nodes_lock);
1293 return false;
1294 }
1295 hlist_del(&node->dead_node);
1296 spin_unlock(&binder_dead_nodes_lock);
1297 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1298 "dead node %d deleted\n",
1299 node->debug_id);
1300 }
1301 return true;
1302 }
1303 }
1304 return false;
1305}
1306
1307static void binder_dec_node(struct binder_node *node, int strong, int internal)
1308{
1309 bool free_node;
1310
1311 binder_node_inner_lock(node);
1312 free_node = binder_dec_node_nilocked(node, strong, internal);
1313 binder_node_inner_unlock(node);
1314 if (free_node)
1315 binder_free_node(node);
1316}
1317
1318static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1319{
1320 /*
1321 * No call to binder_inc_node() is needed since we
1322 * don't need to inform userspace of any changes to
1323 * tmp_refs
1324 */
1325 node->tmp_refs++;
1326}
1327
1328/**
1329 * binder_inc_node_tmpref() - take a temporary reference on node
1330 * @node: node to reference
1331 *
1332 * Take reference on node to prevent the node from being freed
1333 * while referenced only by a local variable. The inner lock is
1334 * needed to serialize with the node work on the queue (which
1335 * isn't needed after the node is dead). If the node is dead
1336 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1337 * node->tmp_refs against dead-node-only cases where the node
1338 * lock cannot be acquired (eg traversing the dead node list to
1339 * print nodes)
1340 */
1341static void binder_inc_node_tmpref(struct binder_node *node)
1342{
1343 binder_node_lock(node);
1344 if (node->proc)
1345 binder_inner_proc_lock(node->proc);
1346 else
1347 spin_lock(&binder_dead_nodes_lock);
1348 binder_inc_node_tmpref_ilocked(node);
1349 if (node->proc)
1350 binder_inner_proc_unlock(node->proc);
1351 else
1352 spin_unlock(&binder_dead_nodes_lock);
1353 binder_node_unlock(node);
1354}
1355
1356/**
1357 * binder_dec_node_tmpref() - remove a temporary reference on node
1358 * @node: node to reference
1359 *
1360 * Release temporary reference on node taken via binder_inc_node_tmpref()
1361 */
1362static void binder_dec_node_tmpref(struct binder_node *node)
1363{
1364 bool free_node;
1365
1366 binder_node_inner_lock(node);
1367 if (!node->proc)
1368 spin_lock(&binder_dead_nodes_lock);
1369 else
1370 __acquire(&binder_dead_nodes_lock);
1371 node->tmp_refs--;
1372 BUG_ON(node->tmp_refs < 0);
1373 if (!node->proc)
1374 spin_unlock(&binder_dead_nodes_lock);
1375 else
1376 __release(&binder_dead_nodes_lock);
1377 /*
1378 * Call binder_dec_node() to check if all refcounts are 0
1379 * and cleanup is needed. Calling with strong=0 and internal=1
1380 * causes no actual reference to be released in binder_dec_node().
1381 * If that changes, a change is needed here too.
1382 */
1383 free_node = binder_dec_node_nilocked(node, 0, 1);
1384 binder_node_inner_unlock(node);
1385 if (free_node)
1386 binder_free_node(node);
1387}
1388
1389static void binder_put_node(struct binder_node *node)
1390{
1391 binder_dec_node_tmpref(node);
1392}
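
/*
 * Temporary node reference lifecycle (minimal sketch): every
 * successful binder_get_node() takes a tmp_ref and must be paired
 * with binder_put_node() once the local pointer is no longer used:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...
 *		binder_put_node(node);
 *	}
 */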
1393
1394static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1395 u32 desc, bool need_strong_ref)
1396{
1397 struct rb_node *n = proc->refs_by_desc.rb_node;
1398 struct binder_ref *ref;
1399
1400 while (n) {
1401 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1402
1403 if (desc < ref->data.desc) {
1404 n = n->rb_left;
1405 } else if (desc > ref->data.desc) {
1406 n = n->rb_right;
1407 } else if (need_strong_ref && !ref->data.strong) {
1408 binder_user_error("tried to use weak ref as strong ref\n");
1409 return NULL;
1410 } else {
1411 return ref;
1412 }
1413 }
1414 return NULL;
1415}
1416
1417/**
1418 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1419 * @proc: binder_proc that owns the ref
1420 * @node: binder_node of target
1421 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1422 *
1423 * Look up the ref for the given node and return it if it exists
1424 *
1425 * If it doesn't exist and the caller provides a newly allocated
1426 * ref, initialize the fields of the newly allocated ref and insert
1427 * it into the given proc rb_trees and node refs list.
1428 *
1429 * Return: the ref for node. It is possible that another thread
1430 * allocated/initialized the ref first in which case the
1431 * returned ref would be different than the passed-in
1432 * new_ref. new_ref must be kfree'd by the caller in
1433 * this case.
1434 */
1435static struct binder_ref *binder_get_ref_for_node_olocked(
1436 struct binder_proc *proc,
1437 struct binder_node *node,
1438 struct binder_ref *new_ref)
1439{
1440 struct binder_context *context = proc->context;
1441 struct rb_node **p = &proc->refs_by_node.rb_node;
1442 struct rb_node *parent = NULL;
1443 struct binder_ref *ref;
1444 struct rb_node *n;
1445
1446 while (*p) {
1447 parent = *p;
1448 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1449
1450 if (node < ref->node)
1451 p = &(*p)->rb_left;
1452 else if (node > ref->node)
1453 p = &(*p)->rb_right;
1454 else
1455 return ref;
1456 }
1457 if (!new_ref)
1458 return NULL;
1459
1460 binder_stats_created(BINDER_STAT_REF);
1461 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1462 new_ref->proc = proc;
1463 new_ref->node = node;
1464 rb_link_node(&new_ref->rb_node_node, parent, p);
1465 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1466
1467 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1468 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1469 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1470 if (ref->data.desc > new_ref->data.desc)
1471 break;
1472 new_ref->data.desc = ref->data.desc + 1;
1473 }
1474
1475 p = &proc->refs_by_desc.rb_node;
1476 while (*p) {
1477 parent = *p;
1478 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1479
1480 if (new_ref->data.desc < ref->data.desc)
1481 p = &(*p)->rb_left;
1482 else if (new_ref->data.desc > ref->data.desc)
1483 p = &(*p)->rb_right;
1484 else
1485 BUG();
1486 }
1487 rb_link_node(&new_ref->rb_node_desc, parent, p);
1488 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1489
1490 binder_node_lock(node);
1491 hlist_add_head(&new_ref->node_entry, &node->refs);
1492
1493 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1494 "%d new ref %d desc %d for node %d\n",
1495 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1496 node->debug_id);
1497 binder_node_unlock(node);
1498 return new_ref;
1499}
1500
1501static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1502{
1503 bool delete_node = false;
1504
1505 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1506 "%d delete ref %d desc %d for node %d\n",
1507 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1508 ref->node->debug_id);
1509
1510 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1511 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1512
1513 binder_node_inner_lock(ref->node);
1514 if (ref->data.strong)
1515 binder_dec_node_nilocked(ref->node, 1, 1);
1516
1517 hlist_del(&ref->node_entry);
1518 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1519 binder_node_inner_unlock(ref->node);
1520 /*
1521 * Clear ref->node unless we want the caller to free the node
1522 */
1523 if (!delete_node) {
1524 /*
1525 * The caller uses ref->node to determine
1526 * whether the node needs to be freed. Clear
1527 * it since the node is still alive.
1528 */
1529 ref->node = NULL;
1530 }
1531
1532 if (ref->death) {
1533 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1534 "%d delete ref %d desc %d has death notification\n",
1535 ref->proc->pid, ref->data.debug_id,
1536 ref->data.desc);
1537 binder_dequeue_work(ref->proc, &ref->death->work);
1538 binder_stats_deleted(BINDER_STAT_DEATH);
1539 }
1540 binder_stats_deleted(BINDER_STAT_REF);
1541}
1542
1543/**
1544 * binder_inc_ref_olocked() - increment the ref for given handle
1545 * @ref: ref to be incremented
1546 * @strong: if true, strong increment, else weak
1547 * @target_list: list to queue node work on
1548 *
1549 * Increment the ref. @ref->proc->outer_lock must be held on entry
1550 *
1551 * Return: 0, if successful, else errno
1552 */
1553static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1554 struct list_head *target_list)
1555{
1556 int ret;
1557
1558 if (strong) {
1559 if (ref->data.strong == 0) {
1560 ret = binder_inc_node(ref->node, 1, 1, target_list);
1561 if (ret)
1562 return ret;
1563 }
1564 ref->data.strong++;
1565 } else {
1566 if (ref->data.weak == 0) {
1567 ret = binder_inc_node(ref->node, 0, 1, target_list);
1568 if (ret)
1569 return ret;
1570 }
1571 ref->data.weak++;
1572 }
1573 return 0;
1574}
1575
1576/**
1577 * binder_dec_ref_olocked() - dec the ref for given handle
1578 * @ref: ref to be decremented
1579 * @strong: if true, strong decrement, else weak
1580 *
1581 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1582 *
1583 * Return: true if ref is cleaned up and ready to be freed
1584 */
1585static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1586{
1587 if (strong) {
1588 if (ref->data.strong == 0) {
1589 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1590 ref->proc->pid, ref->data.debug_id,
1591 ref->data.desc, ref->data.strong,
1592 ref->data.weak);
1593 return false;
1594 }
1595 ref->data.strong--;
1596 if (ref->data.strong == 0)
1597 binder_dec_node(ref->node, strong, 1);
1598 } else {
1599 if (ref->data.weak == 0) {
1600 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1601 ref->proc->pid, ref->data.debug_id,
1602 ref->data.desc, ref->data.strong,
1603 ref->data.weak);
1604 return false;
1605 }
1606 ref->data.weak--;
1607 }
1608 if (ref->data.strong == 0 && ref->data.weak == 0) {
1609 binder_cleanup_ref_olocked(ref);
1610 return true;
1611 }
1612 return false;
1613}
1614
1615/**
1616 * binder_get_node_from_ref() - get the node from the given proc/desc
1617 * @proc: proc containing the ref
1618 * @desc: the handle associated with the ref
1619 * @need_strong_ref: if true, only return node if ref is strong
1620 * @rdata: the id/refcount data for the ref
1621 *
1622 * Given a proc and ref handle, return the associated binder_node
1623 *
1624 * Return: a binder_node or NULL if not found or not strong when strong required
1625 */
1626static struct binder_node *binder_get_node_from_ref(
1627 struct binder_proc *proc,
1628 u32 desc, bool need_strong_ref,
1629 struct binder_ref_data *rdata)
1630{
1631 struct binder_node *node;
1632 struct binder_ref *ref;
1633
1634 binder_proc_lock(proc);
1635 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1636 if (!ref)
1637 goto err_no_ref;
1638 node = ref->node;
1639 /*
1640 * Take an implicit reference on the node to ensure
1641 * it stays alive until the call to binder_put_node()
1642 */
1643 binder_inc_node_tmpref(node);
1644 if (rdata)
1645 *rdata = ref->data;
1646 binder_proc_unlock(proc);
1647
1648 return node;
1649
1650err_no_ref:
1651 binder_proc_unlock(proc);
1652 return NULL;
1653}
1654
1655/**
1656 * binder_free_ref() - free the binder_ref
1657 * @ref: ref to free
1658 *
1659 * Free the binder_ref. Free the binder_node indicated by ref->node
1660 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1661 */
1662static void binder_free_ref(struct binder_ref *ref)
1663{
1664 if (ref->node)
1665 binder_free_node(ref->node);
1666 kfree(ref->death);
1667 kfree(ref);
1668}
1669
1670/**
1671 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1672 * @proc: proc containing the ref
1673 * @desc: the handle associated with the ref
1674 * @increment: true=inc reference, false=dec reference
1675 * @strong: true=strong reference, false=weak reference
1676 * @rdata: the id/refcount data for the ref
1677 *
1678 * Given a proc and ref handle, increment or decrement the ref
1679 * according to "increment" arg.
1680 *
1681 * Return: 0 if successful, else errno
1682 */
1683static int binder_update_ref_for_handle(struct binder_proc *proc,
1684 uint32_t desc, bool increment, bool strong,
1685 struct binder_ref_data *rdata)
1686{
1687 int ret = 0;
1688 struct binder_ref *ref;
1689 bool delete_ref = false;
1690
1691 binder_proc_lock(proc);
1692 ref = binder_get_ref_olocked(proc, desc, strong);
1693 if (!ref) {
1694 ret = -EINVAL;
1695 goto err_no_ref;
1696 }
1697 if (increment)
1698 ret = binder_inc_ref_olocked(ref, strong, NULL);
1699 else
1700 delete_ref = binder_dec_ref_olocked(ref, strong);
1701
1702 if (rdata)
1703 *rdata = ref->data;
1704 binder_proc_unlock(proc);
1705
1706 if (delete_ref)
1707 binder_free_ref(ref);
1708 return ret;
1709
1710err_no_ref:
1711 binder_proc_unlock(proc);
1712 return ret;
1713}
1714
1715/**
1716 * binder_dec_ref_for_handle() - dec the ref for given handle
1717 * @proc: proc containing the ref
1718 * @desc: the handle associated with the ref
1719 * @strong: true=strong reference, false=weak reference
1720 * @rdata: the id/refcount data for the ref
1721 *
1722 * Just calls binder_update_ref_for_handle() to decrement the ref.
1723 *
1724 * Return: 0 if successful, else errno
1725 */
1726static int binder_dec_ref_for_handle(struct binder_proc *proc,
1727 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1728{
1729 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1730}
1731
1732
1733/**
1734 * binder_inc_ref_for_node() - increment the ref for given proc/node
1735 * @proc: proc containing the ref
1736 * @node: target node
1737 * @strong: true=strong reference, false=weak reference
1738 * @target_list: worklist to use if node is incremented
1739 * @rdata: the id/refcount data for the ref
1740 *
1741 * Given a proc and node, increment the ref. Create the ref if it
1742 * doesn't already exist
1743 *
1744 * Return: 0 if successful, else errno
1745 */
1746static int binder_inc_ref_for_node(struct binder_proc *proc,
1747 struct binder_node *node,
1748 bool strong,
1749 struct list_head *target_list,
1750 struct binder_ref_data *rdata)
1751{
1752 struct binder_ref *ref;
1753 struct binder_ref *new_ref = NULL;
1754 int ret = 0;
1755
1756 binder_proc_lock(proc);
1757 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1758 if (!ref) {
1759 binder_proc_unlock(proc);
1760 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1761 if (!new_ref)
1762 return -ENOMEM;
1763 binder_proc_lock(proc);
1764 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1765 }
1766 ret = binder_inc_ref_olocked(ref, strong, target_list);
1767 *rdata = ref->data;
1768 binder_proc_unlock(proc);
1769 if (new_ref && ref != new_ref)
1770 /*
1771 * Another thread created the ref first so
1772 * free the one we allocated
1773 */
1774 kfree(new_ref);
1775 return ret;
1776}
1777
1778static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1779 struct binder_transaction *t)
1780{
1781 BUG_ON(!target_thread);
1782 assert_spin_locked(&target_thread->proc->inner_lock);
1783 BUG_ON(target_thread->transaction_stack != t);
1784 BUG_ON(target_thread->transaction_stack->from != target_thread);
1785 target_thread->transaction_stack =
1786 target_thread->transaction_stack->from_parent;
1787 t->from = NULL;
1788}
1789
1790/**
1791 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1792 * @thread: thread to decrement
1793 *
1794 * A thread needs to be kept alive while being used to create or
1795 * handle a transaction. binder_get_txn_from() is used to safely
1796 * extract t->from from a binder_transaction and keep the thread
1797 * indicated by t->from from being freed. When done with that
1798 * binder_thread, this function is called to decrement the
1799 * tmp_ref and free if appropriate (thread has been released
1800 * and no transaction being processed by the driver)
1801 */
1802static void binder_thread_dec_tmpref(struct binder_thread *thread)
1803{
1804 /*
1805	 * tmp_ref is atomic since it may be modified without holding
1806	 * proc->inner_lock; free only when dead and the count is zero
1807 */
1808 binder_inner_proc_lock(thread->proc);
1809 atomic_dec(&thread->tmp_ref);
1810 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1811 binder_inner_proc_unlock(thread->proc);
1812 binder_free_thread(thread);
1813 return;
1814 }
1815 binder_inner_proc_unlock(thread->proc);
1816}
1817
1818/**
1819 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1820 * @proc: proc to decrement
1821 *
1822 * A binder_proc needs to be kept alive while being used to create or
1823 * handle a transaction. proc->tmp_ref is incremented when
1824 * creating a new transaction or the binder_proc is currently in-use
1825 * by threads that are being released. When done with the binder_proc,
1826 * this function is called to decrement the counter and free the
1827 * proc if appropriate (proc has been released, all threads have
1828 * been released and not currently in use to process a transaction).
1829 */
1830static void binder_proc_dec_tmpref(struct binder_proc *proc)
1831{
1832 binder_inner_proc_lock(proc);
1833 proc->tmp_ref--;
1834 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1835 !proc->tmp_ref) {
1836 binder_inner_proc_unlock(proc);
1837 binder_free_proc(proc);
1838 return;
1839 }
1840 binder_inner_proc_unlock(proc);
1841}
1842
1843/**
1844 * binder_get_txn_from() - safely extract the "from" thread in transaction
1845 * @t: binder transaction for t->from
1846 *
1847 * Atomically return the "from" thread and increment the tmp_ref
1848 * count for the thread to ensure it stays alive until
1849 * binder_thread_dec_tmpref() is called.
1850 *
1851 * Return: the value of t->from
1852 */
1853static struct binder_thread *binder_get_txn_from(
1854 struct binder_transaction *t)
1855{
1856 struct binder_thread *from;
1857
1858 spin_lock(&t->lock);
1859 from = t->from;
1860 if (from)
1861 atomic_inc(&from->tmp_ref);
1862 spin_unlock(&t->lock);
1863 return from;
1864}
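
/*
 * Usage sketch: the thread returned by binder_get_txn_from() holds a
 * tmp_ref and must be released with binder_thread_dec_tmpref():
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		...
 *		binder_thread_dec_tmpref(from);
 *	}
 */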
1865
1866/**
1867 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1868 * @t: binder transaction for t->from
1869 *
1870 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1871 * to guarantee that the thread cannot be released while operating on it.
1872 * The caller must call binder_inner_proc_unlock() to release the inner lock
1873 * as well as call binder_dec_thread_txn() to release the reference.
1874 *
1875 * Return: the value of t->from
1876 */
1877static struct binder_thread *binder_get_txn_from_and_acq_inner(
1878 struct binder_transaction *t)
1879 __acquires(&t->from->proc->inner_lock)
1880{
1881 struct binder_thread *from;
1882
1883 from = binder_get_txn_from(t);
1884 if (!from) {
1885 __acquire(&from->proc->inner_lock);
1886 return NULL;
1887 }
1888 binder_inner_proc_lock(from->proc);
1889 if (t->from) {
1890 BUG_ON(from != t->from);
1891 return from;
1892 }
1893 binder_inner_proc_unlock(from->proc);
1894 __acquire(&from->proc->inner_lock);
1895 binder_thread_dec_tmpref(from);
1896 return NULL;
1897}
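
/*
 * Caller contract sketch (binder_send_failed_reply() below follows
 * this pattern): release the inner lock first, then the thread
 * reference:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */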
1898
1899/**
1900 * binder_free_txn_fixups() - free unprocessed fd fixups
1901 * @t: binder transaction whose fd fixups are to be freed
1902 *
1903 * If the transaction is being torn down prior to being
1904 * processed by the target process, free all of the
1905 * fd fixups and fput the file structs. It is safe to
1906 * call this function after the fixups have been
1907 * processed -- in that case, the list will be empty.
1908 */
1909static void binder_free_txn_fixups(struct binder_transaction *t)
1910{
1911 struct binder_txn_fd_fixup *fixup, *tmp;
1912
1913 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1914 fput(fixup->file);
1915 list_del(&fixup->fixup_entry);
1916 kfree(fixup);
1917 }
1918}
1919
1920static void binder_free_transaction(struct binder_transaction *t)
1921{
1922 struct binder_proc *target_proc = t->to_proc;
1923
1924 if (target_proc) {
1925 binder_inner_proc_lock(target_proc);
1926 if (t->buffer)
1927 t->buffer->transaction = NULL;
1928 binder_inner_proc_unlock(target_proc);
1929 }
1930 /*
1931 * If the transaction has no target_proc, then
1932 * t->buffer->transaction has already been cleared.
1933 */
1934 binder_free_txn_fixups(t);
1935 kfree(t);
1936 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1937}
1938
1939static void binder_send_failed_reply(struct binder_transaction *t,
1940 uint32_t error_code)
1941{
1942 struct binder_thread *target_thread;
1943 struct binder_transaction *next;
1944
1945 BUG_ON(t->flags & TF_ONE_WAY);
1946 while (1) {
1947 target_thread = binder_get_txn_from_and_acq_inner(t);
1948 if (target_thread) {
1949 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1950 "send failed reply for transaction %d to %d:%d\n",
1951 t->debug_id,
1952 target_thread->proc->pid,
1953 target_thread->pid);
1954
1955 binder_pop_transaction_ilocked(target_thread, t);
1956 if (target_thread->reply_error.cmd == BR_OK) {
1957 target_thread->reply_error.cmd = error_code;
1958 binder_enqueue_thread_work_ilocked(
1959 target_thread,
1960 &target_thread->reply_error.work);
1961 wake_up_interruptible(&target_thread->wait);
1962 } else {
1963 /*
1964 * Cannot get here for normal operation, but
1965 * we can if multiple synchronous transactions
1966 * are sent without blocking for responses.
1967 * Just ignore the 2nd error in this case.
1968 */
1969 pr_warn("Unexpected reply error: %u\n",
1970 target_thread->reply_error.cmd);
1971 }
1972 binder_inner_proc_unlock(target_thread->proc);
1973 binder_thread_dec_tmpref(target_thread);
1974 binder_free_transaction(t);
1975 return;
1976 } else {
1977 __release(&target_thread->proc->inner_lock);
1978 }
1979 next = t->from_parent;
1980
1981 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1982 "send failed reply for transaction %d, target dead\n",
1983 t->debug_id);
1984
1985 binder_free_transaction(t);
1986 if (next == NULL) {
1987 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1988 "reply failed, no target thread at root\n");
1989 return;
1990 }
1991 t = next;
1992 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1993 "reply failed, no target thread -- retry %d\n",
1994 t->debug_id);
1995 }
1996}
1997
1998/**
1999 * binder_cleanup_transaction() - cleans up undelivered transaction
2000 * @t: transaction that needs to be cleaned up
2001 * @reason: reason the transaction wasn't delivered
2002 * @error_code: error to return to caller (if synchronous call)
2003 */
2004static void binder_cleanup_transaction(struct binder_transaction *t,
2005 const char *reason,
2006 uint32_t error_code)
2007{
2008 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2009 binder_send_failed_reply(t, error_code);
2010 } else {
2011 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2012 "undelivered transaction %d, %s\n",
2013 t->debug_id, reason);
2014 binder_free_transaction(t);
2015 }
2016}
2017
2018/**
2019 * binder_get_object() - gets object and checks for valid metadata
2020 * @proc: binder_proc owning the buffer
2021 * @buffer: binder_buffer that we're parsing.
2022 * @offset: offset in the @buffer at which to validate an object.
2023 * @object: struct binder_object to read into
2024 *
2025 * Return: If there's a valid metadata object at @offset in @buffer, the
2026 * size of that object. Otherwise, it returns zero. The object
2027 * is read into the struct binder_object pointed to by @object.
2028 */
2029static size_t binder_get_object(struct binder_proc *proc,
2030 struct binder_buffer *buffer,
2031 unsigned long offset,
2032 struct binder_object *object)
2033{
2034 size_t read_size;
2035 struct binder_object_header *hdr;
2036 size_t object_size = 0;
2037
2038 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2039 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2040 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2041 offset, read_size))
2042 return 0;
2043
2044 /* Ok, now see if we read a complete object. */
2045 hdr = &object->hdr;
2046 switch (hdr->type) {
2047 case BINDER_TYPE_BINDER:
2048 case BINDER_TYPE_WEAK_BINDER:
2049 case BINDER_TYPE_HANDLE:
2050 case BINDER_TYPE_WEAK_HANDLE:
2051 object_size = sizeof(struct flat_binder_object);
2052 break;
2053 case BINDER_TYPE_FD:
2054 object_size = sizeof(struct binder_fd_object);
2055 break;
2056 case BINDER_TYPE_PTR:
2057 object_size = sizeof(struct binder_buffer_object);
2058 break;
2059 case BINDER_TYPE_FDA:
2060 object_size = sizeof(struct binder_fd_array_object);
2061 break;
2062 default:
2063 return 0;
2064 }
2065 if (offset <= buffer->data_size - object_size &&
2066 buffer->data_size >= object_size)
2067 return object_size;
2068 else
2069 return 0;
2070}
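
/*
 * Usage sketch (as in binder_validate_ptr() below): check both the
 * returned size and the header type before trusting the object. Here
 * "object" is a struct binder_object on the caller's stack:
 *
 *	object_size = binder_get_object(proc, b, object_offset, &object);
 *	if (!object_size || object.hdr.type != BINDER_TYPE_PTR)
 *		return NULL;
 */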
2071
2072/**
2073 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2074 * @proc: binder_proc owning the buffer
2075 * @b: binder_buffer containing the object
2076 * @object: struct binder_object to read into
2077 * @index: index in offset array at which the binder_buffer_object is
2078 * located
2079 * @start_offset: points to the start of the offset array
2080 * @object_offsetp: offset of @object read from @b
2081 * @num_valid: the number of valid offsets in the offset array
2082 *
2083 * Return: If @index is within the valid range of the offset array
2084 * described by @start and @num_valid, and if there's a valid
2085 * binder_buffer_object at the offset found in index @index
2086 * of the offset array, that object is returned. Otherwise,
2087 * %NULL is returned.
2088 * Note that the offset found in index @index itself is not
2089 * verified; this function assumes that @num_valid elements
2090 * from @start were previously verified to have valid offsets.
2091 * If @object_offsetp is non-NULL, then the offset within
2092 * @b is written to it.
2093 */
2094static struct binder_buffer_object *binder_validate_ptr(
2095 struct binder_proc *proc,
2096 struct binder_buffer *b,
2097 struct binder_object *object,
2098 binder_size_t index,
2099 binder_size_t start_offset,
2100 binder_size_t *object_offsetp,
2101 binder_size_t num_valid)
2102{
2103 size_t object_size;
2104 binder_size_t object_offset;
2105 unsigned long buffer_offset;
2106
2107 if (index >= num_valid)
2108 return NULL;
2109
2110 buffer_offset = start_offset + sizeof(binder_size_t) * index;
2111 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2112 b, buffer_offset,
2113 sizeof(object_offset)))
2114 return NULL;
2115 object_size = binder_get_object(proc, b, object_offset, object);
2116 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2117 return NULL;
2118 if (object_offsetp)
2119 *object_offsetp = object_offset;
2120
2121 return &object->bbo;
2122}
2123
2124/**
2125 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2126 * @proc: binder_proc owning the buffer
2127 * @b: transaction buffer
2128 * @objects_start_offset: offset to start of objects buffer
2129 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2130 * @fixup_offset: start offset in @buffer to fix up
2131 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2132 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2133 *
2134 * Return: %true if a fixup at @fixup_offset in buffer @b is
2135 * allowed.
2136 *
2137 * For safety reasons, we only allow fixups inside a buffer to happen
2138 * at increasing offsets; additionally, we only allow fixup on the last
2139 * buffer object that was verified, or one of its parents.
2140 *
2141 * Example of what is allowed:
2142 *
2143 * A
2144 * B (parent = A, offset = 0)
2145 * C (parent = A, offset = 16)
2146 * D (parent = C, offset = 0)
2147 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2148 *
2149 * Examples of what is not allowed:
2150 *
2151 * Decreasing offsets within the same parent:
2152 * A
2153 * C (parent = A, offset = 16)
2154 * B (parent = A, offset = 0) // decreasing offset within A
2155 *
2156 * Referring to a parent that wasn't the last object or any of its parents:
2157 * A
2158 * B (parent = A, offset = 0)
2159 * C (parent = A, offset = 0)
2160 * C (parent = A, offset = 16)
2161 * D (parent = B, offset = 0) // B is not A or any of A's parents
2162 */
2163static bool binder_validate_fixup(struct binder_proc *proc,
2164 struct binder_buffer *b,
2165 binder_size_t objects_start_offset,
2166 binder_size_t buffer_obj_offset,
2167 binder_size_t fixup_offset,
2168 binder_size_t last_obj_offset,
2169 binder_size_t last_min_offset)
2170{
2171 if (!last_obj_offset) {
2172		/* Nothing verified yet, so nothing to fix up */
2173 return false;
2174 }
2175
2176 while (last_obj_offset != buffer_obj_offset) {
2177 unsigned long buffer_offset;
2178 struct binder_object last_object;
2179 struct binder_buffer_object *last_bbo;
2180 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2181 &last_object);
2182 if (object_size != sizeof(*last_bbo))
2183 return false;
2184
2185 last_bbo = &last_object.bbo;
2186 /*
2187 * Safe to retrieve the parent of last_obj, since it
2188 * was already previously verified by the driver.
2189 */
2190 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2191 return false;
2192 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2193 buffer_offset = objects_start_offset +
2194 sizeof(binder_size_t) * last_bbo->parent;
2195 if (binder_alloc_copy_from_buffer(&proc->alloc,
2196 &last_obj_offset,
2197 b, buffer_offset,
2198 sizeof(last_obj_offset)))
2199 return false;
2200 }
2201 return (fixup_offset >= last_min_offset);
2202}
2203
2204/**
2205 * struct binder_task_work_cb - for deferred close
2206 *
2207 * @twork: callback_head for task work
2208 * @file: file to fput
2209 *
2210 * Structure to pass task work to be handled after
2211 * returning from binder_ioctl() via task_work_add().
2212 */
2213struct binder_task_work_cb {
2214 struct callback_head twork;
2215 struct file *file;
2216};
2217
2218/**
2219 * binder_do_fd_close() - close a file scheduled via task work
2220 * @twork: callback head for task work
2221 *
2222 * It is not safe to call ksys_close() during the binder_ioctl()
2223 * function if there is a chance that binder's own file descriptor
2224 * might be closed. This is to meet the requirements for using
2225 * fdget() (see comments for __fget_light()). Therefore use
2226 * task_work_add() to schedule the close operation once we have
2227 * returned from binder_ioctl(). This function is a callback
2228 * for that mechanism and does the actual fput() on the file
2229 * backing the descriptor being closed.
2230 */
2231static void binder_do_fd_close(struct callback_head *twork)
2232{
2233 struct binder_task_work_cb *twcb = container_of(twork,
2234 struct binder_task_work_cb, twork);
2235
2236 fput(twcb->file);
2237 kfree(twcb);
2238}
2239
2240/**
2241 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2242 * @fd: file-descriptor to close
2243 *
2244 * See comments in binder_do_fd_close(). This function is used to schedule
2245 * a file-descriptor to be closed after returning from binder_ioctl().
2246 */
2247static void binder_deferred_fd_close(int fd)
2248{
2249 struct binder_task_work_cb *twcb;
2250
2251 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2252 if (!twcb)
2253 return;
2254 init_task_work(&twcb->twork, binder_do_fd_close);
2255 __close_fd_get_file(fd, &twcb->file);
2256 if (twcb->file)
2257 task_work_add(current, &twcb->twork, true);
2258 else
2259 kfree(twcb);
2260}
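
/*
 * Usage sketch (the BINDER_TYPE_FDA release loop in
 * binder_transaction_buffer_release() below does exactly this): read
 * the fd out of the buffer, then schedule the deferred close:
 *
 *	if (!binder_alloc_copy_from_buffer(&proc->alloc, &fd, buffer,
 *					   offset, sizeof(fd)))
 *		binder_deferred_fd_close(fd);
 */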
2261
2262static void binder_transaction_buffer_release(struct binder_proc *proc,
2263 struct binder_buffer *buffer,
2264 binder_size_t failed_at,
2265 bool is_failure)
2266{
2267 int debug_id = buffer->debug_id;
2268 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2269
2270 binder_debug(BINDER_DEBUG_TRANSACTION,
2271 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2272 proc->pid, buffer->debug_id,
2273 buffer->data_size, buffer->offsets_size,
2274 (unsigned long long)failed_at);
2275
2276 if (buffer->target_node)
2277 binder_dec_node(buffer->target_node, 1, 0);
2278
2279 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2280 off_end_offset = is_failure ? failed_at :
2281 off_start_offset + buffer->offsets_size;
2282 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2283 buffer_offset += sizeof(binder_size_t)) {
2284 struct binder_object_header *hdr;
2285 size_t object_size = 0;
2286 struct binder_object object;
2287 binder_size_t object_offset;
2288
2289 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2290 buffer, buffer_offset,
2291 sizeof(object_offset)))
2292 object_size = binder_get_object(proc, buffer,
2293 object_offset, &object);
2294 if (object_size == 0) {
2295 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2296 debug_id, (u64)object_offset, buffer->data_size);
2297 continue;
2298 }
2299 hdr = &object.hdr;
2300 switch (hdr->type) {
2301 case BINDER_TYPE_BINDER:
2302 case BINDER_TYPE_WEAK_BINDER: {
2303 struct flat_binder_object *fp;
2304 struct binder_node *node;
2305
2306 fp = to_flat_binder_object(hdr);
2307 node = binder_get_node(proc, fp->binder);
2308 if (node == NULL) {
2309 pr_err("transaction release %d bad node %016llx\n",
2310 debug_id, (u64)fp->binder);
2311 break;
2312 }
2313 binder_debug(BINDER_DEBUG_TRANSACTION,
2314 " node %d u%016llx\n",
2315 node->debug_id, (u64)node->ptr);
2316 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2317 0);
2318 binder_put_node(node);
2319 } break;
2320 case BINDER_TYPE_HANDLE:
2321 case BINDER_TYPE_WEAK_HANDLE: {
2322 struct flat_binder_object *fp;
2323 struct binder_ref_data rdata;
2324 int ret;
2325
2326 fp = to_flat_binder_object(hdr);
2327 ret = binder_dec_ref_for_handle(proc, fp->handle,
2328 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2329
2330 if (ret) {
2331 pr_err("transaction release %d bad handle %d, ret = %d\n",
2332 debug_id, fp->handle, ret);
2333 break;
2334 }
2335 binder_debug(BINDER_DEBUG_TRANSACTION,
2336 " ref %d desc %d\n",
2337 rdata.debug_id, rdata.desc);
2338 } break;
2339
2340 case BINDER_TYPE_FD: {
2341 /*
2342 * No need to close the file here since user-space
2343			 * closes it for successfully delivered
2344			 * transactions. For transactions that weren't
2345			 * delivered, the new fd was never allocated, so
2346			 * there is nothing to close; the fput on the
2347			 * file is done when the transaction is torn
2348			 * down.
2349 */
2350 WARN_ON(failed_at &&
2351 proc->tsk == current->group_leader);
2352 } break;
2353 case BINDER_TYPE_PTR:
2354 /*
2355 * Nothing to do here, this will get cleaned up when the
2356 * transaction buffer gets freed
2357 */
2358 break;
2359 case BINDER_TYPE_FDA: {
2360 struct binder_fd_array_object *fda;
2361 struct binder_buffer_object *parent;
2362 struct binder_object ptr_object;
2363 binder_size_t fda_offset;
2364 size_t fd_index;
2365 binder_size_t fd_buf_size;
2366 binder_size_t num_valid;
2367
2368 if (proc->tsk != current->group_leader) {
2369 /*
2370 * Nothing to do if running in sender context
2371 * The fd fixups have not been applied so no
2372 * fds need to be closed.
2373 */
2374 continue;
2375 }
2376
2377 num_valid = (buffer_offset - off_start_offset) /
2378 sizeof(binder_size_t);
2379 fda = to_binder_fd_array_object(hdr);
2380 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2381 fda->parent,
2382 off_start_offset,
2383 NULL,
2384 num_valid);
2385 if (!parent) {
2386 pr_err("transaction release %d bad parent offset\n",
2387 debug_id);
2388 continue;
2389 }
2390 fd_buf_size = sizeof(u32) * fda->num_fds;
2391 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2392 pr_err("transaction release %d invalid number of fds (%lld)\n",
2393 debug_id, (u64)fda->num_fds);
2394 continue;
2395 }
2396 if (fd_buf_size > parent->length ||
2397 fda->parent_offset > parent->length - fd_buf_size) {
2398 /* No space for all file descriptors here. */
2399 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2400 debug_id, (u64)fda->num_fds);
2401 continue;
2402 }
2403 /*
2404 * the source data for binder_buffer_object is visible
2405 * to user-space and the @buffer element is the user
2406 * pointer to the buffer_object containing the fd_array.
2407 * Convert the address to an offset relative to
2408 * the base of the transaction buffer.
2409 */
2410 fda_offset =
2411 (parent->buffer - (uintptr_t)buffer->user_data) +
2412 fda->parent_offset;
2413 for (fd_index = 0; fd_index < fda->num_fds;
2414 fd_index++) {
2415 u32 fd;
2416 int err;
2417 binder_size_t offset = fda_offset +
2418 fd_index * sizeof(fd);
2419
2420 err = binder_alloc_copy_from_buffer(
2421 &proc->alloc, &fd, buffer,
2422 offset, sizeof(fd));
2423 WARN_ON(err);
2424 if (!err)
2425 binder_deferred_fd_close(fd);
2426 }
2427 } break;
2428 default:
2429 pr_err("transaction release %d bad object type %x\n",
2430 debug_id, hdr->type);
2431 break;
2432 }
2433 }
2434}
2435
2436static int binder_translate_binder(struct flat_binder_object *fp,
2437 struct binder_transaction *t,
2438 struct binder_thread *thread)
2439{
2440 struct binder_node *node;
2441 struct binder_proc *proc = thread->proc;
2442 struct binder_proc *target_proc = t->to_proc;
2443 struct binder_ref_data rdata;
2444 int ret = 0;
2445
2446 node = binder_get_node(proc, fp->binder);
2447 if (!node) {
2448 node = binder_new_node(proc, fp);
2449 if (!node)
2450 return -ENOMEM;
2451 }
2452 if (fp->cookie != node->cookie) {
2453 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2454 proc->pid, thread->pid, (u64)fp->binder,
2455 node->debug_id, (u64)fp->cookie,
2456 (u64)node->cookie);
2457 ret = -EINVAL;
2458 goto done;
2459 }
2460 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2461 ret = -EPERM;
2462 goto done;
2463 }
2464
2465 ret = binder_inc_ref_for_node(target_proc, node,
2466 fp->hdr.type == BINDER_TYPE_BINDER,
2467 &thread->todo, &rdata);
2468 if (ret)
2469 goto done;
2470
2471 if (fp->hdr.type == BINDER_TYPE_BINDER)
2472 fp->hdr.type = BINDER_TYPE_HANDLE;
2473 else
2474 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2475 fp->binder = 0;
2476 fp->handle = rdata.desc;
2477 fp->cookie = 0;
2478
2479 trace_binder_transaction_node_to_ref(t, node, &rdata);
2480 binder_debug(BINDER_DEBUG_TRANSACTION,
2481 " node %d u%016llx -> ref %d desc %d\n",
2482 node->debug_id, (u64)node->ptr,
2483 rdata.debug_id, rdata.desc);
2484done:
2485 binder_put_node(node);
2486 return ret;
2487}
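
/*
 * Illustrative sketch, not driver code: the effect of
 * binder_translate_binder() on a flat_binder_object as user space
 * sees it. All field values below are hypothetical.
 *
 *	// Sender writes a local (strong) binder object:
 *	struct flat_binder_object fbo = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.binder = (binder_uintptr_t)local_object_ptr,
 *		.cookie = (binder_uintptr_t)local_cookie,
 *	};
 *	// After translation, the receiver reads from the same offset:
 *	//	fbo.hdr.type == BINDER_TYPE_HANDLE
 *	//	fbo.handle   == rdata.desc  (valid only in the target proc)
 *	//	fbo.binder   == 0 && fbo.cookie == 0
 */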
2488
2489static int binder_translate_handle(struct flat_binder_object *fp,
2490 struct binder_transaction *t,
2491 struct binder_thread *thread)
2492{
2493 struct binder_proc *proc = thread->proc;
2494 struct binder_proc *target_proc = t->to_proc;
2495 struct binder_node *node;
2496 struct binder_ref_data src_rdata;
2497 int ret = 0;
2498
2499 node = binder_get_node_from_ref(proc, fp->handle,
2500 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2501 if (!node) {
2502 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2503 proc->pid, thread->pid, fp->handle);
2504 return -EINVAL;
2505 }
2506 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2507 ret = -EPERM;
2508 goto done;
2509 }
2510
2511 binder_node_lock(node);
2512 if (node->proc == target_proc) {
2513 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2514 fp->hdr.type = BINDER_TYPE_BINDER;
2515 else
2516 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2517 fp->binder = node->ptr;
2518 fp->cookie = node->cookie;
2519 if (node->proc)
2520 binder_inner_proc_lock(node->proc);
2521 else
2522 __acquire(&node->proc->inner_lock);
2523 binder_inc_node_nilocked(node,
2524 fp->hdr.type == BINDER_TYPE_BINDER,
2525 0, NULL);
2526 if (node->proc)
2527 binder_inner_proc_unlock(node->proc);
2528 else
2529 __release(&node->proc->inner_lock);
2530 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2531 binder_debug(BINDER_DEBUG_TRANSACTION,
2532 " ref %d desc %d -> node %d u%016llx\n",
2533 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2534 (u64)node->ptr);
2535 binder_node_unlock(node);
2536 } else {
2537 struct binder_ref_data dest_rdata;
2538
2539 binder_node_unlock(node);
2540 ret = binder_inc_ref_for_node(target_proc, node,
2541 fp->hdr.type == BINDER_TYPE_HANDLE,
2542 NULL, &dest_rdata);
2543 if (ret)
2544 goto done;
2545
2546 fp->binder = 0;
2547 fp->handle = dest_rdata.desc;
2548 fp->cookie = 0;
2549 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2550 &dest_rdata);
2551 binder_debug(BINDER_DEBUG_TRANSACTION,
2552 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2553 src_rdata.debug_id, src_rdata.desc,
2554 dest_rdata.debug_id, dest_rdata.desc,
2555 node->debug_id);
2556 }
2557done:
2558 binder_put_node(node);
2559 return ret;
2560}
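
/*
 * Illustrative summary, not driver code: binder_translate_handle()
 * picks one of two rewrites depending on where the node lives.
 *
 *	node->proc == target_proc:  handle -> (WEAK_)BINDER
 *		fp->binder = node->ptr, fp->cookie = node->cookie
 *		(the object is local to the receiver again)
 *	node->proc != target_proc:  handle -> (WEAK_)HANDLE
 *		fp->handle = dest_rdata.desc
 *		(a new or existing ref in the receiver's proc)
 */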
2561
2562static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2563 struct binder_transaction *t,
2564 struct binder_thread *thread,
2565 struct binder_transaction *in_reply_to)
2566{
2567 struct binder_proc *proc = thread->proc;
2568 struct binder_proc *target_proc = t->to_proc;
2569 struct binder_txn_fd_fixup *fixup;
2570 struct file *file;
2571 int ret = 0;
2572 bool target_allows_fd;
2573
2574 if (in_reply_to)
2575 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2576 else
2577 target_allows_fd = t->buffer->target_node->accept_fds;
2578 if (!target_allows_fd) {
2579 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2580 proc->pid, thread->pid,
2581 in_reply_to ? "reply" : "transaction",
2582 fd);
2583 ret = -EPERM;
2584 goto err_fd_not_accepted;
2585 }
2586
2587 file = fget(fd);
2588 if (!file) {
2589 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2590 proc->pid, thread->pid, fd);
2591 ret = -EBADF;
2592 goto err_fget;
2593 }
2594 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2595 if (ret < 0) {
2596 ret = -EPERM;
2597 goto err_security;
2598 }
2599
2600 /*
2601 * Add fixup record for this transaction. The allocation
2602 * of the fd in the target needs to be done from a
2603 * target thread.
2604 */
2605 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2606 if (!fixup) {
2607 ret = -ENOMEM;
2608 goto err_alloc;
2609 }
2610 fixup->file = file;
2611 fixup->offset = fd_offset;
2612 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2613 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2614
2615 return ret;
2616
2617err_alloc:
2618err_security:
2619 fput(file);
2620err_fget:
2621err_fd_not_accepted:
2622 return ret;
2623}
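
/*
 * Illustrative sketch, not driver code: the life cycle of one fd that
 * crosses a transaction. The sender-side work above only captures the
 * struct file; the fd number in the target is chosen later, in target
 * context, by binder_apply_fd_fixups():
 *
 *	sender:	file = fget(fd);		// binder_translate_fd()
 *		list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
 *	target:	fd = get_unused_fd_flags(O_CLOEXEC);
 *		fd_install(fd, fixup->file);	// binder_apply_fd_fixups()
 *		// the new fd value is patched into the buffer at
 *		// fixup->offset
 */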
2624
2625static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2626 struct binder_buffer_object *parent,
2627 struct binder_transaction *t,
2628 struct binder_thread *thread,
2629 struct binder_transaction *in_reply_to)
2630{
2631 binder_size_t fdi, fd_buf_size;
2632 binder_size_t fda_offset;
2633 struct binder_proc *proc = thread->proc;
2634 struct binder_proc *target_proc = t->to_proc;
2635
2636 fd_buf_size = sizeof(u32) * fda->num_fds;
2637 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2638 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2639 proc->pid, thread->pid, (u64)fda->num_fds);
2640 return -EINVAL;
2641 }
2642 if (fd_buf_size > parent->length ||
2643 fda->parent_offset > parent->length - fd_buf_size) {
2644 /* No space for all file descriptors here. */
2645 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2646 proc->pid, thread->pid, (u64)fda->num_fds);
2647 return -EINVAL;
2648 }
2649 /*
2650 * the source data for binder_buffer_object is visible
2651 * to user-space and the @buffer element is the user
2652 * pointer to the buffer_object containing the fd_array.
2653 * Convert the address to an offset relative to
2654 * the base of the transaction buffer.
2655 */
2656 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2657 fda->parent_offset;
2658 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2659 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2660 proc->pid, thread->pid);
2661 return -EINVAL;
2662 }
2663 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2664 u32 fd;
2665 int ret;
2666 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2667
2668 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2669 &fd, t->buffer,
2670 offset, sizeof(fd));
2671 if (!ret)
2672 ret = binder_translate_fd(fd, offset, t, thread,
2673 in_reply_to);
2674 if (ret < 0)
2675 return ret;
2676 }
2677 return 0;
2678}
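
/*
 * Worked example (hypothetical addresses) of the fda_offset math used
 * above. Suppose the transaction buffer starts at user address
 * 0x7f00001000 (t->buffer->user_data), the parent buffer object's
 * payload was copied to user address 0x7f00001100 (parent->buffer),
 * and fda->parent_offset is 0x20:
 *
 *	fda_offset = (0x7f00001100 - 0x7f00001000) + 0x20 = 0x120
 *
 * The i-th fd then lives at buffer offset 0x120 + i * sizeof(u32),
 * which is what binder_alloc_copy_from_buffer() reads in the loop.
 */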
2679
2680static int binder_fixup_parent(struct binder_transaction *t,
2681 struct binder_thread *thread,
2682 struct binder_buffer_object *bp,
2683 binder_size_t off_start_offset,
2684 binder_size_t num_valid,
2685 binder_size_t last_fixup_obj_off,
2686 binder_size_t last_fixup_min_off)
2687{
2688 struct binder_buffer_object *parent;
2689 struct binder_buffer *b = t->buffer;
2690 struct binder_proc *proc = thread->proc;
2691 struct binder_proc *target_proc = t->to_proc;
2692 struct binder_object object;
2693 binder_size_t buffer_offset;
2694 binder_size_t parent_offset;
2695
2696 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2697 return 0;
2698
2699 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2700 off_start_offset, &parent_offset,
2701 num_valid);
2702 if (!parent) {
2703 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2704 proc->pid, thread->pid);
2705 return -EINVAL;
2706 }
2707
2708 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2709 parent_offset, bp->parent_offset,
2710 last_fixup_obj_off,
2711 last_fixup_min_off)) {
2712 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2713 proc->pid, thread->pid);
2714 return -EINVAL;
2715 }
2716
2717 if (parent->length < sizeof(binder_uintptr_t) ||
2718 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2719 /* No space for a pointer here! */
2720 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2721 proc->pid, thread->pid);
2722 return -EINVAL;
2723 }
2724 buffer_offset = bp->parent_offset +
2725 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2726 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2727 &bp->buffer, sizeof(bp->buffer))) {
2728 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2729 proc->pid, thread->pid);
2730 return -EINVAL;
2731 }
2732
2733 return 0;
2734}
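
/*
 * Worked example (hypothetical values) of the fixup written above. If
 * the parent object's payload sits at user address 0x7f00002000 in the
 * target (parent->buffer), the transaction buffer starts at
 * 0x7f00001000 (b->user_data), and bp->parent_offset is 0x40, then:
 *
 *	buffer_offset = 0x40 + 0x7f00002000 - 0x7f00001000 = 0x1040
 *
 * The target-space address of this sg buffer (bp->buffer) is written
 * into the parent's payload at that offset, so the parent structure
 * ends up pointing at valid target-space memory.
 */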
2735
2736/**
2737 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2738 * @t: transaction to send
2739 * @proc: process to send the transaction to
2740 * @thread: thread in @proc to send the transaction to (may be NULL)
2741 *
2742 * This function queues a transaction to the specified process. It will try
2743 * to find a thread in the target process to handle the transaction and
2744 * wake it up. If no thread is found, the work is queued to the proc
2745 * waitqueue.
2746 *
2747 * If the @thread parameter is not NULL, the transaction is always queued
2748 * to the waitlist of that specific thread.
2749 *
2750 * Return: true if the transaction was successfully queued
2751 * false if the target process or thread is dead
2752 */
2753static bool binder_proc_transaction(struct binder_transaction *t,
2754 struct binder_proc *proc,
2755 struct binder_thread *thread)
2756{
2757 struct binder_node *node = t->buffer->target_node;
2758 bool oneway = !!(t->flags & TF_ONE_WAY);
2759 bool pending_async = false;
2760
2761 BUG_ON(!node);
2762 binder_node_lock(node);
2763 if (oneway) {
2764 BUG_ON(thread);
2765 if (node->has_async_transaction) {
2766 pending_async = true;
2767 } else {
2768 node->has_async_transaction = true;
2769 }
2770 }
2771
2772 binder_inner_proc_lock(proc);
2773
2774 if (proc->is_dead || (thread && thread->is_dead)) {
2775 binder_inner_proc_unlock(proc);
2776 binder_node_unlock(node);
2777 return false;
2778 }
2779
2780 if (!thread && !pending_async)
2781 thread = binder_select_thread_ilocked(proc);
2782
2783 if (thread)
2784 binder_enqueue_thread_work_ilocked(thread, &t->work);
2785 else if (!pending_async)
2786 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2787 else
2788 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2789
2790 if (!pending_async)
2791 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2792
2793 binder_inner_proc_unlock(proc);
2794 binder_node_unlock(node);
2795
2796 return true;
2797}
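
/*
 * Queueing decision made above, summarized (derived from the code,
 * not a separate spec):
 *
 *	@thread given (reply path)	 -> thread->todo
 *	oneway, node already busy	 -> node->async_todo (no wakeup)
 *	otherwise, waiting thread found	 -> thread->todo + wakeup
 *	otherwise			 -> proc->todo + wakeup
 */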
2798
2799/**
2800 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2801 * @node: struct binder_node for which to get refs
2802 * @procp: returns @node->proc if valid
2803 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2804 *
2805 * User-space normally keeps the node alive when creating a transaction
2806 * since it has a reference to the target. The local strong ref keeps it
2807 * alive if the sending process dies before the target process processes
2808 * the transaction. If the source process is malicious or has a reference
2809 * counting bug, relying on the local strong ref can fail.
2810 *
2811 * Since user-space can cause the local strong ref to go away, we also take
2812 * a tmpref on the node to ensure it survives while we are constructing
2813 * the transaction. We also need a tmpref on the proc while we are
2814 * constructing the transaction, so we take that here as well.
2815 *
2816 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2817 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2818 * target proc has died, @error is set to BR_DEAD_REPLY
2819 */
2820static struct binder_node *binder_get_node_refs_for_txn(
2821 struct binder_node *node,
2822 struct binder_proc **procp,
2823 uint32_t *error)
2824{
2825 struct binder_node *target_node = NULL;
2826
2827 binder_node_inner_lock(node);
2828 if (node->proc) {
2829 target_node = node;
2830 binder_inc_node_nilocked(node, 1, 0, NULL);
2831 binder_inc_node_tmpref_ilocked(node);
2832 node->proc->tmp_ref++;
2833 *procp = node->proc;
2834 } else
2835 *error = BR_DEAD_REPLY;
2836 binder_node_inner_unlock(node);
2837
2838 return target_node;
2839}
2840
2841static void binder_transaction(struct binder_proc *proc,
2842 struct binder_thread *thread,
2843 struct binder_transaction_data *tr, int reply,
2844 binder_size_t extra_buffers_size)
2845{
2846 int ret;
2847 struct binder_transaction *t;
2848 struct binder_work *w;
2849 struct binder_work *tcomplete;
2850 binder_size_t buffer_offset = 0;
2851 binder_size_t off_start_offset, off_end_offset;
2852 binder_size_t off_min;
2853 binder_size_t sg_buf_offset, sg_buf_end_offset;
2854 struct binder_proc *target_proc = NULL;
2855 struct binder_thread *target_thread = NULL;
2856 struct binder_node *target_node = NULL;
2857 struct binder_transaction *in_reply_to = NULL;
2858 struct binder_transaction_log_entry *e;
2859 uint32_t return_error = 0;
2860 uint32_t return_error_param = 0;
2861 uint32_t return_error_line = 0;
2862 binder_size_t last_fixup_obj_off = 0;
2863 binder_size_t last_fixup_min_off = 0;
2864 struct binder_context *context = proc->context;
2865 int t_debug_id = atomic_inc_return(&binder_last_id);
2866 char *secctx = NULL;
2867 u32 secctx_sz = 0;
2868
2869 e = binder_transaction_log_add(&binder_transaction_log);
2870 e->debug_id = t_debug_id;
2871 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2872 e->from_proc = proc->pid;
2873 e->from_thread = thread->pid;
2874 e->target_handle = tr->target.handle;
2875 e->data_size = tr->data_size;
2876 e->offsets_size = tr->offsets_size;
2877 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2878
2879 if (reply) {
2880 binder_inner_proc_lock(proc);
2881 in_reply_to = thread->transaction_stack;
2882 if (in_reply_to == NULL) {
2883 binder_inner_proc_unlock(proc);
2884 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2885 proc->pid, thread->pid);
2886 return_error = BR_FAILED_REPLY;
2887 return_error_param = -EPROTO;
2888 return_error_line = __LINE__;
2889 goto err_empty_call_stack;
2890 }
2891 if (in_reply_to->to_thread != thread) {
2892 spin_lock(&in_reply_to->lock);
2893 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2894 proc->pid, thread->pid, in_reply_to->debug_id,
2895 in_reply_to->to_proc ?
2896 in_reply_to->to_proc->pid : 0,
2897 in_reply_to->to_thread ?
2898 in_reply_to->to_thread->pid : 0);
2899 spin_unlock(&in_reply_to->lock);
2900 binder_inner_proc_unlock(proc);
2901 return_error = BR_FAILED_REPLY;
2902 return_error_param = -EPROTO;
2903 return_error_line = __LINE__;
2904 in_reply_to = NULL;
2905 goto err_bad_call_stack;
2906 }
2907 thread->transaction_stack = in_reply_to->to_parent;
2908 binder_inner_proc_unlock(proc);
2909 binder_set_nice(in_reply_to->saved_priority);
2910 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2911 if (target_thread == NULL) {
2912 /* annotation for sparse */
2913 __release(&target_thread->proc->inner_lock);
2914 return_error = BR_DEAD_REPLY;
2915 return_error_line = __LINE__;
2916 goto err_dead_binder;
2917 }
2918 if (target_thread->transaction_stack != in_reply_to) {
2919 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2920 proc->pid, thread->pid,
2921 target_thread->transaction_stack ?
2922 target_thread->transaction_stack->debug_id : 0,
2923 in_reply_to->debug_id);
2924 binder_inner_proc_unlock(target_thread->proc);
2925 return_error = BR_FAILED_REPLY;
2926 return_error_param = -EPROTO;
2927 return_error_line = __LINE__;
2928 in_reply_to = NULL;
2929 target_thread = NULL;
2930 goto err_dead_binder;
2931 }
2932 target_proc = target_thread->proc;
2933 target_proc->tmp_ref++;
2934 binder_inner_proc_unlock(target_thread->proc);
2935 } else {
2936 if (tr->target.handle) {
2937 struct binder_ref *ref;
2938
2939 /*
2940 * There must already be a strong ref
2941 * on this node. If so, do a strong
2942 * increment on the node to ensure it
2943 * stays alive until the transaction is
2944 * done.
2945 */
2946 binder_proc_lock(proc);
2947 ref = binder_get_ref_olocked(proc, tr->target.handle,
2948 true);
2949 if (ref) {
2950 target_node = binder_get_node_refs_for_txn(
2951 ref->node, &target_proc,
2952 &return_error);
2953 } else {
2954 binder_user_error("%d:%d got transaction to invalid handle\n",
2955 proc->pid, thread->pid);
2956 return_error = BR_FAILED_REPLY;
2957 }
2958 binder_proc_unlock(proc);
2959 } else {
2960 mutex_lock(&context->context_mgr_node_lock);
2961 target_node = context->binder_context_mgr_node;
2962 if (target_node)
2963 target_node = binder_get_node_refs_for_txn(
2964 target_node, &target_proc,
2965 &return_error);
2966 else
2967 return_error = BR_DEAD_REPLY;
2968 mutex_unlock(&context->context_mgr_node_lock);
2969 if (target_node && target_proc->pid == proc->pid) {
2970 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2971 proc->pid, thread->pid);
2972 return_error = BR_FAILED_REPLY;
2973 return_error_param = -EINVAL;
2974 return_error_line = __LINE__;
2975 goto err_invalid_target_handle;
2976 }
2977 }
2978 if (!target_node) {
2979 /*
2980 * return_error is set above
2981 */
2982 return_error_param = -EINVAL;
2983 return_error_line = __LINE__;
2984 goto err_dead_binder;
2985 }
2986 e->to_node = target_node->debug_id;
2987 if (security_binder_transaction(proc->tsk,
2988 target_proc->tsk) < 0) {
2989 return_error = BR_FAILED_REPLY;
2990 return_error_param = -EPERM;
2991 return_error_line = __LINE__;
2992 goto err_invalid_target_handle;
2993 }
2994 binder_inner_proc_lock(proc);
2995
2996 w = list_first_entry_or_null(&thread->todo,
2997 struct binder_work, entry);
2998 if (!(tr->flags & TF_ONE_WAY) && w &&
2999 w->type == BINDER_WORK_TRANSACTION) {
3000 /*
3001 * Do not allow new outgoing transaction from a
3002 * thread that has a transaction at the head of
3003 * its todo list. Only need to check the head
3004 * because binder_select_thread_ilocked picks a
3005 * thread from proc->waiting_threads to enqueue
3006 * the transaction, and nothing is queued to the
3007 * todo list while the thread is on waiting_threads.
3008 */
3009 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3010 proc->pid, thread->pid);
3011 binder_inner_proc_unlock(proc);
3012 return_error = BR_FAILED_REPLY;
3013 return_error_param = -EPROTO;
3014 return_error_line = __LINE__;
3015 goto err_bad_todo_list;
3016 }
3017
3018 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3019 struct binder_transaction *tmp;
3020
3021 tmp = thread->transaction_stack;
3022 if (tmp->to_thread != thread) {
3023 spin_lock(&tmp->lock);
3024 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3025 proc->pid, thread->pid, tmp->debug_id,
3026 tmp->to_proc ? tmp->to_proc->pid : 0,
3027 tmp->to_thread ?
3028 tmp->to_thread->pid : 0);
3029 spin_unlock(&tmp->lock);
3030 binder_inner_proc_unlock(proc);
3031 return_error = BR_FAILED_REPLY;
3032 return_error_param = -EPROTO;
3033 return_error_line = __LINE__;
3034 goto err_bad_call_stack;
3035 }
3036 while (tmp) {
3037 struct binder_thread *from;
3038
3039 spin_lock(&tmp->lock);
3040 from = tmp->from;
3041 if (from && from->proc == target_proc) {
3042 atomic_inc(&from->tmp_ref);
3043 target_thread = from;
3044 spin_unlock(&tmp->lock);
3045 break;
3046 }
3047 spin_unlock(&tmp->lock);
3048 tmp = tmp->from_parent;
3049 }
3050 }
3051 binder_inner_proc_unlock(proc);
3052 }
3053 if (target_thread)
3054 e->to_thread = target_thread->pid;
3055 e->to_proc = target_proc->pid;
3056
3057 /* TODO: reuse incoming transaction for reply */
3058 t = kzalloc(sizeof(*t), GFP_KERNEL);
3059 if (t == NULL) {
3060 return_error = BR_FAILED_REPLY;
3061 return_error_param = -ENOMEM;
3062 return_error_line = __LINE__;
3063 goto err_alloc_t_failed;
3064 }
3065 INIT_LIST_HEAD(&t->fd_fixups);
3066 binder_stats_created(BINDER_STAT_TRANSACTION);
3067 spin_lock_init(&t->lock);
3068
3069 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3070 if (tcomplete == NULL) {
3071 return_error = BR_FAILED_REPLY;
3072 return_error_param = -ENOMEM;
3073 return_error_line = __LINE__;
3074 goto err_alloc_tcomplete_failed;
3075 }
3076 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3077
3078 t->debug_id = t_debug_id;
3079
3080 if (reply)
3081 binder_debug(BINDER_DEBUG_TRANSACTION,
3082 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3083 proc->pid, thread->pid, t->debug_id,
3084 target_proc->pid, target_thread->pid,
3085 (u64)tr->data.ptr.buffer,
3086 (u64)tr->data.ptr.offsets,
3087 (u64)tr->data_size, (u64)tr->offsets_size,
3088 (u64)extra_buffers_size);
3089 else
3090 binder_debug(BINDER_DEBUG_TRANSACTION,
3091 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3092 proc->pid, thread->pid, t->debug_id,
3093 target_proc->pid, target_node->debug_id,
3094 (u64)tr->data.ptr.buffer,
3095 (u64)tr->data.ptr.offsets,
3096 (u64)tr->data_size, (u64)tr->offsets_size,
3097 (u64)extra_buffers_size);
3098
3099 if (!reply && !(tr->flags & TF_ONE_WAY))
3100 t->from = thread;
3101 else
3102 t->from = NULL;
3103 t->sender_euid = task_euid(proc->tsk);
3104 t->to_proc = target_proc;
3105 t->to_thread = target_thread;
3106 t->code = tr->code;
3107 t->flags = tr->flags;
3108 t->priority = task_nice(current);
3109
3110 if (target_node && target_node->txn_security_ctx) {
3111 u32 secid;
3112 size_t added_size;
3113
3114 security_task_getsecid(proc->tsk, &secid);
3115 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3116 if (ret) {
3117 return_error = BR_FAILED_REPLY;
3118 return_error_param = ret;
3119 return_error_line = __LINE__;
3120 goto err_get_secctx_failed;
3121 }
3122 added_size = ALIGN(secctx_sz, sizeof(u64));
3123 extra_buffers_size += added_size;
3124 if (extra_buffers_size < added_size) {
3125 /* integer overflow of extra_buffers_size */
3126 return_error = BR_FAILED_REPLY;
3127 return_error_param = -EINVAL;
3128 return_error_line = __LINE__;
3129 goto err_bad_extra_size;
3130 }
3131 }
3132
3133 trace_binder_transaction(reply, t, target_node);
3134
3135 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3136 tr->offsets_size, extra_buffers_size,
3137 !reply && (t->flags & TF_ONE_WAY));
3138 if (IS_ERR(t->buffer)) {
3139 /*
3140 * -ESRCH indicates VMA cleared. The target is dying.
3141 */
3142 return_error_param = PTR_ERR(t->buffer);
3143 return_error = return_error_param == -ESRCH ?
3144 BR_DEAD_REPLY : BR_FAILED_REPLY;
3145 return_error_line = __LINE__;
3146 t->buffer = NULL;
3147 goto err_binder_alloc_buf_failed;
3148 }
3149 if (secctx) {
3150 int err;
3151 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3152 ALIGN(tr->offsets_size, sizeof(void *)) +
3153 ALIGN(extra_buffers_size, sizeof(void *)) -
3154 ALIGN(secctx_sz, sizeof(u64));
3155
3156 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3157 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3158 t->buffer, buf_offset,
3159 secctx, secctx_sz);
3160 if (err) {
3161 t->security_ctx = 0;
3162 WARN_ON(1);
3163 }
3164 security_release_secctx(secctx, secctx_sz);
3165 secctx = NULL;
3166 }
3167 t->buffer->debug_id = t->debug_id;
3168 t->buffer->transaction = t;
3169 t->buffer->target_node = target_node;
3170 trace_binder_transaction_alloc_buf(t->buffer);
3171
3172 if (binder_alloc_copy_user_to_buffer(
3173 &target_proc->alloc,
3174 t->buffer, 0,
3175 (const void __user *)
3176 (uintptr_t)tr->data.ptr.buffer,
3177 tr->data_size)) {
3178 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3179 proc->pid, thread->pid);
3180 return_error = BR_FAILED_REPLY;
3181 return_error_param = -EFAULT;
3182 return_error_line = __LINE__;
3183 goto err_copy_data_failed;
3184 }
3185 if (binder_alloc_copy_user_to_buffer(
3186 &target_proc->alloc,
3187 t->buffer,
3188 ALIGN(tr->data_size, sizeof(void *)),
3189 (const void __user *)
3190 (uintptr_t)tr->data.ptr.offsets,
3191 tr->offsets_size)) {
3192 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3193 proc->pid, thread->pid);
3194 return_error = BR_FAILED_REPLY;
3195 return_error_param = -EFAULT;
3196 return_error_line = __LINE__;
3197 goto err_copy_data_failed;
3198 }
3199 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3200 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3201 proc->pid, thread->pid, (u64)tr->offsets_size);
3202 return_error = BR_FAILED_REPLY;
3203 return_error_param = -EINVAL;
3204 return_error_line = __LINE__;
3205 goto err_bad_offset;
3206 }
3207 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3208 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3209 proc->pid, thread->pid,
3210 (u64)extra_buffers_size);
3211 return_error = BR_FAILED_REPLY;
3212 return_error_param = -EINVAL;
3213 return_error_line = __LINE__;
3214 goto err_bad_offset;
3215 }
3216 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3217 buffer_offset = off_start_offset;
3218 off_end_offset = off_start_offset + tr->offsets_size;
3219 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3220 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3221 ALIGN(secctx_sz, sizeof(u64));
3222 off_min = 0;
3223 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3224 buffer_offset += sizeof(binder_size_t)) {
3225 struct binder_object_header *hdr;
3226 size_t object_size;
3227 struct binder_object object;
3228 binder_size_t object_offset;
3229
3230 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3231 &object_offset,
3232 t->buffer,
3233 buffer_offset,
3234 sizeof(object_offset))) {
3235 return_error = BR_FAILED_REPLY;
3236 return_error_param = -EINVAL;
3237 return_error_line = __LINE__;
3238 goto err_bad_offset;
3239 }
3240 object_size = binder_get_object(target_proc, t->buffer,
3241 object_offset, &object);
3242 if (object_size == 0 || object_offset < off_min) {
3243 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3244 proc->pid, thread->pid,
3245 (u64)object_offset,
3246 (u64)off_min,
3247 (u64)t->buffer->data_size);
3248 return_error = BR_FAILED_REPLY;
3249 return_error_param = -EINVAL;
3250 return_error_line = __LINE__;
3251 goto err_bad_offset;
3252 }
3253
3254 hdr = &object.hdr;
3255 off_min = object_offset + object_size;
3256 switch (hdr->type) {
3257 case BINDER_TYPE_BINDER:
3258 case BINDER_TYPE_WEAK_BINDER: {
3259 struct flat_binder_object *fp;
3260
3261 fp = to_flat_binder_object(hdr);
3262 ret = binder_translate_binder(fp, t, thread);
3263
3264 if (ret < 0 ||
3265 binder_alloc_copy_to_buffer(&target_proc->alloc,
3266 t->buffer,
3267 object_offset,
3268 fp, sizeof(*fp))) {
3269 return_error = BR_FAILED_REPLY;
3270 return_error_param = ret;
3271 return_error_line = __LINE__;
3272 goto err_translate_failed;
3273 }
3274 } break;
3275 case BINDER_TYPE_HANDLE:
3276 case BINDER_TYPE_WEAK_HANDLE: {
3277 struct flat_binder_object *fp;
3278
3279 fp = to_flat_binder_object(hdr);
3280 ret = binder_translate_handle(fp, t, thread);
3281 if (ret < 0 ||
3282 binder_alloc_copy_to_buffer(&target_proc->alloc,
3283 t->buffer,
3284 object_offset,
3285 fp, sizeof(*fp))) {
3286 return_error = BR_FAILED_REPLY;
3287 return_error_param = ret;
3288 return_error_line = __LINE__;
3289 goto err_translate_failed;
3290 }
3291 } break;
3292
3293 case BINDER_TYPE_FD: {
3294 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3295 binder_size_t fd_offset = object_offset +
3296 (uintptr_t)&fp->fd - (uintptr_t)fp;
3297 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3298 thread, in_reply_to);
3299
3300 fp->pad_binder = 0;
3301 if (ret < 0 ||
3302 binder_alloc_copy_to_buffer(&target_proc->alloc,
3303 t->buffer,
3304 object_offset,
3305 fp, sizeof(*fp))) {
3306 return_error = BR_FAILED_REPLY;
3307 return_error_param = ret;
3308 return_error_line = __LINE__;
3309 goto err_translate_failed;
3310 }
3311 } break;
3312 case BINDER_TYPE_FDA: {
3313 struct binder_object ptr_object;
3314 binder_size_t parent_offset;
3315 struct binder_fd_array_object *fda =
3316 to_binder_fd_array_object(hdr);
3317 size_t num_valid = (buffer_offset - off_start_offset) /
3318 sizeof(binder_size_t);
3319 struct binder_buffer_object *parent =
3320 binder_validate_ptr(target_proc, t->buffer,
3321 &ptr_object, fda->parent,
3322 off_start_offset,
3323 &parent_offset,
3324 num_valid);
3325 if (!parent) {
3326 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3327 proc->pid, thread->pid);
3328 return_error = BR_FAILED_REPLY;
3329 return_error_param = -EINVAL;
3330 return_error_line = __LINE__;
3331 goto err_bad_parent;
3332 }
3333 if (!binder_validate_fixup(target_proc, t->buffer,
3334 off_start_offset,
3335 parent_offset,
3336 fda->parent_offset,
3337 last_fixup_obj_off,
3338 last_fixup_min_off)) {
3339 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3340 proc->pid, thread->pid);
3341 return_error = BR_FAILED_REPLY;
3342 return_error_param = -EINVAL;
3343 return_error_line = __LINE__;
3344 goto err_bad_parent;
3345 }
3346 ret = binder_translate_fd_array(fda, parent, t, thread,
3347 in_reply_to);
3348 if (ret < 0) {
3349 return_error = BR_FAILED_REPLY;
3350 return_error_param = ret;
3351 return_error_line = __LINE__;
3352 goto err_translate_failed;
3353 }
3354 last_fixup_obj_off = parent_offset;
3355 last_fixup_min_off =
3356 fda->parent_offset + sizeof(u32) * fda->num_fds;
3357 } break;
3358 case BINDER_TYPE_PTR: {
3359 struct binder_buffer_object *bp =
3360 to_binder_buffer_object(hdr);
3361 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3362 size_t num_valid;
3363
3364 if (bp->length > buf_left) {
3365 binder_user_error("%d:%d got transaction with too large buffer\n",
3366 proc->pid, thread->pid);
3367 return_error = BR_FAILED_REPLY;
3368 return_error_param = -EINVAL;
3369 return_error_line = __LINE__;
3370 goto err_bad_offset;
3371 }
3372 if (binder_alloc_copy_user_to_buffer(
3373 &target_proc->alloc,
3374 t->buffer,
3375 sg_buf_offset,
3376 (const void __user *)
3377 (uintptr_t)bp->buffer,
3378 bp->length)) {
3379 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3380 proc->pid, thread->pid);
3381 return_error_param = -EFAULT;
3382 return_error = BR_FAILED_REPLY;
3383 return_error_line = __LINE__;
3384 goto err_copy_data_failed;
3385 }
3386 /* Fixup buffer pointer to target proc address space */
3387 bp->buffer = (uintptr_t)
3388 t->buffer->user_data + sg_buf_offset;
3389 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3390
3391 num_valid = (buffer_offset - off_start_offset) /
3392 sizeof(binder_size_t);
3393 ret = binder_fixup_parent(t, thread, bp,
3394 off_start_offset,
3395 num_valid,
3396 last_fixup_obj_off,
3397 last_fixup_min_off);
3398 if (ret < 0 ||
3399 binder_alloc_copy_to_buffer(&target_proc->alloc,
3400 t->buffer,
3401 object_offset,
3402 bp, sizeof(*bp))) {
3403 return_error = BR_FAILED_REPLY;
3404 return_error_param = ret;
3405 return_error_line = __LINE__;
3406 goto err_translate_failed;
3407 }
3408 last_fixup_obj_off = object_offset;
3409 last_fixup_min_off = 0;
3410 } break;
3411 default:
3412 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3413 proc->pid, thread->pid, hdr->type);
3414 return_error = BR_FAILED_REPLY;
3415 return_error_param = -EINVAL;
3416 return_error_line = __LINE__;
3417 goto err_bad_object_type;
3418 }
3419 }
3420 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3421 t->work.type = BINDER_WORK_TRANSACTION;
3422
3423 if (reply) {
3424 binder_enqueue_thread_work(thread, tcomplete);
3425 binder_inner_proc_lock(target_proc);
3426 if (target_thread->is_dead) {
3427 binder_inner_proc_unlock(target_proc);
3428 goto err_dead_proc_or_thread;
3429 }
3430 BUG_ON(t->buffer->async_transaction != 0);
3431 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3432 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3433 binder_inner_proc_unlock(target_proc);
3434 wake_up_interruptible_sync(&target_thread->wait);
3435 binder_free_transaction(in_reply_to);
3436 } else if (!(t->flags & TF_ONE_WAY)) {
3437 BUG_ON(t->buffer->async_transaction != 0);
3438 binder_inner_proc_lock(proc);
3439 /*
3440 * Defer the TRANSACTION_COMPLETE, so we don't return to
3441 * userspace immediately; this allows the target process to
3442 * immediately start processing this transaction, reducing
3443 * latency. We will then return the TRANSACTION_COMPLETE when
3444 * the target replies (or there is an error).
3445 */
3446 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3447 t->need_reply = 1;
3448 t->from_parent = thread->transaction_stack;
3449 thread->transaction_stack = t;
3450 binder_inner_proc_unlock(proc);
3451 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3452 binder_inner_proc_lock(proc);
3453 binder_pop_transaction_ilocked(thread, t);
3454 binder_inner_proc_unlock(proc);
3455 goto err_dead_proc_or_thread;
3456 }
3457 } else {
3458 BUG_ON(target_node == NULL);
3459 BUG_ON(t->buffer->async_transaction != 1);
3460 binder_enqueue_thread_work(thread, tcomplete);
3461 if (!binder_proc_transaction(t, target_proc, NULL))
3462 goto err_dead_proc_or_thread;
3463 }
3464 if (target_thread)
3465 binder_thread_dec_tmpref(target_thread);
3466 binder_proc_dec_tmpref(target_proc);
3467 if (target_node)
3468 binder_dec_node_tmpref(target_node);
3469 /*
3470 * write barrier to synchronize with initialization
3471 * of log entry
3472 */
3473 smp_wmb();
3474 WRITE_ONCE(e->debug_id_done, t_debug_id);
3475 return;
3476
3477err_dead_proc_or_thread:
3478 return_error = BR_DEAD_REPLY;
3479 return_error_line = __LINE__;
3480 binder_dequeue_work(proc, tcomplete);
3481err_translate_failed:
3482err_bad_object_type:
3483err_bad_offset:
3484err_bad_parent:
3485err_copy_data_failed:
3486 binder_free_txn_fixups(t);
3487 trace_binder_transaction_failed_buffer_release(t->buffer);
3488 binder_transaction_buffer_release(target_proc, t->buffer,
3489 buffer_offset, true);
3490 if (target_node)
3491 binder_dec_node_tmpref(target_node);
3492 target_node = NULL;
3493 t->buffer->transaction = NULL;
3494 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3495err_binder_alloc_buf_failed:
3496err_bad_extra_size:
3497 if (secctx)
3498 security_release_secctx(secctx, secctx_sz);
3499err_get_secctx_failed:
3500 kfree(tcomplete);
3501 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3502err_alloc_tcomplete_failed:
3503 kfree(t);
3504 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3505err_alloc_t_failed:
3506err_bad_todo_list:
3507err_bad_call_stack:
3508err_empty_call_stack:
3509err_dead_binder:
3510err_invalid_target_handle:
3511 if (target_thread)
3512 binder_thread_dec_tmpref(target_thread);
3513 if (target_proc)
3514 binder_proc_dec_tmpref(target_proc);
3515 if (target_node) {
3516 binder_dec_node(target_node, 1, 0);
3517 binder_dec_node_tmpref(target_node);
3518 }
3519
3520 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3521 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3522 proc->pid, thread->pid, return_error, return_error_param,
3523 (u64)tr->data_size, (u64)tr->offsets_size,
3524 return_error_line);
3525
3526 {
3527 struct binder_transaction_log_entry *fe;
3528
3529 e->return_error = return_error;
3530 e->return_error_param = return_error_param;
3531 e->return_error_line = return_error_line;
3532 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3533 *fe = *e;
3534 /*
3535 * write barrier to synchronize with initialization
3536 * of log entry
3537 */
3538 smp_wmb();
3539 WRITE_ONCE(e->debug_id_done, t_debug_id);
3540 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3541 }
3542
3543 BUG_ON(thread->return_error.cmd != BR_OK);
3544 if (in_reply_to) {
3545 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3546 binder_enqueue_thread_work(thread, &thread->return_error.work);
3547 binder_send_failed_reply(in_reply_to, return_error);
3548 } else {
3549 thread->return_error.cmd = return_error;
3550 binder_enqueue_thread_work(thread, &thread->return_error.work);
3551 }
3552}
3553
3554/**
3555 * binder_free_buf() - free the specified buffer
3556 * @proc: binder proc that owns buffer
3557 * @buffer: buffer to be freed
3558 *
3559 * If the buffer is for an async transaction, enqueue the next async
3560 * transaction from the node.
3561 *
3562 * Cleanup buffer and free it.
3563 */
3564static void
3565binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3566{
3567 binder_inner_proc_lock(proc);
3568 if (buffer->transaction) {
3569 buffer->transaction->buffer = NULL;
3570 buffer->transaction = NULL;
3571 }
3572 binder_inner_proc_unlock(proc);
3573 if (buffer->async_transaction && buffer->target_node) {
3574 struct binder_node *buf_node;
3575 struct binder_work *w;
3576
3577 buf_node = buffer->target_node;
3578 binder_node_inner_lock(buf_node);
3579 BUG_ON(!buf_node->has_async_transaction);
3580 BUG_ON(buf_node->proc != proc);
3581 w = binder_dequeue_work_head_ilocked(
3582 &buf_node->async_todo);
3583 if (!w) {
3584 buf_node->has_async_transaction = false;
3585 } else {
3586 binder_enqueue_work_ilocked(
3587 w, &proc->todo);
3588 binder_wakeup_proc_ilocked(proc);
3589 }
3590 binder_node_inner_unlock(buf_node);
3591 }
3592 trace_binder_transaction_buffer_release(buffer);
3593 binder_transaction_buffer_release(proc, buffer, 0, false);
3594 binder_alloc_free_buf(&proc->alloc, buffer);
3595}
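
/*
 * Illustrative user-space sketch, not driver code: how a buffer
 * typically gets here. After consuming a BR_TRANSACTION, user space
 * returns the buffer with BC_FREE_BUFFER ('binder_fd' and 'tr' are
 * hypothetical):
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t ptr;
 *	} __attribute__((packed)) free_cmd = {
 *		BC_FREE_BUFFER, tr.data.ptr.buffer
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(free_cmd),
 *		.write_buffer = (binder_uintptr_t)&free_cmd,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * That lands in the BC_FREE_BUFFER case of binder_thread_write()
 * below, which calls binder_free_buf().
 */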
3596
3597static int binder_thread_write(struct binder_proc *proc,
3598 struct binder_thread *thread,
3599 binder_uintptr_t binder_buffer, size_t size,
3600 binder_size_t *consumed)
3601{
3602 uint32_t cmd;
3603 struct binder_context *context = proc->context;
3604 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3605 void __user *ptr = buffer + *consumed;
3606 void __user *end = buffer + size;
3607
3608 while (ptr < end && thread->return_error.cmd == BR_OK) {
3609 int ret;
3610
3611 if (get_user(cmd, (uint32_t __user *)ptr))
3612 return -EFAULT;
3613 ptr += sizeof(uint32_t);
3614 trace_binder_command(cmd);
3615 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3616 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3617 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3618 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3619 }
3620 switch (cmd) {
3621 case BC_INCREFS:
3622 case BC_ACQUIRE:
3623 case BC_RELEASE:
3624 case BC_DECREFS: {
3625 uint32_t target;
3626 const char *debug_string;
3627 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3628 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3629 struct binder_ref_data rdata;
3630
3631 if (get_user(target, (uint32_t __user *)ptr))
3632 return -EFAULT;
3633
3634 ptr += sizeof(uint32_t);
3635 ret = -1;
3636 if (increment && !target) {
3637 struct binder_node *ctx_mgr_node;
3638 mutex_lock(&context->context_mgr_node_lock);
3639 ctx_mgr_node = context->binder_context_mgr_node;
3640 if (ctx_mgr_node)
3641 ret = binder_inc_ref_for_node(
3642 proc, ctx_mgr_node,
3643 strong, NULL, &rdata);
3644 mutex_unlock(&context->context_mgr_node_lock);
3645 }
3646 if (ret)
3647 ret = binder_update_ref_for_handle(
3648 proc, target, increment, strong,
3649 &rdata);
3650 if (!ret && rdata.desc != target) {
3651 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3652 proc->pid, thread->pid,
3653 target, rdata.desc);
3654 }
3655 switch (cmd) {
3656 case BC_INCREFS:
3657 debug_string = "IncRefs";
3658 break;
3659 case BC_ACQUIRE:
3660 debug_string = "Acquire";
3661 break;
3662 case BC_RELEASE:
3663 debug_string = "Release";
3664 break;
3665 case BC_DECREFS:
3666 default:
3667 debug_string = "DecRefs";
3668 break;
3669 }
3670 if (ret) {
3671 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3672 proc->pid, thread->pid, debug_string,
3673 strong, target, ret);
3674 break;
3675 }
3676 binder_debug(BINDER_DEBUG_USER_REFS,
3677 "%d:%d %s ref %d desc %d s %d w %d\n",
3678 proc->pid, thread->pid, debug_string,
3679 rdata.debug_id, rdata.desc, rdata.strong,
3680 rdata.weak);
3681 break;
3682 }
3683 case BC_INCREFS_DONE:
3684 case BC_ACQUIRE_DONE: {
3685 binder_uintptr_t node_ptr;
3686 binder_uintptr_t cookie;
3687 struct binder_node *node;
3688 bool free_node;
3689
3690 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3691 return -EFAULT;
3692 ptr += sizeof(binder_uintptr_t);
3693 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3694 return -EFAULT;
3695 ptr += sizeof(binder_uintptr_t);
3696 node = binder_get_node(proc, node_ptr);
3697 if (node == NULL) {
3698 binder_user_error("%d:%d %s u%016llx no match\n",
3699 proc->pid, thread->pid,
3700 cmd == BC_INCREFS_DONE ?
3701 "BC_INCREFS_DONE" :
3702 "BC_ACQUIRE_DONE",
3703 (u64)node_ptr);
3704 break;
3705 }
3706 if (cookie != node->cookie) {
3707 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3708 proc->pid, thread->pid,
3709 cmd == BC_INCREFS_DONE ?
3710 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3711 (u64)node_ptr, node->debug_id,
3712 (u64)cookie, (u64)node->cookie);
3713 binder_put_node(node);
3714 break;
3715 }
3716 binder_node_inner_lock(node);
3717 if (cmd == BC_ACQUIRE_DONE) {
3718 if (node->pending_strong_ref == 0) {
3719 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3720 proc->pid, thread->pid,
3721 node->debug_id);
3722 binder_node_inner_unlock(node);
3723 binder_put_node(node);
3724 break;
3725 }
3726 node->pending_strong_ref = 0;
3727 } else {
3728 if (node->pending_weak_ref == 0) {
3729 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3730 proc->pid, thread->pid,
3731 node->debug_id);
3732 binder_node_inner_unlock(node);
3733 binder_put_node(node);
3734 break;
3735 }
3736 node->pending_weak_ref = 0;
3737 }
3738 free_node = binder_dec_node_nilocked(node,
3739 cmd == BC_ACQUIRE_DONE, 0);
3740 WARN_ON(free_node);
3741 binder_debug(BINDER_DEBUG_USER_REFS,
3742 "%d:%d %s node %d ls %d lw %d tr %d\n",
3743 proc->pid, thread->pid,
3744 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3745 node->debug_id, node->local_strong_refs,
3746 node->local_weak_refs, node->tmp_refs);
3747 binder_node_inner_unlock(node);
3748 binder_put_node(node);
3749 break;
3750 }
3751 case BC_ATTEMPT_ACQUIRE:
3752 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3753 return -EINVAL;
3754 case BC_ACQUIRE_RESULT:
3755 pr_err("BC_ACQUIRE_RESULT not supported\n");
3756 return -EINVAL;
3757
3758 case BC_FREE_BUFFER: {
3759 binder_uintptr_t data_ptr;
3760 struct binder_buffer *buffer;
3761
3762 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3763 return -EFAULT;
3764 ptr += sizeof(binder_uintptr_t);
3765
3766 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3767 data_ptr);
3768 if (IS_ERR_OR_NULL(buffer)) {
3769 if (PTR_ERR(buffer) == -EPERM) {
3770 binder_user_error(
3771 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3772 proc->pid, thread->pid,
3773 (u64)data_ptr);
3774 } else {
3775 binder_user_error(
3776 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3777 proc->pid, thread->pid,
3778 (u64)data_ptr);
3779 }
3780 break;
3781 }
3782 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3783 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3784 proc->pid, thread->pid, (u64)data_ptr,
3785 buffer->debug_id,
3786 buffer->transaction ? "active" : "finished");
3787 binder_free_buf(proc, buffer);
3788 break;
3789 }
3790
3791 case BC_TRANSACTION_SG:
3792 case BC_REPLY_SG: {
3793 struct binder_transaction_data_sg tr;
3794
3795 if (copy_from_user(&tr, ptr, sizeof(tr)))
3796 return -EFAULT;
3797 ptr += sizeof(tr);
3798 binder_transaction(proc, thread, &tr.transaction_data,
3799 cmd == BC_REPLY_SG, tr.buffers_size);
3800 break;
3801 }
3802 case BC_TRANSACTION:
3803 case BC_REPLY: {
3804 struct binder_transaction_data tr;
3805
3806 if (copy_from_user(&tr, ptr, sizeof(tr)))
3807 return -EFAULT;
3808 ptr += sizeof(tr);
3809 binder_transaction(proc, thread, &tr,
3810 cmd == BC_REPLY, 0);
3811 break;
3812 }
3813
3814 case BC_REGISTER_LOOPER:
3815 binder_debug(BINDER_DEBUG_THREADS,
3816 "%d:%d BC_REGISTER_LOOPER\n",
3817 proc->pid, thread->pid);
3818 binder_inner_proc_lock(proc);
3819 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3820 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3821 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3822 proc->pid, thread->pid);
3823 } else if (proc->requested_threads == 0) {
3824 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3825 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3826 proc->pid, thread->pid);
3827 } else {
3828 proc->requested_threads--;
3829 proc->requested_threads_started++;
3830 }
3831 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3832 binder_inner_proc_unlock(proc);
3833 break;
3834 case BC_ENTER_LOOPER:
3835 binder_debug(BINDER_DEBUG_THREADS,
3836 "%d:%d BC_ENTER_LOOPER\n",
3837 proc->pid, thread->pid);
3838 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3839 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3840 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3841 proc->pid, thread->pid);
3842 }
3843 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3844 break;
3845 case BC_EXIT_LOOPER:
3846 binder_debug(BINDER_DEBUG_THREADS,
3847 "%d:%d BC_EXIT_LOOPER\n",
3848 proc->pid, thread->pid);
3849 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3850 break;
3851
3852 case BC_REQUEST_DEATH_NOTIFICATION:
3853 case BC_CLEAR_DEATH_NOTIFICATION: {
3854 uint32_t target;
3855 binder_uintptr_t cookie;
3856 struct binder_ref *ref;
3857 struct binder_ref_death *death = NULL;
3858
3859 if (get_user(target, (uint32_t __user *)ptr))
3860 return -EFAULT;
3861 ptr += sizeof(uint32_t);
3862 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3863 return -EFAULT;
3864 ptr += sizeof(binder_uintptr_t);
3865 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3866 /*
3867 * Allocate memory for death notification
3868 * before taking lock
3869 */
3870 death = kzalloc(sizeof(*death), GFP_KERNEL);
3871 if (death == NULL) {
3872 WARN_ON(thread->return_error.cmd !=
3873 BR_OK);
3874 thread->return_error.cmd = BR_ERROR;
3875 binder_enqueue_thread_work(
3876 thread,
3877 &thread->return_error.work);
3878 binder_debug(
3879 BINDER_DEBUG_FAILED_TRANSACTION,
3880 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3881 proc->pid, thread->pid);
3882 break;
3883 }
3884 }
3885 binder_proc_lock(proc);
3886 ref = binder_get_ref_olocked(proc, target, false);
3887 if (ref == NULL) {
3888 binder_user_error("%d:%d %s invalid ref %d\n",
3889 proc->pid, thread->pid,
3890 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3891 "BC_REQUEST_DEATH_NOTIFICATION" :
3892 "BC_CLEAR_DEATH_NOTIFICATION",
3893 target);
3894 binder_proc_unlock(proc);
3895 kfree(death);
3896 break;
3897 }
3898
3899 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3900 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3901 proc->pid, thread->pid,
3902 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3903 "BC_REQUEST_DEATH_NOTIFICATION" :
3904 "BC_CLEAR_DEATH_NOTIFICATION",
3905 (u64)cookie, ref->data.debug_id,
3906 ref->data.desc, ref->data.strong,
3907 ref->data.weak, ref->node->debug_id);
3908
3909 binder_node_lock(ref->node);
3910 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3911 if (ref->death) {
3912 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3913 proc->pid, thread->pid);
3914 binder_node_unlock(ref->node);
3915 binder_proc_unlock(proc);
3916 kfree(death);
3917 break;
3918 }
3919 binder_stats_created(BINDER_STAT_DEATH);
3920 INIT_LIST_HEAD(&death->work.entry);
3921 death->cookie = cookie;
3922 ref->death = death;
3923 if (ref->node->proc == NULL) {
3924 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3925
3926 binder_inner_proc_lock(proc);
3927 binder_enqueue_work_ilocked(
3928 &ref->death->work, &proc->todo);
3929 binder_wakeup_proc_ilocked(proc);
3930 binder_inner_proc_unlock(proc);
3931 }
3932 } else {
3933 if (ref->death == NULL) {
3934 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3935 proc->pid, thread->pid);
3936 binder_node_unlock(ref->node);
3937 binder_proc_unlock(proc);
3938 break;
3939 }
3940 death = ref->death;
3941 if (death->cookie != cookie) {
3942 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3943 proc->pid, thread->pid,
3944 (u64)death->cookie,
3945 (u64)cookie);
3946 binder_node_unlock(ref->node);
3947 binder_proc_unlock(proc);
3948 break;
3949 }
3950 ref->death = NULL;
3951 binder_inner_proc_lock(proc);
3952 if (list_empty(&death->work.entry)) {
3953 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3954 if (thread->looper &
3955 (BINDER_LOOPER_STATE_REGISTERED |
3956 BINDER_LOOPER_STATE_ENTERED))
3957 binder_enqueue_thread_work_ilocked(
3958 thread,
3959 &death->work);
3960 else {
3961 binder_enqueue_work_ilocked(
3962 &death->work,
3963 &proc->todo);
3964 binder_wakeup_proc_ilocked(
3965 proc);
3966 }
3967 } else {
3968 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3969 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3970 }
3971 binder_inner_proc_unlock(proc);
3972 }
3973 binder_node_unlock(ref->node);
3974 binder_proc_unlock(proc);
3975 } break;
3976 case BC_DEAD_BINDER_DONE: {
3977 struct binder_work *w;
3978 binder_uintptr_t cookie;
3979 struct binder_ref_death *death = NULL;
3980
3981 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3982 return -EFAULT;
3983
3984 ptr += sizeof(cookie);
3985 binder_inner_proc_lock(proc);
3986 list_for_each_entry(w, &proc->delivered_death,
3987 entry) {
3988 struct binder_ref_death *tmp_death =
3989 container_of(w,
3990 struct binder_ref_death,
3991 work);
3992
3993 if (tmp_death->cookie == cookie) {
3994 death = tmp_death;
3995 break;
3996 }
3997 }
3998 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3999 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4000 proc->pid, thread->pid, (u64)cookie,
4001 death);
4002 if (death == NULL) {
4003 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4004 proc->pid, thread->pid, (u64)cookie);
4005 binder_inner_proc_unlock(proc);
4006 break;
4007 }
4008 binder_dequeue_work_ilocked(&death->work);
4009 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4010 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4011 if (thread->looper &
4012 (BINDER_LOOPER_STATE_REGISTERED |
4013 BINDER_LOOPER_STATE_ENTERED))
4014 binder_enqueue_thread_work_ilocked(
4015 thread, &death->work);
4016 else {
4017 binder_enqueue_work_ilocked(
4018 &death->work,
4019 &proc->todo);
4020 binder_wakeup_proc_ilocked(proc);
4021 }
4022 }
4023 binder_inner_proc_unlock(proc);
4024 } break;
4025
4026 default:
4027 pr_err("%d:%d unknown command %d\n",
4028 proc->pid, thread->pid, cmd);
4029 return -EINVAL;
4030 }
4031 *consumed = ptr - buffer;
4032 }
4033 return 0;
4034}
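
/*
 * Illustrative sketch, not driver code: the write buffer parsed above
 * is a packed stream of u32 commands, each followed by its payload.
 * A hypothetical two-command stream:
 *
 *	[BC_ENTER_LOOPER]				// no payload
 *	[BC_TRANSACTION][struct binder_transaction_data]
 *
 * binder_thread_write() consumes commands until 'ptr' reaches 'end' or
 * a pending return error stops the loop; *consumed records how far it
 * got, so an interrupted write can resume where it left off.
 */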
4035
4036static void binder_stat_br(struct binder_proc *proc,
4037 struct binder_thread *thread, uint32_t cmd)
4038{
4039 trace_binder_return(cmd);
4040 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4041 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4042 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4043 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4044 }
4045}
4046
4047static int binder_put_node_cmd(struct binder_proc *proc,
4048 struct binder_thread *thread,
4049 void __user **ptrp,
4050 binder_uintptr_t node_ptr,
4051 binder_uintptr_t node_cookie,
4052 int node_debug_id,
4053 uint32_t cmd, const char *cmd_name)
4054{
4055 void __user *ptr = *ptrp;
4056
4057 if (put_user(cmd, (uint32_t __user *)ptr))
4058 return -EFAULT;
4059 ptr += sizeof(uint32_t);
4060
4061 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4062 return -EFAULT;
4063 ptr += sizeof(binder_uintptr_t);
4064
4065 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4066 return -EFAULT;
4067 ptr += sizeof(binder_uintptr_t);
4068
4069 binder_stat_br(proc, thread, cmd);
4070 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4071 proc->pid, thread->pid, cmd_name, node_debug_id,
4072 (u64)node_ptr, (u64)node_cookie);
4073
4074 *ptrp = ptr;
4075 return 0;
4076}
4077
4078static int binder_wait_for_work(struct binder_thread *thread,
4079 bool do_proc_work)
4080{
4081 DEFINE_WAIT(wait);
4082 struct binder_proc *proc = thread->proc;
4083 int ret = 0;
4084
4085 freezer_do_not_count();
4086 binder_inner_proc_lock(proc);
4087 for (;;) {
4088 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4089 if (binder_has_work_ilocked(thread, do_proc_work))
4090 break;
4091 if (do_proc_work)
4092 list_add(&thread->waiting_thread_node,
4093 &proc->waiting_threads);
4094 binder_inner_proc_unlock(proc);
4095 schedule();
4096 binder_inner_proc_lock(proc);
4097 list_del_init(&thread->waiting_thread_node);
4098 if (signal_pending(current)) {
4099 ret = -ERESTARTSYS;
4100 break;
4101 }
4102 }
4103 finish_wait(&thread->wait, &wait);
4104 binder_inner_proc_unlock(proc);
4105 freezer_count();
4106
4107 return ret;
4108}
4109
4110/**
4111 * binder_apply_fd_fixups() - finish fd translation
4112 * @proc: binder_proc associated with @t->buffer
4113 * @t: binder transaction with list of fd fixups
4114 *
4115 * Now that we are in the context of the transaction target
4116 * process, we can allocate and install fds. Process the
4117 * list of fds to translate and fixup the buffer with the
4118 * new fds.
4119 *
4120 * If we fail to allocate an fd, then free the resources by
4121 * fput'ing files that have not been processed and closing, via
4122 * binder_deferred_fd_close(), any fds that have already been allocated.
4123 */
4124static int binder_apply_fd_fixups(struct binder_proc *proc,
4125 struct binder_transaction *t)
4126{
4127 struct binder_txn_fd_fixup *fixup, *tmp;
4128 int ret = 0;
4129
4130 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4131 int fd = get_unused_fd_flags(O_CLOEXEC);
4132
4133 if (fd < 0) {
4134 binder_debug(BINDER_DEBUG_TRANSACTION,
4135 "failed fd fixup txn %d fd %d\n",
4136 t->debug_id, fd);
4137 ret = -ENOMEM;
4138 break;
4139 }
4140 binder_debug(BINDER_DEBUG_TRANSACTION,
4141 "fd fixup txn %d fd %d\n",
4142 t->debug_id, fd);
4143 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4144 fd_install(fd, fixup->file);
4145 fixup->file = NULL;
4146 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4147 fixup->offset, &fd,
4148 sizeof(u32))) {
4149 ret = -EINVAL;
4150 break;
4151 }
4152 }
4153 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4154 if (fixup->file) {
4155 fput(fixup->file);
4156 } else if (ret) {
4157 u32 fd;
4158 int err;
4159
4160 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4161 t->buffer,
4162 fixup->offset,
4163 sizeof(fd));
4164 WARN_ON(err);
4165 if (!err)
4166 binder_deferred_fd_close(fd);
4167 }
4168 list_del(&fixup->fixup_entry);
4169 kfree(fixup);
4170 }
4171
4172 return ret;
4173}
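
/*
 * Failure semantics of binder_apply_fd_fixups(), summarized from the
 * two loops above: if any fd allocation or buffer copy fails, every
 * fixup whose file was not yet installed is fput(), and every fd
 * already patched into the buffer is closed again through
 * binder_deferred_fd_close(), so a partially translated transaction
 * never leaks fds into the target process.
 */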
4174
4175static int binder_thread_read(struct binder_proc *proc,
4176 struct binder_thread *thread,
4177 binder_uintptr_t binder_buffer, size_t size,
4178 binder_size_t *consumed, int non_block)
4179{
4180 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4181 void __user *ptr = buffer + *consumed;
4182 void __user *end = buffer + size;
4183
4184 int ret = 0;
4185 int wait_for_proc_work;
4186
4187 if (*consumed == 0) {
4188 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4189 return -EFAULT;
4190 ptr += sizeof(uint32_t);
4191 }
4192
4193retry:
4194 binder_inner_proc_lock(proc);
4195 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4196 binder_inner_proc_unlock(proc);
4197
4198 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4199
4200 trace_binder_wait_for_work(wait_for_proc_work,
4201 !!thread->transaction_stack,
4202 !binder_worklist_empty(proc, &thread->todo));
4203 if (wait_for_proc_work) {
4204 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4205 BINDER_LOOPER_STATE_ENTERED))) {
4206 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4207 proc->pid, thread->pid, thread->looper);
4208 wait_event_interruptible(binder_user_error_wait,
4209 binder_stop_on_user_error < 2);
4210 }
4211 binder_set_nice(proc->default_priority);
4212 }
4213
4214 if (non_block) {
4215 if (!binder_has_work(thread, wait_for_proc_work))
4216 ret = -EAGAIN;
4217 } else {
4218 ret = binder_wait_for_work(thread, wait_for_proc_work);
4219 }
4220
4221 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4222
4223 if (ret)
4224 return ret;
4225
4226 while (1) {
4227 uint32_t cmd;
4228 struct binder_transaction_data_secctx tr;
4229 struct binder_transaction_data *trd = &tr.transaction_data;
4230 struct binder_work *w = NULL;
4231 struct list_head *list = NULL;
4232 struct binder_transaction *t = NULL;
4233 struct binder_thread *t_from;
4234 size_t trsize = sizeof(*trd);
4235
4236 binder_inner_proc_lock(proc);
4237 if (!binder_worklist_empty_ilocked(&thread->todo))
4238 list = &thread->todo;
4239 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4240 wait_for_proc_work)
4241 list = &proc->todo;
4242 else {
4243 binder_inner_proc_unlock(proc);
4244
4245 /* no data added */
4246 if (ptr - buffer == 4 && !thread->looper_need_return)
4247 goto retry;
4248 break;
4249 }
4250
4251 if (end - ptr < sizeof(tr) + 4) {
4252 binder_inner_proc_unlock(proc);
4253 break;
4254 }
4255 w = binder_dequeue_work_head_ilocked(list);
4256 if (binder_worklist_empty_ilocked(&thread->todo))
4257 thread->process_todo = false;
4258
4259 switch (w->type) {
4260 case BINDER_WORK_TRANSACTION: {
4261 binder_inner_proc_unlock(proc);
4262 t = container_of(w, struct binder_transaction, work);
4263 } break;
4264 case BINDER_WORK_RETURN_ERROR: {
4265 struct binder_error *e = container_of(
4266 w, struct binder_error, work);
4267
4268 WARN_ON(e->cmd == BR_OK);
4269 binder_inner_proc_unlock(proc);
4270 if (put_user(e->cmd, (uint32_t __user *)ptr))
4271 return -EFAULT;
4272 cmd = e->cmd;
4273 e->cmd = BR_OK;
4274 ptr += sizeof(uint32_t);
4275
4276 binder_stat_br(proc, thread, cmd);
4277 } break;
4278 case BINDER_WORK_TRANSACTION_COMPLETE: {
4279 binder_inner_proc_unlock(proc);
4280 cmd = BR_TRANSACTION_COMPLETE;
4281 kfree(w);
4282 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4283 if (put_user(cmd, (uint32_t __user *)ptr))
4284 return -EFAULT;
4285 ptr += sizeof(uint32_t);
4286
4287 binder_stat_br(proc, thread, cmd);
4288 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4289 "%d:%d BR_TRANSACTION_COMPLETE\n",
4290 proc->pid, thread->pid);
4291 } break;
4292 case BINDER_WORK_NODE: {
4293 struct binder_node *node = container_of(w, struct binder_node, work);
4294 int strong, weak;
4295 binder_uintptr_t node_ptr = node->ptr;
4296 binder_uintptr_t node_cookie = node->cookie;
4297 int node_debug_id = node->debug_id;
4298 int has_weak_ref;
4299 int has_strong_ref;
4300 void __user *orig_ptr = ptr;
4301
4302 BUG_ON(proc != node->proc);
4303 strong = node->internal_strong_refs ||
4304 node->local_strong_refs;
4305 weak = !hlist_empty(&node->refs) ||
4306 node->local_weak_refs ||
4307 node->tmp_refs || strong;
4308 has_strong_ref = node->has_strong_ref;
4309 has_weak_ref = node->has_weak_ref;
4310
4311 if (weak && !has_weak_ref) {
4312 node->has_weak_ref = 1;
4313 node->pending_weak_ref = 1;
4314 node->local_weak_refs++;
4315 }
4316 if (strong && !has_strong_ref) {
4317 node->has_strong_ref = 1;
4318 node->pending_strong_ref = 1;
4319 node->local_strong_refs++;
4320 }
4321 if (!strong && has_strong_ref)
4322 node->has_strong_ref = 0;
4323 if (!weak && has_weak_ref)
4324 node->has_weak_ref = 0;
4325 if (!weak && !strong) {
4326 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4327 "%d:%d node %d u%016llx c%016llx deleted\n",
4328 proc->pid, thread->pid,
4329 node_debug_id,
4330 (u64)node_ptr,
4331 (u64)node_cookie);
4332 rb_erase(&node->rb_node, &proc->nodes);
4333 binder_inner_proc_unlock(proc);
4334 binder_node_lock(node);
4335 /*
4336 * Acquire the node lock before freeing the
4337 * node to serialize with other threads that
4338 * may have been holding the node lock while
4339 * decrementing this node (avoids race where
4340 * this thread frees while the other thread
4341 * is unlocking the node after the final
4342 * decrement)
4343 */
4344 binder_node_unlock(node);
4345 binder_free_node(node);
4346 } else
4347 binder_inner_proc_unlock(proc);
4348
4349 if (weak && !has_weak_ref)
4350 ret = binder_put_node_cmd(
4351 proc, thread, &ptr, node_ptr,
4352 node_cookie, node_debug_id,
4353 BR_INCREFS, "BR_INCREFS");
4354 if (!ret && strong && !has_strong_ref)
4355 ret = binder_put_node_cmd(
4356 proc, thread, &ptr, node_ptr,
4357 node_cookie, node_debug_id,
4358 BR_ACQUIRE, "BR_ACQUIRE");
4359 if (!ret && !strong && has_strong_ref)
4360 ret = binder_put_node_cmd(
4361 proc, thread, &ptr, node_ptr,
4362 node_cookie, node_debug_id,
4363 BR_RELEASE, "BR_RELEASE");
4364 if (!ret && !weak && has_weak_ref)
4365 ret = binder_put_node_cmd(
4366 proc, thread, &ptr, node_ptr,
4367 node_cookie, node_debug_id,
4368 BR_DECREFS, "BR_DECREFS");
4369 if (orig_ptr == ptr)
4370 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4371 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4372 proc->pid, thread->pid,
4373 node_debug_id,
4374 (u64)node_ptr,
4375 (u64)node_cookie);
4376 if (ret)
4377 return ret;
4378 } break;
4379 case BINDER_WORK_DEAD_BINDER:
4380 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4381 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4382 struct binder_ref_death *death;
4383 uint32_t cmd;
4384 binder_uintptr_t cookie;
4385
4386 death = container_of(w, struct binder_ref_death, work);
4387 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4388 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4389 else
4390 cmd = BR_DEAD_BINDER;
4391 cookie = death->cookie;
4392
4393 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4394 "%d:%d %s %016llx\n",
4395 proc->pid, thread->pid,
4396 cmd == BR_DEAD_BINDER ?
4397 "BR_DEAD_BINDER" :
4398 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4399 (u64)cookie);
4400 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4401 binder_inner_proc_unlock(proc);
4402 kfree(death);
4403 binder_stats_deleted(BINDER_STAT_DEATH);
4404 } else {
4405 binder_enqueue_work_ilocked(
4406 w, &proc->delivered_death);
4407 binder_inner_proc_unlock(proc);
4408 }
4409 if (put_user(cmd, (uint32_t __user *)ptr))
4410 return -EFAULT;
4411 ptr += sizeof(uint32_t);
4412 if (put_user(cookie,
4413 (binder_uintptr_t __user *)ptr))
4414 return -EFAULT;
4415 ptr += sizeof(binder_uintptr_t);
4416 binder_stat_br(proc, thread, cmd);
4417 if (cmd == BR_DEAD_BINDER)
4418 goto done; /* DEAD_BINDER notifications can cause transactions */
4419 } break;
4420 default:
4421 binder_inner_proc_unlock(proc);
4422 pr_err("%d:%d: bad work type %d\n",
4423 proc->pid, thread->pid, w->type);
4424 break;
4425 }
4426
4427 if (!t)
4428 continue;
4429
4430 BUG_ON(t->buffer == NULL);
4431 if (t->buffer->target_node) {
4432 struct binder_node *target_node = t->buffer->target_node;
4433
4434 trd->target.ptr = target_node->ptr;
4435 trd->cookie = target_node->cookie;
4436 t->saved_priority = task_nice(current);
4437 if (t->priority < target_node->min_priority &&
4438 !(t->flags & TF_ONE_WAY))
4439 binder_set_nice(t->priority);
4440 else if (!(t->flags & TF_ONE_WAY) ||
4441 t->saved_priority > target_node->min_priority)
4442 binder_set_nice(target_node->min_priority);
4443 cmd = BR_TRANSACTION;
4444 } else {
4445 trd->target.ptr = 0;
4446 trd->cookie = 0;
4447 cmd = BR_REPLY;
4448 }
4449 trd->code = t->code;
4450 trd->flags = t->flags;
4451 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4452
4453 t_from = binder_get_txn_from(t);
4454 if (t_from) {
4455 struct task_struct *sender = t_from->proc->tsk;
4456
4457 trd->sender_pid =
4458 task_tgid_nr_ns(sender,
4459 task_active_pid_ns(current));
4460 } else {
4461 trd->sender_pid = 0;
4462 }
4463
4464 ret = binder_apply_fd_fixups(proc, t);
4465 if (ret) {
4466 struct binder_buffer *buffer = t->buffer;
4467 bool oneway = !!(t->flags & TF_ONE_WAY);
4468 int tid = t->debug_id;
4469
4470 if (t_from)
4471 binder_thread_dec_tmpref(t_from);
4472 buffer->transaction = NULL;
4473 binder_cleanup_transaction(t, "fd fixups failed",
4474 BR_FAILED_REPLY);
4475 binder_free_buf(proc, buffer);
4476 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4477 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4478 proc->pid, thread->pid,
4479 oneway ? "async " :
4480 (cmd == BR_REPLY ? "reply " : ""),
4481 tid, BR_FAILED_REPLY, ret, __LINE__);
4482 if (cmd == BR_REPLY) {
4483 cmd = BR_FAILED_REPLY;
4484 if (put_user(cmd, (uint32_t __user *)ptr))
4485 return -EFAULT;
4486 ptr += sizeof(uint32_t);
4487 binder_stat_br(proc, thread, cmd);
4488 break;
4489 }
4490 continue;
4491 }
4492 trd->data_size = t->buffer->data_size;
4493 trd->offsets_size = t->buffer->offsets_size;
4494 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4495 trd->data.ptr.offsets = trd->data.ptr.buffer +
4496 ALIGN(t->buffer->data_size,
4497 sizeof(void *));
4498
4499 tr.secctx = t->security_ctx;
4500 if (t->security_ctx) {
4501 cmd = BR_TRANSACTION_SEC_CTX;
4502 trsize = sizeof(tr);
4503 }
4504 if (put_user(cmd, (uint32_t __user *)ptr)) {
4505 if (t_from)
4506 binder_thread_dec_tmpref(t_from);
4507
4508 binder_cleanup_transaction(t, "put_user failed",
4509 BR_FAILED_REPLY);
4510
4511 return -EFAULT;
4512 }
4513 ptr += sizeof(uint32_t);
4514 if (copy_to_user(ptr, &tr, trsize)) {
4515 if (t_from)
4516 binder_thread_dec_tmpref(t_from);
4517
4518 binder_cleanup_transaction(t, "copy_to_user failed",
4519 BR_FAILED_REPLY);
4520
4521 return -EFAULT;
4522 }
4523 ptr += trsize;
4524
4525 trace_binder_transaction_received(t);
4526 binder_stat_br(proc, thread, cmd);
4527 binder_debug(BINDER_DEBUG_TRANSACTION,
4528 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4529 proc->pid, thread->pid,
4530 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4531 (cmd == BR_TRANSACTION_SEC_CTX) ?
4532 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4533 t->debug_id, t_from ? t_from->proc->pid : 0,
4534 t_from ? t_from->pid : 0, cmd,
4535 t->buffer->data_size, t->buffer->offsets_size,
4536 (u64)trd->data.ptr.buffer,
4537 (u64)trd->data.ptr.offsets);
4538
4539 if (t_from)
4540 binder_thread_dec_tmpref(t_from);
4541 t->buffer->allow_user_free = 1;
4542 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4543 binder_inner_proc_lock(thread->proc);
4544 t->to_parent = thread->transaction_stack;
4545 t->to_thread = thread;
4546 thread->transaction_stack = t;
4547 binder_inner_proc_unlock(thread->proc);
4548 } else {
4549 binder_free_transaction(t);
4550 }
4551 break;
4552 }
4553
4554done:
4555
4556 *consumed = ptr - buffer;
4557 binder_inner_proc_lock(proc);
4558 if (proc->requested_threads == 0 &&
4559 list_empty(&thread->proc->waiting_threads) &&
4560 proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
			       BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out
	     */) {
4564 proc->requested_threads++;
4565 binder_inner_proc_unlock(proc);
4566 binder_debug(BINDER_DEBUG_THREADS,
4567 "%d:%d BR_SPAWN_LOOPER\n",
4568 proc->pid, thread->pid);
4569 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4570 return -EFAULT;
4571 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4572 } else
4573 binder_inner_proc_unlock(proc);
4574 return 0;
4575}
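
/*
 * Userspace consumes the stream produced above as a sequence of 32-bit
 * BR_* codes, each followed by its fixed-size payload. A minimal sketch
 * (not driver code), assuming rbuf and bwr came back from a
 * BINDER_WRITE_READ ioctl and that handle_txn() is a hypothetical
 * dispatcher:
 *
 *	char *p = rbuf;
 *	char *end = rbuf + bwr.read_consumed;
 *	while (p < end) {
 *		uint32_t brcmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		switch (brcmd) {
 *		case BR_NOOP:
 *			break;
 *		case BR_TRANSACTION:
 *		case BR_REPLY:
 *			handle_txn((struct binder_transaction_data *)p);
 *			p += sizeof(struct binder_transaction_data);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 * A real looper (e.g. libbinder's) must decode the payload of every BR_*
 * code it can receive, including BR_TRANSACTION_SEC_CTX with its larger
 * binder_transaction_data_secctx payload.
 */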
4576
4577static void binder_release_work(struct binder_proc *proc,
4578 struct list_head *list)
4579{
4580 struct binder_work *w;
4581
4582 while (1) {
4583 w = binder_dequeue_work_head(proc, list);
4584 if (!w)
4585 return;
4586
4587 switch (w->type) {
4588 case BINDER_WORK_TRANSACTION: {
4589 struct binder_transaction *t;
4590
4591 t = container_of(w, struct binder_transaction, work);
4592
4593 binder_cleanup_transaction(t, "process died.",
4594 BR_DEAD_REPLY);
4595 } break;
4596 case BINDER_WORK_RETURN_ERROR: {
4597 struct binder_error *e = container_of(
4598 w, struct binder_error, work);
4599
4600 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4601 "undelivered TRANSACTION_ERROR: %u\n",
4602 e->cmd);
4603 } break;
4604 case BINDER_WORK_TRANSACTION_COMPLETE: {
4605 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4606 "undelivered TRANSACTION_COMPLETE\n");
4607 kfree(w);
4608 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4609 } break;
4610 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4611 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4612 struct binder_ref_death *death;
4613
4614 death = container_of(w, struct binder_ref_death, work);
4615 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4616 "undelivered death notification, %016llx\n",
4617 (u64)death->cookie);
4618 kfree(death);
4619 binder_stats_deleted(BINDER_STAT_DEATH);
4620 } break;
4621 default:
4622 pr_err("unexpected work type, %d, not freed\n",
4623 w->type);
4624 break;
4625 }
	}
}
4629
4630static struct binder_thread *binder_get_thread_ilocked(
4631 struct binder_proc *proc, struct binder_thread *new_thread)
4632{
4633 struct binder_thread *thread = NULL;
4634 struct rb_node *parent = NULL;
4635 struct rb_node **p = &proc->threads.rb_node;
4636
4637 while (*p) {
4638 parent = *p;
4639 thread = rb_entry(parent, struct binder_thread, rb_node);
4640
4641 if (current->pid < thread->pid)
4642 p = &(*p)->rb_left;
4643 else if (current->pid > thread->pid)
4644 p = &(*p)->rb_right;
4645 else
4646 return thread;
4647 }
4648 if (!new_thread)
4649 return NULL;
4650 thread = new_thread;
4651 binder_stats_created(BINDER_STAT_THREAD);
4652 thread->proc = proc;
4653 thread->pid = current->pid;
4654 atomic_set(&thread->tmp_ref, 0);
4655 init_waitqueue_head(&thread->wait);
4656 INIT_LIST_HEAD(&thread->todo);
4657 rb_link_node(&thread->rb_node, parent, p);
4658 rb_insert_color(&thread->rb_node, &proc->threads);
4659 thread->looper_need_return = true;
4660 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4661 thread->return_error.cmd = BR_OK;
4662 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4663 thread->reply_error.cmd = BR_OK;
4664 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4665 return thread;
4666}
4667
4668static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4669{
4670 struct binder_thread *thread;
4671 struct binder_thread *new_thread;
4672
4673 binder_inner_proc_lock(proc);
4674 thread = binder_get_thread_ilocked(proc, NULL);
4675 binder_inner_proc_unlock(proc);
4676 if (!thread) {
4677 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4678 if (new_thread == NULL)
4679 return NULL;
4680 binder_inner_proc_lock(proc);
4681 thread = binder_get_thread_ilocked(proc, new_thread);
4682 binder_inner_proc_unlock(proc);
4683 if (thread != new_thread)
4684 kfree(new_thread);
4685 }
4686 return thread;
4687}
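
/*
 * binder_get_thread() is an instance of the usual speculative-allocation
 * pattern for populating a structure protected by a spinlock, roughly:
 *
 *	lock(); obj = lookup(key); unlock();
 *	if (!obj) {
 *		new = kzalloc(sizeof(*new), GFP_KERNEL);
 *		lock(); obj = lookup_or_insert(key, new); unlock();
 *		if (obj != new)
 *			kfree(new);
 *	}
 *
 * lock()/lookup()/lookup_or_insert() are stand-ins for the inner proc
 * lock and binder_get_thread_ilocked(). A GFP_KERNEL allocation may
 * sleep, so it cannot be made while holding proc->inner_lock; if a
 * racing thread inserted first, the speculative allocation is freed.
 */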
4688
4689static void binder_free_proc(struct binder_proc *proc)
4690{
4691 BUG_ON(!list_empty(&proc->todo));
4692 BUG_ON(!list_empty(&proc->delivered_death));
4693 binder_alloc_deferred_release(&proc->alloc);
4694 put_task_struct(proc->tsk);
4695 binder_stats_deleted(BINDER_STAT_PROC);
4696 kfree(proc);
4697}
4698
4699static void binder_free_thread(struct binder_thread *thread)
4700{
4701 BUG_ON(!list_empty(&thread->todo));
4702 binder_stats_deleted(BINDER_STAT_THREAD);
4703 binder_proc_dec_tmpref(thread->proc);
4704 kfree(thread);
4705}
4706
4707static int binder_thread_release(struct binder_proc *proc,
4708 struct binder_thread *thread)
4709{
4710 struct binder_transaction *t;
4711 struct binder_transaction *send_reply = NULL;
4712 int active_transactions = 0;
4713 struct binder_transaction *last_t = NULL;
4714
4715 binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives after we remove this
	 * thread from proc->threads. The corresponding decrement is in
	 * binder_free_thread(), when the thread is actually freed.
	 */
4722 proc->tmp_ref++;
4723 /*
4724 * take a ref on this thread to ensure it
4725 * survives while we are releasing it
4726 */
4727 atomic_inc(&thread->tmp_ref);
4728 rb_erase(&thread->rb_node, &proc->threads);
4729 t = thread->transaction_stack;
4730 if (t) {
4731 spin_lock(&t->lock);
4732 if (t->to_thread == thread)
4733 send_reply = t;
4734 } else {
4735 __acquire(&t->lock);
4736 }
4737 thread->is_dead = true;
4738
4739 while (t) {
4740 last_t = t;
4741 active_transactions++;
4742 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4743 "release %d:%d transaction %d %s, still active\n",
4744 proc->pid, thread->pid,
4745 t->debug_id,
4746 (t->to_thread == thread) ? "in" : "out");
4747
4748 if (t->to_thread == thread) {
4749 t->to_proc = NULL;
4750 t->to_thread = NULL;
4751 if (t->buffer) {
4752 t->buffer->transaction = NULL;
4753 t->buffer = NULL;
4754 }
4755 t = t->to_parent;
4756 } else if (t->from == thread) {
4757 t->from = NULL;
4758 t = t->from_parent;
4759 } else
4760 BUG();
4761 spin_unlock(&last_t->lock);
4762 if (t)
4763 spin_lock(&t->lock);
4764 else
4765 __acquire(&t->lock);
4766 }
4767 /* annotation for sparse, lock not acquired in last iteration above */
4768 __release(&t->lock);
4769
4770 /*
4771 * If this thread used poll, make sure we remove the waitqueue
4772 * from any epoll data structures holding it with POLLFREE.
4773 * waitqueue_active() is safe to use here because we're holding
4774 * the inner lock.
4775 */
4776 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4777 waitqueue_active(&thread->wait)) {
4778 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4779 }
4780
4781 binder_inner_proc_unlock(thread->proc);
4782
	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll
	 * file descriptor being closed); ep_remove_waitqueue() holds an
	 * RCU read lock, so we can be sure it's done after calling
	 * synchronize_rcu().
	 */
4789 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4790 synchronize_rcu();
4791
4792 if (send_reply)
4793 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4794 binder_release_work(proc, &thread->todo);
4795 binder_thread_dec_tmpref(thread);
4796 return active_transactions;
4797}
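
/*
 * Example of the stack walk above: if the dying thread has an incoming
 * transaction T2 stacked on an outgoing T1 (transaction_stack == T2),
 * the loop first clears T2's to_thread/to_proc and follows to_parent to
 * T1, then clears T1's from and follows from_parent, dropping the
 * previous transaction's lock and taking the next one's at each step so
 * that a peer completing a transaction concurrently always observes a
 * consistent entry.
 */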
4798
4799static __poll_t binder_poll(struct file *filp,
4800 struct poll_table_struct *wait)
4801{
4802 struct binder_proc *proc = filp->private_data;
4803 struct binder_thread *thread = NULL;
4804 bool wait_for_proc_work;
4805
4806 thread = binder_get_thread(proc);
4807 if (!thread)
4808 return POLLERR;
4809
4810 binder_inner_proc_lock(thread->proc);
4811 thread->looper |= BINDER_LOOPER_STATE_POLL;
4812 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4813
4814 binder_inner_proc_unlock(thread->proc);
4815
4816 poll_wait(filp, &thread->wait, wait);
4817
4818 if (binder_has_work(thread, wait_for_proc_work))
4819 return EPOLLIN;
4820
4821 return 0;
4822}
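
/*
 * A minimal userspace sketch (not driver code) of multiplexing a binder
 * fd with poll(); drive_binder() is a hypothetical helper that issues
 * the BINDER_WRITE_READ ioctl:
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drive_binder(binder_fd);
 *
 * POLLIN here only means work may be available; the commands themselves
 * are still fetched through the ioctl path below.
 */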
4823
4824static int binder_ioctl_write_read(struct file *filp,
4825 unsigned int cmd, unsigned long arg,
4826 struct binder_thread *thread)
4827{
4828 int ret = 0;
4829 struct binder_proc *proc = filp->private_data;
4830 unsigned int size = _IOC_SIZE(cmd);
4831 void __user *ubuf = (void __user *)arg;
4832 struct binder_write_read bwr;
4833
4834 if (size != sizeof(struct binder_write_read)) {
4835 ret = -EINVAL;
4836 goto out;
4837 }
4838 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4839 ret = -EFAULT;
4840 goto out;
4841 }
4842 binder_debug(BINDER_DEBUG_READ_WRITE,
4843 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4844 proc->pid, thread->pid,
4845 (u64)bwr.write_size, (u64)bwr.write_buffer,
4846 (u64)bwr.read_size, (u64)bwr.read_buffer);
4847
4848 if (bwr.write_size > 0) {
4849 ret = binder_thread_write(proc, thread,
4850 bwr.write_buffer,
4851 bwr.write_size,
4852 &bwr.write_consumed);
4853 trace_binder_write_done(ret);
4854 if (ret < 0) {
4855 bwr.read_consumed = 0;
4856 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4857 ret = -EFAULT;
4858 goto out;
4859 }
4860 }
4861 if (bwr.read_size > 0) {
4862 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4863 bwr.read_size,
4864 &bwr.read_consumed,
4865 filp->f_flags & O_NONBLOCK);
4866 trace_binder_read_done(ret);
4867 binder_inner_proc_lock(proc);
4868 if (!binder_worklist_empty_ilocked(&proc->todo))
4869 binder_wakeup_proc_ilocked(proc);
4870 binder_inner_proc_unlock(proc);
4871 if (ret < 0) {
4872 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4873 ret = -EFAULT;
4874 goto out;
4875 }
4876 }
4877 binder_debug(BINDER_DEBUG_READ_WRITE,
4878 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4879 proc->pid, thread->pid,
4880 (u64)bwr.write_consumed, (u64)bwr.write_size,
4881 (u64)bwr.read_consumed, (u64)bwr.read_size);
4882 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4883 ret = -EFAULT;
4884 goto out;
4885 }
4886out:
4887 return ret;
4888}
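
/*
 * A minimal userspace sketch (not driver code) of driving this path: one
 * ioctl both submits BC_* commands and drains BR_* replies, with the
 * driver reporting progress through the two *_consumed fields. wbuf,
 * wlen and rbuf are assumed caller-provided buffers:
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
 *		.write_size = wlen,
 *		.read_buffer = (binder_uintptr_t)(uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -errno;
 *
 * On success, bwr.write_consumed reports how much of wbuf the driver
 * accepted and rbuf[0..bwr.read_consumed) holds the BR_* stream.
 */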
4889
4890static int binder_ioctl_set_ctx_mgr(struct file *filp,
4891 struct flat_binder_object *fbo)
4892{
4893 int ret = 0;
4894 struct binder_proc *proc = filp->private_data;
4895 struct binder_context *context = proc->context;
4896 struct binder_node *new_node;
4897 kuid_t curr_euid = current_euid();
4898
4899 mutex_lock(&context->context_mgr_node_lock);
4900 if (context->binder_context_mgr_node) {
4901 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4902 ret = -EBUSY;
4903 goto out;
4904 }
4905 ret = security_binder_set_context_mgr(proc->tsk);
4906 if (ret < 0)
4907 goto out;
4908 if (uid_valid(context->binder_context_mgr_uid)) {
4909 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4910 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4911 from_kuid(&init_user_ns, curr_euid),
4912 from_kuid(&init_user_ns,
4913 context->binder_context_mgr_uid));
4914 ret = -EPERM;
4915 goto out;
4916 }
4917 } else {
4918 context->binder_context_mgr_uid = curr_euid;
4919 }
4920 new_node = binder_new_node(proc, fbo);
4921 if (!new_node) {
4922 ret = -ENOMEM;
4923 goto out;
4924 }
4925 binder_node_lock(new_node);
4926 new_node->local_weak_refs++;
4927 new_node->local_strong_refs++;
4928 new_node->has_strong_ref = 1;
4929 new_node->has_weak_ref = 1;
4930 context->binder_context_mgr_node = new_node;
4931 binder_node_unlock(new_node);
4932 binder_put_node(new_node);
4933out:
4934 mutex_unlock(&context->context_mgr_node_lock);
4935 return ret;
4936}
4937
4938static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4939 struct binder_node_info_for_ref *info)
4940{
4941 struct binder_node *node;
4942 struct binder_context *context = proc->context;
4943 __u32 handle = info->handle;
4944
4945 if (info->strong_count || info->weak_count || info->reserved1 ||
4946 info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
				  proc->pid);
4949 return -EINVAL;
4950 }
4951
4952 /* This ioctl may only be used by the context manager */
4953 mutex_lock(&context->context_mgr_node_lock);
4954 if (!context->binder_context_mgr_node ||
4955 context->binder_context_mgr_node->proc != proc) {
4956 mutex_unlock(&context->context_mgr_node_lock);
4957 return -EPERM;
4958 }
4959 mutex_unlock(&context->context_mgr_node_lock);
4960
4961 node = binder_get_node_from_ref(proc, handle, true, NULL);
4962 if (!node)
4963 return -EINVAL;
4964
4965 info->strong_count = node->local_strong_refs +
4966 node->internal_strong_refs;
4967 info->weak_count = node->local_weak_refs;
4968
4969 binder_put_node(node);
4970
4971 return 0;
4972}
4973
4974static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4975 struct binder_node_debug_info *info)
4976{
4977 struct rb_node *n;
4978 binder_uintptr_t ptr = info->ptr;
4979
4980 memset(info, 0, sizeof(*info));
4981
4982 binder_inner_proc_lock(proc);
4983 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4984 struct binder_node *node = rb_entry(n, struct binder_node,
4985 rb_node);
4986 if (node->ptr > ptr) {
4987 info->ptr = node->ptr;
4988 info->cookie = node->cookie;
4989 info->has_strong_ref = node->has_strong_ref;
4990 info->has_weak_ref = node->has_weak_ref;
4991 break;
4992 }
4993 }
4994 binder_inner_proc_unlock(proc);
4995
4996 return 0;
4997}
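
/*
 * Userspace walks a process's nodes by feeding each returned ptr back in,
 * since the loop above reports the first node with a strictly greater ptr
 * and a zeroed struct once the tree is exhausted. A minimal sketch (not
 * driver code), with report_node() as a hypothetical consumer:
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *	do {
 *		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		if (info.ptr)
 *			report_node(&info);
 *	} while (info.ptr);
 */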
4998
4999static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5000{
5001 int ret;
5002 struct binder_proc *proc = filp->private_data;
5003 struct binder_thread *thread;
5004 unsigned int size = _IOC_SIZE(cmd);
5005 void __user *ubuf = (void __user *)arg;
5006
5007 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5008 proc->pid, current->pid, cmd, arg);*/
5009
5010 binder_selftest_alloc(&proc->alloc);
5011
5012 trace_binder_ioctl(cmd, arg);
5013
5014 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5015 if (ret)
5016 goto err_unlocked;
5017
5018 thread = binder_get_thread(proc);
5019 if (thread == NULL) {
5020 ret = -ENOMEM;
5021 goto err;
5022 }
5023
5024 switch (cmd) {
5025 case BINDER_WRITE_READ:
5026 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5027 if (ret)
5028 goto err;
5029 break;
5030 case BINDER_SET_MAX_THREADS: {
5031 int max_threads;
5032
5033 if (copy_from_user(&max_threads, ubuf,
5034 sizeof(max_threads))) {
5035 ret = -EINVAL;
5036 goto err;
5037 }
5038 binder_inner_proc_lock(proc);
5039 proc->max_threads = max_threads;
5040 binder_inner_proc_unlock(proc);
5041 break;
5042 }
5043 case BINDER_SET_CONTEXT_MGR_EXT: {
5044 struct flat_binder_object fbo;
5045
5046 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5047 ret = -EINVAL;
5048 goto err;
5049 }
5050 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5051 if (ret)
5052 goto err;
5053 break;
5054 }
5055 case BINDER_SET_CONTEXT_MGR:
5056 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5057 if (ret)
5058 goto err;
5059 break;
5060 case BINDER_THREAD_EXIT:
5061 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5062 proc->pid, thread->pid);
5063 binder_thread_release(proc, thread);
5064 thread = NULL;
5065 break;
5066 case BINDER_VERSION: {
5067 struct binder_version __user *ver = ubuf;
5068
5069 if (size != sizeof(struct binder_version)) {
5070 ret = -EINVAL;
5071 goto err;
5072 }
5073 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5074 &ver->protocol_version)) {
5075 ret = -EINVAL;
5076 goto err;
5077 }
5078 break;
5079 }
5080 case BINDER_GET_NODE_INFO_FOR_REF: {
5081 struct binder_node_info_for_ref info;
5082
5083 if (copy_from_user(&info, ubuf, sizeof(info))) {
5084 ret = -EFAULT;
5085 goto err;
5086 }
5087
5088 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5089 if (ret < 0)
5090 goto err;
5091
5092 if (copy_to_user(ubuf, &info, sizeof(info))) {
5093 ret = -EFAULT;
5094 goto err;
5095 }
5096
5097 break;
5098 }
5099 case BINDER_GET_NODE_DEBUG_INFO: {
5100 struct binder_node_debug_info info;
5101
5102 if (copy_from_user(&info, ubuf, sizeof(info))) {
5103 ret = -EFAULT;
5104 goto err;
5105 }
5106
5107 ret = binder_ioctl_get_node_debug_info(proc, &info);
5108 if (ret < 0)
5109 goto err;
5110
5111 if (copy_to_user(ubuf, &info, sizeof(info))) {
5112 ret = -EFAULT;
5113 goto err;
5114 }
5115 break;
5116 }
5117 default:
5118 ret = -EINVAL;
5119 goto err;
5120 }
5121 ret = 0;
5122err:
5123 if (thread)
5124 thread->looper_need_return = false;
5125 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5126 if (ret && ret != -ERESTARTSYS)
5127 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5128err_unlocked:
5129 trace_binder_ioctl_done(ret);
5130 return ret;
5131}
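
/*
 * A minimal userspace sketch (not driver code) of the simplest command
 * handled above, the version handshake a client typically performs right
 * after open(); reject() is a hypothetical error path:
 *
 *	struct binder_version vers;
 *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		reject(binder_fd);
 */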
5132
5133static void binder_vma_open(struct vm_area_struct *vma)
5134{
5135 struct binder_proc *proc = vma->vm_private_data;
5136
5137 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5138 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5139 proc->pid, vma->vm_start, vma->vm_end,
5140 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5141 (unsigned long)pgprot_val(vma->vm_page_prot));
5142}
5143
5144static void binder_vma_close(struct vm_area_struct *vma)
5145{
5146 struct binder_proc *proc = vma->vm_private_data;
5147
5148 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5149 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5150 proc->pid, vma->vm_start, vma->vm_end,
5151 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5152 (unsigned long)pgprot_val(vma->vm_page_prot));
5153 binder_alloc_vma_close(&proc->alloc);
5154}
5155
5156static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5157{
5158 return VM_FAULT_SIGBUS;
5159}
5160
5161static const struct vm_operations_struct binder_vm_ops = {
5162 .open = binder_vma_open,
5163 .close = binder_vma_close,
5164 .fault = binder_vm_fault,
5165};
5166
5167static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5168{
5169 int ret;
5170 struct binder_proc *proc = filp->private_data;
5171 const char *failure_string;
5172
5173 if (proc->tsk != current->group_leader)
5174 return -EINVAL;
5175
5176 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5177 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5178 __func__, proc->pid, vma->vm_start, vma->vm_end,
5179 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5180 (unsigned long)pgprot_val(vma->vm_page_prot));
5181
5182 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5183 ret = -EPERM;
5184 failure_string = "bad vm_flags";
5185 goto err_bad_arg;
5186 }
5187 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5188 vma->vm_flags &= ~VM_MAYWRITE;
5189
5190 vma->vm_ops = &binder_vm_ops;
5191 vma->vm_private_data = proc;
5192
5193 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5194 if (ret)
5195 return ret;
5196 return 0;
5197
5198err_bad_arg:
5199 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5200 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5201 return ret;
5202}
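
/*
 * A minimal userspace sketch (not driver code) of setting up the buffer
 * space this handler serves. The mapping must be read-only for userspace
 * (FORBIDDEN_MMAP_FLAGS rejects VM_WRITE and VM_MAYWRITE is cleared
 * above); only the driver copies transaction data into it:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, MAP_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * MAP_SIZE is an assumed application-chosen constant; libbinder
 * historically uses about 1MB minus two pages.
 */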
5203
5204static int binder_open(struct inode *nodp, struct file *filp)
5205{
5206 struct binder_proc *proc;
5207 struct binder_device *binder_dev;
5208 struct binderfs_info *info;
5209 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5210
5211 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5212 current->group_leader->pid, current->pid);
5213
5214 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5215 if (proc == NULL)
5216 return -ENOMEM;
5217 spin_lock_init(&proc->inner_lock);
5218 spin_lock_init(&proc->outer_lock);
5219 get_task_struct(current->group_leader);
5220 proc->tsk = current->group_leader;
5221 INIT_LIST_HEAD(&proc->todo);
5222 proc->default_priority = task_nice(current);
5223 /* binderfs stashes devices in i_private */
5224 if (is_binderfs_device(nodp)) {
5225 binder_dev = nodp->i_private;
5226 info = nodp->i_sb->s_fs_info;
5227 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5228 } else {
5229 binder_dev = container_of(filp->private_data,
5230 struct binder_device, miscdev);
5231 }
5232 proc->context = &binder_dev->context;
5233 binder_alloc_init(&proc->alloc);
5234
5235 binder_stats_created(BINDER_STAT_PROC);
5236 proc->pid = current->group_leader->pid;
5237 INIT_LIST_HEAD(&proc->delivered_death);
5238 INIT_LIST_HEAD(&proc->waiting_threads);
5239 filp->private_data = proc;
5240
5241 mutex_lock(&binder_procs_lock);
5242 hlist_add_head(&proc->proc_node, &binder_procs);
5243 mutex_unlock(&binder_procs_lock);
5244
5245 if (binder_debugfs_dir_entry_proc) {
5246 char strbuf[11];
5247
5248 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * print all contexts that a given PID has anyway, so this
		 * is not a problem.
		 */
5256 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5257 binder_debugfs_dir_entry_proc,
5258 (void *)(unsigned long)proc->pid,
5259 &proc_fops);
5260 }
5261
5262 if (binder_binderfs_dir_entry_proc) {
5263 char strbuf[11];
5264 struct dentry *binderfs_entry;
5265
5266 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process-specific log file is
		 * shared between contexts. If the file has already been
		 * created for a process, the following binderfs_create_file()
		 * call will fail with error code EEXIST if another context
		 * of the same process invoked binder_open(). This is fine
		 * since, as with debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
5276 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5277 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5278 if (!IS_ERR(binderfs_entry)) {
5279 proc->binderfs_entry = binderfs_entry;
5280 } else {
5281 int error;
5282
5283 error = PTR_ERR(binderfs_entry);
5284 if (error != -EEXIST) {
5285 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5286 strbuf, error);
5287 }
5288 }
5289 }
5290
5291 return 0;
5292}
5293
5294static int binder_flush(struct file *filp, fl_owner_t id)
5295{
5296 struct binder_proc *proc = filp->private_data;
5297
5298 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5299
5300 return 0;
5301}
5302
5303static void binder_deferred_flush(struct binder_proc *proc)
5304{
5305 struct rb_node *n;
5306 int wake_count = 0;
5307
5308 binder_inner_proc_lock(proc);
5309 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5310 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5311
5312 thread->looper_need_return = true;
5313 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5314 wake_up_interruptible(&thread->wait);
5315 wake_count++;
5316 }
5317 }
5318 binder_inner_proc_unlock(proc);
5319
5320 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5321 "binder_flush: %d woke %d threads\n", proc->pid,
5322 wake_count);
5323}
5324
5325static int binder_release(struct inode *nodp, struct file *filp)
5326{
5327 struct binder_proc *proc = filp->private_data;
5328
5329 debugfs_remove(proc->debugfs_entry);
5330
5331 if (proc->binderfs_entry) {
5332 binderfs_remove_file(proc->binderfs_entry);
5333 proc->binderfs_entry = NULL;
5334 }
5335
5336 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5337
5338 return 0;
5339}
5340
5341static int binder_node_release(struct binder_node *node, int refs)
5342{
5343 struct binder_ref *ref;
5344 int death = 0;
5345 struct binder_proc *proc = node->proc;
5346
5347 binder_release_work(proc, &node->async_todo);
5348
5349 binder_node_lock(node);
5350 binder_inner_proc_lock(proc);
5351 binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
5355 BUG_ON(!node->tmp_refs);
5356 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5357 binder_inner_proc_unlock(proc);
5358 binder_node_unlock(node);
5359 binder_free_node(node);
5360
5361 return refs;
5362 }
5363
5364 node->proc = NULL;
5365 node->local_strong_refs = 0;
5366 node->local_weak_refs = 0;
5367 binder_inner_proc_unlock(proc);
5368
5369 spin_lock(&binder_dead_nodes_lock);
5370 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5371 spin_unlock(&binder_dead_nodes_lock);
5372
5373 hlist_for_each_entry(ref, &node->refs, node_entry) {
5374 refs++;
5375 /*
5376 * Need the node lock to synchronize
5377 * with new notification requests and the
5378 * inner lock to synchronize with queued
5379 * death notifications.
5380 */
5381 binder_inner_proc_lock(ref->proc);
5382 if (!ref->death) {
5383 binder_inner_proc_unlock(ref->proc);
5384 continue;
5385 }
5386
5387 death++;
5388
5389 BUG_ON(!list_empty(&ref->death->work.entry));
5390 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5391 binder_enqueue_work_ilocked(&ref->death->work,
5392 &ref->proc->todo);
5393 binder_wakeup_proc_ilocked(ref->proc);
5394 binder_inner_proc_unlock(ref->proc);
5395 }
5396
5397 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5398 "node %d now dead, refs %d, death %d\n",
5399 node->debug_id, refs, death);
5400 binder_node_unlock(node);
5401 binder_put_node(node);
5402
5403 return refs;
5404}
5405
5406static void binder_deferred_release(struct binder_proc *proc)
5407{
5408 struct binder_context *context = proc->context;
5409 struct rb_node *n;
5410 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5411
5412 mutex_lock(&binder_procs_lock);
5413 hlist_del(&proc->proc_node);
5414 mutex_unlock(&binder_procs_lock);
5415
5416 mutex_lock(&context->context_mgr_node_lock);
5417 if (context->binder_context_mgr_node &&
5418 context->binder_context_mgr_node->proc == proc) {
5419 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5420 "%s: %d context_mgr_node gone\n",
5421 __func__, proc->pid);
5422 context->binder_context_mgr_node = NULL;
5423 }
5424 mutex_unlock(&context->context_mgr_node_lock);
5425 binder_inner_proc_lock(proc);
5426 /*
5427 * Make sure proc stays alive after we
5428 * remove all the threads
5429 */
5430 proc->tmp_ref++;
5431
5432 proc->is_dead = true;
5433 threads = 0;
5434 active_transactions = 0;
5435 while ((n = rb_first(&proc->threads))) {
5436 struct binder_thread *thread;
5437
5438 thread = rb_entry(n, struct binder_thread, rb_node);
5439 binder_inner_proc_unlock(proc);
5440 threads++;
5441 active_transactions += binder_thread_release(proc, thread);
5442 binder_inner_proc_lock(proc);
5443 }
5444
5445 nodes = 0;
5446 incoming_refs = 0;
5447 while ((n = rb_first(&proc->nodes))) {
5448 struct binder_node *node;
5449
5450 node = rb_entry(n, struct binder_node, rb_node);
5451 nodes++;
5452 /*
5453 * take a temporary ref on the node before
5454 * calling binder_node_release() which will either
5455 * kfree() the node or call binder_put_node()
5456 */
5457 binder_inc_node_tmpref_ilocked(node);
5458 rb_erase(&node->rb_node, &proc->nodes);
5459 binder_inner_proc_unlock(proc);
5460 incoming_refs = binder_node_release(node, incoming_refs);
5461 binder_inner_proc_lock(proc);
5462 }
5463 binder_inner_proc_unlock(proc);
5464
5465 outgoing_refs = 0;
5466 binder_proc_lock(proc);
5467 while ((n = rb_first(&proc->refs_by_desc))) {
5468 struct binder_ref *ref;
5469
5470 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5471 outgoing_refs++;
5472 binder_cleanup_ref_olocked(ref);
5473 binder_proc_unlock(proc);
5474 binder_free_ref(ref);
5475 binder_proc_lock(proc);
5476 }
5477 binder_proc_unlock(proc);
5478
5479 binder_release_work(proc, &proc->todo);
5480 binder_release_work(proc, &proc->delivered_death);
5481
5482 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5483 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5484 __func__, proc->pid, threads, nodes, incoming_refs,
5485 outgoing_refs, active_transactions);
5486
5487 binder_proc_dec_tmpref(proc);
5488}
5489
5490static void binder_deferred_func(struct work_struct *work)
5491{
5492 struct binder_proc *proc;
5493
5494 int defer;
5495
5496 do {
5497 mutex_lock(&binder_deferred_lock);
5498 if (!hlist_empty(&binder_deferred_list)) {
5499 proc = hlist_entry(binder_deferred_list.first,
5500 struct binder_proc, deferred_work_node);
5501 hlist_del_init(&proc->deferred_work_node);
5502 defer = proc->deferred_work;
5503 proc->deferred_work = 0;
5504 } else {
5505 proc = NULL;
5506 defer = 0;
5507 }
5508 mutex_unlock(&binder_deferred_lock);
5509
5510 if (defer & BINDER_DEFERRED_FLUSH)
5511 binder_deferred_flush(proc);
5512
5513 if (defer & BINDER_DEFERRED_RELEASE)
5514 binder_deferred_release(proc); /* frees proc */
5515 } while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5518
5519static void
5520binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5521{
5522 mutex_lock(&binder_deferred_lock);
5523 proc->deferred_work |= defer;
5524 if (hlist_unhashed(&proc->deferred_work_node)) {
5525 hlist_add_head(&proc->deferred_work_node,
5526 &binder_deferred_list);
5527 schedule_work(&binder_deferred_work);
5528 }
5529 mutex_unlock(&binder_deferred_lock);
5530}
5531
5532static void print_binder_transaction_ilocked(struct seq_file *m,
5533 struct binder_proc *proc,
5534 const char *prefix,
5535 struct binder_transaction *t)
5536{
5537 struct binder_proc *to_proc;
5538 struct binder_buffer *buffer = t->buffer;
5539
5540 spin_lock(&t->lock);
5541 to_proc = t->to_proc;
5542 seq_printf(m,
5543 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5544 prefix, t->debug_id, t,
5545 t->from ? t->from->proc->pid : 0,
5546 t->from ? t->from->pid : 0,
5547 to_proc ? to_proc->pid : 0,
5548 t->to_thread ? t->to_thread->pid : 0,
5549 t->code, t->flags, t->priority, t->need_reply);
5550 spin_unlock(&t->lock);
5551
5552 if (proc != to_proc) {
5553 /*
5554 * Can only safely deref buffer if we are holding the
5555 * correct proc inner lock for this node
5556 */
5557 seq_puts(m, "\n");
5558 return;
5559 }
5560
5561 if (buffer == NULL) {
5562 seq_puts(m, " buffer free\n");
5563 return;
5564 }
5565 if (buffer->target_node)
5566 seq_printf(m, " node %d", buffer->target_node->debug_id);
5567 seq_printf(m, " size %zd:%zd data %pK\n",
5568 buffer->data_size, buffer->offsets_size,
5569 buffer->user_data);
5570}
5571
5572static void print_binder_work_ilocked(struct seq_file *m,
5573 struct binder_proc *proc,
5574 const char *prefix,
5575 const char *transaction_prefix,
5576 struct binder_work *w)
5577{
5578 struct binder_node *node;
5579 struct binder_transaction *t;
5580
5581 switch (w->type) {
5582 case BINDER_WORK_TRANSACTION:
5583 t = container_of(w, struct binder_transaction, work);
5584 print_binder_transaction_ilocked(
5585 m, proc, transaction_prefix, t);
5586 break;
5587 case BINDER_WORK_RETURN_ERROR: {
5588 struct binder_error *e = container_of(
5589 w, struct binder_error, work);
5590
5591 seq_printf(m, "%stransaction error: %u\n",
5592 prefix, e->cmd);
5593 } break;
5594 case BINDER_WORK_TRANSACTION_COMPLETE:
5595 seq_printf(m, "%stransaction complete\n", prefix);
5596 break;
5597 case BINDER_WORK_NODE:
5598 node = container_of(w, struct binder_node, work);
5599 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5600 prefix, node->debug_id,
5601 (u64)node->ptr, (u64)node->cookie);
5602 break;
5603 case BINDER_WORK_DEAD_BINDER:
5604 seq_printf(m, "%shas dead binder\n", prefix);
5605 break;
5606 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5607 seq_printf(m, "%shas cleared dead binder\n", prefix);
5608 break;
5609 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5610 seq_printf(m, "%shas cleared death notification\n", prefix);
5611 break;
5612 default:
5613 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5614 break;
5615 }
5616}
5617
5618static void print_binder_thread_ilocked(struct seq_file *m,
5619 struct binder_thread *thread,
5620 int print_always)
5621{
5622 struct binder_transaction *t;
5623 struct binder_work *w;
5624 size_t start_pos = m->count;
5625 size_t header_pos;
5626
5627 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5628 thread->pid, thread->looper,
5629 thread->looper_need_return,
5630 atomic_read(&thread->tmp_ref));
5631 header_pos = m->count;
5632 t = thread->transaction_stack;
5633 while (t) {
5634 if (t->from == thread) {
5635 print_binder_transaction_ilocked(m, thread->proc,
5636 " outgoing transaction", t);
5637 t = t->from_parent;
5638 } else if (t->to_thread == thread) {
5639 print_binder_transaction_ilocked(m, thread->proc,
5640 " incoming transaction", t);
5641 t = t->to_parent;
5642 } else {
5643 print_binder_transaction_ilocked(m, thread->proc,
5644 " bad transaction", t);
5645 t = NULL;
5646 }
5647 }
5648 list_for_each_entry(w, &thread->todo, entry) {
5649 print_binder_work_ilocked(m, thread->proc, " ",
5650 " pending transaction", w);
5651 }
5652 if (!print_always && m->count == header_pos)
5653 m->count = start_pos;
5654}
5655
5656static void print_binder_node_nilocked(struct seq_file *m,
5657 struct binder_node *node)
5658{
5659 struct binder_ref *ref;
5660 struct binder_work *w;
5661 int count;
5662
5663 count = 0;
5664 hlist_for_each_entry(ref, &node->refs, node_entry)
5665 count++;
5666
5667 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5668 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5669 node->has_strong_ref, node->has_weak_ref,
5670 node->local_strong_refs, node->local_weak_refs,
5671 node->internal_strong_refs, count, node->tmp_refs);
5672 if (count) {
5673 seq_puts(m, " proc");
5674 hlist_for_each_entry(ref, &node->refs, node_entry)
5675 seq_printf(m, " %d", ref->proc->pid);
5676 }
5677 seq_puts(m, "\n");
5678 if (node->proc) {
5679 list_for_each_entry(w, &node->async_todo, entry)
5680 print_binder_work_ilocked(m, node->proc, " ",
5681 " pending async transaction", w);
5682 }
5683}
5684
5685static void print_binder_ref_olocked(struct seq_file *m,
5686 struct binder_ref *ref)
5687{
5688 binder_node_lock(ref->node);
5689 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5690 ref->data.debug_id, ref->data.desc,
5691 ref->node->proc ? "" : "dead ",
5692 ref->node->debug_id, ref->data.strong,
5693 ref->data.weak, ref->death);
5694 binder_node_unlock(ref->node);
5695}
5696
5697static void print_binder_proc(struct seq_file *m,
5698 struct binder_proc *proc, int print_all)
5699{
5700 struct binder_work *w;
5701 struct rb_node *n;
5702 size_t start_pos = m->count;
5703 size_t header_pos;
5704 struct binder_node *last_node = NULL;
5705
5706 seq_printf(m, "proc %d\n", proc->pid);
5707 seq_printf(m, "context %s\n", proc->context->name);
5708 header_pos = m->count;
5709
5710 binder_inner_proc_lock(proc);
5711 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5712 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5713 rb_node), print_all);
5714
5715 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5716 struct binder_node *node = rb_entry(n, struct binder_node,
5717 rb_node);
5718 if (!print_all && !node->has_async_transaction)
5719 continue;
5720
5721 /*
5722 * take a temporary reference on the node so it
5723 * survives and isn't removed from the tree
5724 * while we print it.
5725 */
5726 binder_inc_node_tmpref_ilocked(node);
5727 /* Need to drop inner lock to take node lock */
5728 binder_inner_proc_unlock(proc);
5729 if (last_node)
5730 binder_put_node(last_node);
5731 binder_node_inner_lock(node);
5732 print_binder_node_nilocked(m, node);
5733 binder_node_inner_unlock(node);
5734 last_node = node;
5735 binder_inner_proc_lock(proc);
5736 }
5737 binder_inner_proc_unlock(proc);
5738 if (last_node)
5739 binder_put_node(last_node);
5740
5741 if (print_all) {
5742 binder_proc_lock(proc);
5743 for (n = rb_first(&proc->refs_by_desc);
5744 n != NULL;
5745 n = rb_next(n))
5746 print_binder_ref_olocked(m, rb_entry(n,
5747 struct binder_ref,
5748 rb_node_desc));
5749 binder_proc_unlock(proc);
5750 }
5751 binder_alloc_print_allocated(m, &proc->alloc);
5752 binder_inner_proc_lock(proc);
5753 list_for_each_entry(w, &proc->todo, entry)
5754 print_binder_work_ilocked(m, proc, " ",
5755 " pending transaction", w);
5756 list_for_each_entry(w, &proc->delivered_death, entry) {
5757 seq_puts(m, " has delivered dead binder\n");
5758 break;
5759 }
5760 binder_inner_proc_unlock(proc);
5761 if (!print_all && m->count == header_pos)
5762 m->count = start_pos;
5763}
5764
5765static const char * const binder_return_strings[] = {
5766 "BR_ERROR",
5767 "BR_OK",
5768 "BR_TRANSACTION",
5769 "BR_REPLY",
5770 "BR_ACQUIRE_RESULT",
5771 "BR_DEAD_REPLY",
5772 "BR_TRANSACTION_COMPLETE",
5773 "BR_INCREFS",
5774 "BR_ACQUIRE",
5775 "BR_RELEASE",
5776 "BR_DECREFS",
5777 "BR_ATTEMPT_ACQUIRE",
5778 "BR_NOOP",
5779 "BR_SPAWN_LOOPER",
5780 "BR_FINISHED",
5781 "BR_DEAD_BINDER",
5782 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5783 "BR_FAILED_REPLY"
5784};
5785
5786static const char * const binder_command_strings[] = {
5787 "BC_TRANSACTION",
5788 "BC_REPLY",
5789 "BC_ACQUIRE_RESULT",
5790 "BC_FREE_BUFFER",
5791 "BC_INCREFS",
5792 "BC_ACQUIRE",
5793 "BC_RELEASE",
5794 "BC_DECREFS",
5795 "BC_INCREFS_DONE",
5796 "BC_ACQUIRE_DONE",
5797 "BC_ATTEMPT_ACQUIRE",
5798 "BC_REGISTER_LOOPER",
5799 "BC_ENTER_LOOPER",
5800 "BC_EXIT_LOOPER",
5801 "BC_REQUEST_DEATH_NOTIFICATION",
5802 "BC_CLEAR_DEATH_NOTIFICATION",
5803 "BC_DEAD_BINDER_DONE",
5804 "BC_TRANSACTION_SG",
5805 "BC_REPLY_SG",
5806};
5807
5808static const char * const binder_objstat_strings[] = {
5809 "proc",
5810 "thread",
5811 "node",
5812 "ref",
5813 "death",
5814 "transaction",
5815 "transaction_complete"
5816};
5817
5818static void print_binder_stats(struct seq_file *m, const char *prefix,
5819 struct binder_stats *stats)
5820{
5821 int i;
5822
5823 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5824 ARRAY_SIZE(binder_command_strings));
5825 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5826 int temp = atomic_read(&stats->bc[i]);
5827
5828 if (temp)
5829 seq_printf(m, "%s%s: %d\n", prefix,
5830 binder_command_strings[i], temp);
5831 }
5832
5833 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5834 ARRAY_SIZE(binder_return_strings));
5835 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5836 int temp = atomic_read(&stats->br[i]);
5837
5838 if (temp)
5839 seq_printf(m, "%s%s: %d\n", prefix,
5840 binder_return_strings[i], temp);
5841 }
5842
5843 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5844 ARRAY_SIZE(binder_objstat_strings));
5845 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5846 ARRAY_SIZE(stats->obj_deleted));
5847 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5848 int created = atomic_read(&stats->obj_created[i]);
5849 int deleted = atomic_read(&stats->obj_deleted[i]);
5850
5851 if (created || deleted)
5852 seq_printf(m, "%s%s: active %d total %d\n",
5853 prefix,
5854 binder_objstat_strings[i],
5855 created - deleted,
5856 created);
5857 }
5858}
5859
5860static void print_binder_proc_stats(struct seq_file *m,
5861 struct binder_proc *proc)
5862{
5863 struct binder_work *w;
5864 struct binder_thread *thread;
5865 struct rb_node *n;
5866 int count, strong, weak, ready_threads;
5867 size_t free_async_space =
5868 binder_alloc_get_free_async_space(&proc->alloc);
5869
5870 seq_printf(m, "proc %d\n", proc->pid);
5871 seq_printf(m, "context %s\n", proc->context->name);
5872 count = 0;
5873 ready_threads = 0;
5874 binder_inner_proc_lock(proc);
5875 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5876 count++;
5877
5878 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5879 ready_threads++;
5880
5881 seq_printf(m, " threads: %d\n", count);
5882 seq_printf(m, " requested threads: %d+%d/%d\n"
5883 " ready threads %d\n"
5884 " free async space %zd\n", proc->requested_threads,
5885 proc->requested_threads_started, proc->max_threads,
5886 ready_threads,
5887 free_async_space);
5888 count = 0;
5889 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5890 count++;
5891 binder_inner_proc_unlock(proc);
5892 seq_printf(m, " nodes: %d\n", count);
5893 count = 0;
5894 strong = 0;
5895 weak = 0;
5896 binder_proc_lock(proc);
5897 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5898 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5899 rb_node_desc);
5900 count++;
5901 strong += ref->data.strong;
5902 weak += ref->data.weak;
5903 }
5904 binder_proc_unlock(proc);
5905 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5906
5907 count = binder_alloc_get_allocated_count(&proc->alloc);
5908 seq_printf(m, " buffers: %d\n", count);
5909
5910 binder_alloc_print_pages(m, &proc->alloc);
5911
5912 count = 0;
5913 binder_inner_proc_lock(proc);
5914 list_for_each_entry(w, &proc->todo, entry) {
5915 if (w->type == BINDER_WORK_TRANSACTION)
5916 count++;
5917 }
5918 binder_inner_proc_unlock(proc);
5919 seq_printf(m, " pending transactions: %d\n", count);
5920
5921 print_binder_stats(m, " ", &proc->stats);
5922}
5925int binder_state_show(struct seq_file *m, void *unused)
5926{
5927 struct binder_proc *proc;
5928 struct binder_node *node;
5929 struct binder_node *last_node = NULL;
5930
5931 seq_puts(m, "binder state:\n");
5932
5933 spin_lock(&binder_dead_nodes_lock);
5934 if (!hlist_empty(&binder_dead_nodes))
5935 seq_puts(m, "dead nodes:\n");
5936 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5937 /*
5938 * take a temporary reference on the node so it
5939 * survives and isn't removed from the list
5940 * while we print it.
5941 */
5942 node->tmp_refs++;
5943 spin_unlock(&binder_dead_nodes_lock);
5944 if (last_node)
5945 binder_put_node(last_node);
5946 binder_node_lock(node);
5947 print_binder_node_nilocked(m, node);
5948 binder_node_unlock(node);
5949 last_node = node;
5950 spin_lock(&binder_dead_nodes_lock);
5951 }
5952 spin_unlock(&binder_dead_nodes_lock);
5953 if (last_node)
5954 binder_put_node(last_node);
5955
5956 mutex_lock(&binder_procs_lock);
5957 hlist_for_each_entry(proc, &binder_procs, proc_node)
5958 print_binder_proc(m, proc, 1);
5959 mutex_unlock(&binder_procs_lock);
5960
5961 return 0;
5962}
5963
5964int binder_stats_show(struct seq_file *m, void *unused)
5965{
5966 struct binder_proc *proc;
5967
5968 seq_puts(m, "binder stats:\n");
5969
5970 print_binder_stats(m, "", &binder_stats);
5971
5972 mutex_lock(&binder_procs_lock);
5973 hlist_for_each_entry(proc, &binder_procs, proc_node)
5974 print_binder_proc_stats(m, proc);
5975 mutex_unlock(&binder_procs_lock);
5976
5977 return 0;
5978}
5979
5980int binder_transactions_show(struct seq_file *m, void *unused)
5981{
5982 struct binder_proc *proc;
5983
5984 seq_puts(m, "binder transactions:\n");
5985 mutex_lock(&binder_procs_lock);
5986 hlist_for_each_entry(proc, &binder_procs, proc_node)
5987 print_binder_proc(m, proc, 0);
5988 mutex_unlock(&binder_procs_lock);
5989
5990 return 0;
5991}
5992
5993static int proc_show(struct seq_file *m, void *unused)
5994{
5995 struct binder_proc *itr;
5996 int pid = (unsigned long)m->private;
5997
5998 mutex_lock(&binder_procs_lock);
5999 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6000 if (itr->pid == pid) {
6001 seq_puts(m, "binder proc state:\n");
6002 print_binder_proc(m, itr, 1);
6003 }
6004 }
6005 mutex_unlock(&binder_procs_lock);
6006
6007 return 0;
6008}
6009
6010static void print_binder_transaction_log_entry(struct seq_file *m,
6011 struct binder_transaction_log_entry *e)
6012{
6013 int debug_id = READ_ONCE(e->debug_id_done);
6014 /*
6015 * read barrier to guarantee debug_id_done read before
6016 * we print the log values
6017 */
6018 smp_rmb();
6019 seq_printf(m,
6020 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6021 e->debug_id, (e->call_type == 2) ? "reply" :
6022 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6023 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6024 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6025 e->return_error, e->return_error_param,
6026 e->return_error_line);
	/*
	 * read barrier to guarantee the read of debug_id_done happens
	 * after we are done printing the fields of the entry
	 */
6031 smp_rmb();
6032 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6033 "\n" : " (incomplete)\n");
6034}
6035
6036int binder_transaction_log_show(struct seq_file *m, void *unused)
6037{
6038 struct binder_transaction_log *log = m->private;
6039 unsigned int log_cur = atomic_read(&log->cur);
6040 unsigned int count;
6041 unsigned int cur;
6042 int i;
6043
6044 count = log_cur + 1;
6045 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6046 0 : count % ARRAY_SIZE(log->entry);
6047 if (count > ARRAY_SIZE(log->entry) || log->full)
6048 count = ARRAY_SIZE(log->entry);
6049 for (i = 0; i < count; i++) {
6050 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6051
6052 print_binder_transaction_log_entry(m, &log->entry[index]);
6053 }
6054 return 0;
6055}
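
/*
 * Worked example of the index math above, assuming a 32-entry ring: with
 * log->cur == 5 and !log->full, count = 6 and cur = 0, so entries 0..5
 * are printed in insertion order. Once the ring has wrapped (log->full)
 * with log->cur == 40, cur = 41 % 32 = 9 and count is clamped to 32, so
 * printing starts at the oldest surviving entry (index 9) and covers all
 * 32 slots.
 */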
6056
6057const struct file_operations binder_fops = {
6058 .owner = THIS_MODULE,
6059 .poll = binder_poll,
6060 .unlocked_ioctl = binder_ioctl,
6061 .compat_ioctl = binder_ioctl,
6062 .mmap = binder_mmap,
6063 .open = binder_open,
6064 .flush = binder_flush,
6065 .release = binder_release,
6066};
6067
6068static int __init init_binder_device(const char *name)
6069{
6070 int ret;
6071 struct binder_device *binder_device;
6072
6073 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6074 if (!binder_device)
6075 return -ENOMEM;
6076
6077 binder_device->miscdev.fops = &binder_fops;
6078 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6079 binder_device->miscdev.name = name;
6080
6081 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6082 binder_device->context.name = name;
6083 mutex_init(&binder_device->context.context_mgr_node_lock);
6084
6085 ret = misc_register(&binder_device->miscdev);
6086 if (ret < 0) {
6087 kfree(binder_device);
6088 return ret;
6089 }
6090
6091 hlist_add_head(&binder_device->hlist, &binder_devices);
6092
6093 return ret;
6094}
6095
6096static int __init binder_init(void)
6097{
6098 int ret;
6099 char *device_name, *device_tmp;
6100 struct binder_device *device;
6101 struct hlist_node *tmp;
6102 char *device_names = NULL;
6103
6104 ret = binder_alloc_shrinker_init();
6105 if (ret)
6106 return ret;
6107
6108 atomic_set(&binder_transaction_log.cur, ~0U);
6109 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6110
6111 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6112 if (binder_debugfs_dir_entry_root)
6113 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6114 binder_debugfs_dir_entry_root);
6115
6116 if (binder_debugfs_dir_entry_root) {
6117 debugfs_create_file("state",
6118 0444,
6119 binder_debugfs_dir_entry_root,
6120 NULL,
6121 &binder_state_fops);
6122 debugfs_create_file("stats",
6123 0444,
6124 binder_debugfs_dir_entry_root,
6125 NULL,
6126 &binder_stats_fops);
6127 debugfs_create_file("transactions",
6128 0444,
6129 binder_debugfs_dir_entry_root,
6130 NULL,
6131 &binder_transactions_fops);
6132 debugfs_create_file("transaction_log",
6133 0444,
6134 binder_debugfs_dir_entry_root,
6135 &binder_transaction_log,
6136 &binder_transaction_log_fops);
6137 debugfs_create_file("failed_transaction_log",
6138 0444,
6139 binder_debugfs_dir_entry_root,
6140 &binder_transaction_log_failed,
6141 &binder_transaction_log_fops);
6142 }
6143
6144 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6145 strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module parameter string, because we don't want
		 * to tokenize it in-place.
		 */
6150 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6151 if (!device_names) {
6152 ret = -ENOMEM;
6153 goto err_alloc_device_names_failed;
6154 }
6155
6156 device_tmp = device_names;
6157 while ((device_name = strsep(&device_tmp, ","))) {
6158 ret = init_binder_device(device_name);
6159 if (ret)
6160 goto err_init_binder_device_failed;
6161 }
6162 }
6163
6164 ret = init_binderfs();
6165 if (ret)
6166 goto err_init_binder_device_failed;
6167
6168 return ret;
6169
6170err_init_binder_device_failed:
6171 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6172 misc_deregister(&device->miscdev);
6173 hlist_del(&device->hlist);
6174 kfree(device);
6175 }
6176
6177 kfree(device_names);
6178
6179err_alloc_device_names_failed:
6180 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6181
6182 return ret;
6183}
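
/*
 * Example (assumed): with CONFIG_ANDROID_BINDERFS disabled and the usual
 * binder.devices= module parameter spelling, booting with
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * makes the strsep() loop above register three independent misc devices,
 * each with its own binder_context and context manager.
 */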
6184
6185device_initcall(binder_init);
6186
6187#define CREATE_TRACE_POINTS
6188#include "binder_trace.h"
6189
6190MODULE_LICENSE("GPL v2");