   1// SPDX-License-Identifier: GPL-2.0-only
   2/* binder.c
   3 *
   4 * Android IPC Subsystem
   5 *
   6 * Copyright (C) 2007-2008 Google, Inc.
   7 */
   8
   9/*
  10 * Locking overview
  11 *
  12 * There are 3 main spinlocks which must be acquired in the
  13 * order shown:
  14 *
  15 * 1) proc->outer_lock : protects binder_ref
  16 *    binder_proc_lock() and binder_proc_unlock() are
  17 *    used to acq/rel.
  18 * 2) node->lock : protects most fields of binder_node.
  19 *    binder_node_lock() and binder_node_unlock() are
  20 *    used to acq/rel
  21 * 3) proc->inner_lock : protects the thread and node lists
  22 *    (proc->threads, proc->waiting_threads, proc->nodes)
  23 *    and all todo lists associated with the binder_proc
  24 *    (proc->todo, thread->todo, proc->delivered_death and
  25 *    node->async_todo), as well as thread->transaction_stack
  26 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
  27 *    are used to acq/rel
  28 *
  29 * Any lock under procA must never be nested under any lock at the same
  30 * level or below on procB.
  31 *
  32 * Functions that require a lock held on entry indicate which lock
  33 * in the suffix of the function name:
  34 *
  35 * foo_olocked() : requires proc->outer_lock
  36 * foo_nlocked() : requires node->lock
  37 * foo_ilocked() : requires proc->inner_lock
  38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
  39 * foo_nilocked(): requires node->lock and proc->inner_lock
  40 * ...
  41 */
  42
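/*
 * A minimal nesting sketch of the order above (editor's illustration,
 * not part of the original driver): a path that looks up a ref and then
 * touches the target node and this proc's todo lists takes the locks
 * strictly outer -> node -> inner:
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	ref = binder_get_ref_olocked(proc, desc, true);
 *	binder_node_lock(ref->node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...					// walk todo lists etc.
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(ref->node);
 *	binder_proc_unlock(proc);
 */
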
  43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  44
  45#include <linux/fdtable.h>
  46#include <linux/file.h>
  47#include <linux/freezer.h>
  48#include <linux/fs.h>
  49#include <linux/list.h>
  50#include <linux/miscdevice.h>
  51#include <linux/module.h>
  52#include <linux/mutex.h>
  53#include <linux/nsproxy.h>
  54#include <linux/poll.h>
  55#include <linux/debugfs.h>
  56#include <linux/rbtree.h>
  57#include <linux/sched/signal.h>
  58#include <linux/sched/mm.h>
  59#include <linux/seq_file.h>
  60#include <linux/string.h>
  61#include <linux/uaccess.h>
  62#include <linux/pid_namespace.h>
  63#include <linux/security.h>
  64#include <linux/spinlock.h>
  65#include <linux/ratelimit.h>
  66#include <linux/syscalls.h>
  67#include <linux/task_work.h>
  68#include <linux/sizes.h>
  69
  70#include <uapi/linux/android/binder.h>
  71#include <uapi/linux/android/binderfs.h>
  72
  73#include <asm/cacheflush.h>
  74
  75#include "binder_alloc.h"
  76#include "binder_internal.h"
  77#include "binder_trace.h"
  78
  79static HLIST_HEAD(binder_deferred_list);
  80static DEFINE_MUTEX(binder_deferred_lock);
  81
  82static HLIST_HEAD(binder_devices);
  83static HLIST_HEAD(binder_procs);
  84static DEFINE_MUTEX(binder_procs_lock);
  85
  86static HLIST_HEAD(binder_dead_nodes);
  87static DEFINE_SPINLOCK(binder_dead_nodes_lock);
  88
  89static struct dentry *binder_debugfs_dir_entry_root;
  90static struct dentry *binder_debugfs_dir_entry_proc;
  91static atomic_t binder_last_id;
  92
  93static int proc_show(struct seq_file *m, void *unused);
  94DEFINE_SHOW_ATTRIBUTE(proc);
  95
  96#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
  97
  98enum {
  99	BINDER_DEBUG_USER_ERROR             = 1U << 0,
 100	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
 101	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
 102	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
 103	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
 104	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
 105	BINDER_DEBUG_READ_WRITE             = 1U << 6,
 106	BINDER_DEBUG_USER_REFS              = 1U << 7,
 107	BINDER_DEBUG_THREADS                = 1U << 8,
 108	BINDER_DEBUG_TRANSACTION            = 1U << 9,
 109	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
 110	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
 111	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
 112	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
 113	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
 114};
 115static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 116	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
 117module_param_named(debug_mask, binder_debug_mask, uint, 0644);
 118
 119char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
 120module_param_named(devices, binder_devices_param, charp, 0444);
 121
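/*
 * Usage sketch (editor's illustration): the parameter is a comma-
 * separated list of device nodes to create, in the same format as
 * CONFIG_ANDROID_BINDER_DEVICES, e.g. on the kernel command line:
 *
 *	binder.devices=binder,hwbinder,vndbinder
 */
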
 122static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
 123static int binder_stop_on_user_error;
 124
 125static int binder_set_stop_on_user_error(const char *val,
 126					 const struct kernel_param *kp)
 127{
 128	int ret;
 129
 130	ret = param_set_int(val, kp);
 131	if (binder_stop_on_user_error < 2)
 132		wake_up(&binder_user_error_wait);
 133	return ret;
 134}
 135module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
 136	param_get_int, &binder_stop_on_user_error, 0644);
 137
 138#define binder_debug(mask, x...) \
 139	do { \
 140		if (binder_debug_mask & mask) \
 141			pr_info_ratelimited(x); \
 142	} while (0)
 143
 144#define binder_user_error(x...) \
 145	do { \
 146		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
 147			pr_info_ratelimited(x); \
 148		if (binder_stop_on_user_error) \
 149			binder_stop_on_user_error = 2; \
 150	} while (0)
 151
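/*
 * Usage sketch (editor's illustration): both macros take printf-style
 * arguments and are rate-limited; in addition, binder_user_error()
 * latches binder_stop_on_user_error to 2 when the stop_on_user_error
 * module parameter is set, so the driver can be made to stall for
 * debugging when userspace misbehaves:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", current->pid);
 *	binder_user_error("%d: invalid handle %u\n", proc->pid, desc);
 */
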
 152#define to_flat_binder_object(hdr) \
 153	container_of(hdr, struct flat_binder_object, hdr)
 154
 155#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
 156
 157#define to_binder_buffer_object(hdr) \
 158	container_of(hdr, struct binder_buffer_object, hdr)
 159
 160#define to_binder_fd_array_object(hdr) \
 161	container_of(hdr, struct binder_fd_array_object, hdr)
 162
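/*
 * Dispatch sketch (editor's illustration): objects embedded in a
 * transaction buffer begin with a struct binder_object_header, and the
 * macros above recover the enclosing object once hdr->type is known:
 *
 *	switch (hdr->type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *		fp = to_flat_binder_object(hdr);
 *		break;
 *	case BINDER_TYPE_FD:
 *		fdo = to_binder_fd_object(hdr);
 *		break;
 *	...
 *	}
 */
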
 163enum binder_stat_types {
 164	BINDER_STAT_PROC,
 165	BINDER_STAT_THREAD,
 166	BINDER_STAT_NODE,
 167	BINDER_STAT_REF,
 168	BINDER_STAT_DEATH,
 169	BINDER_STAT_TRANSACTION,
 170	BINDER_STAT_TRANSACTION_COMPLETE,
 171	BINDER_STAT_COUNT
 172};
 173
 174struct binder_stats {
 175	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
 176	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
 177	atomic_t obj_created[BINDER_STAT_COUNT];
 178	atomic_t obj_deleted[BINDER_STAT_COUNT];
 179};
 180
 181static struct binder_stats binder_stats;
 182
 183static inline void binder_stats_deleted(enum binder_stat_types type)
 184{
 185	atomic_inc(&binder_stats.obj_deleted[type]);
 186}
 187
 188static inline void binder_stats_created(enum binder_stat_types type)
 189{
 190	atomic_inc(&binder_stats.obj_created[type]);
 191}
 192
 193struct binder_transaction_log binder_transaction_log;
 194struct binder_transaction_log binder_transaction_log_failed;
 195
 196static struct binder_transaction_log_entry *binder_transaction_log_add(
 197	struct binder_transaction_log *log)
 198{
 199	struct binder_transaction_log_entry *e;
 200	unsigned int cur = atomic_inc_return(&log->cur);
 201
 202	if (cur >= ARRAY_SIZE(log->entry))
 203		log->full = true;
 204	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
 205	WRITE_ONCE(e->debug_id_done, 0);
 206	/*
 207	 * write-barrier to synchronize access to e->debug_id_done.
 208	 * We make sure the initialized 0 value is seen before
 209	 * the other fields are zeroed by memset().
 210	 */
 211	smp_wmb();
 212	memset(e, 0, sizeof(*e));
 213	return e;
 214}
 215
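/*
 * Reader-side sketch (editor's illustration): the smp_wmb() above pairs
 * with an smp_rmb() on the consumer side. A reader snapshots
 * debug_id_done before the barrier and re-checks it afterwards to
 * detect an entry that was still being rewritten (print_entry() is a
 * hypothetical stand-in for the actual printing code):
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();	// pairs with smp_wmb() in binder_transaction_log_add()
 *	print_entry(e);
 *	if (!debug_id || debug_id != READ_ONCE(e->debug_id_done))
 *		pr_cont(" (incomplete)\n");
 */
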
 216/**
 217 * struct binder_work - work enqueued on a worklist
 218 * @entry:             node enqueued on list
 219 * @type:              type of work to be performed
 220 *
 221 * There are separate work lists for proc, thread, and node (async).
 222 */
 223struct binder_work {
 224	struct list_head entry;
 225
 226	enum {
 227		BINDER_WORK_TRANSACTION = 1,
 228		BINDER_WORK_TRANSACTION_COMPLETE,
 229		BINDER_WORK_RETURN_ERROR,
 230		BINDER_WORK_NODE,
 231		BINDER_WORK_DEAD_BINDER,
 232		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
 233		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
 234	} type;
 235};
 236
 237struct binder_error {
 238	struct binder_work work;
 239	uint32_t cmd;
 240};
 241
 242/**
 243 * struct binder_node - binder node bookkeeping
 244 * @debug_id:             unique ID for debugging
 245 *                        (invariant after initialized)
 246 * @lock:                 lock for node fields
 247 * @work:                 worklist element for node work
 248 *                        (protected by @proc->inner_lock)
 249 * @rb_node:              element for proc->nodes tree
 250 *                        (protected by @proc->inner_lock)
 251 * @dead_node:            element for binder_dead_nodes list
 252 *                        (protected by binder_dead_nodes_lock)
 253 * @proc:                 binder_proc that owns this node
 254 *                        (invariant after initialized)
 255 * @refs:                 list of references on this node
 256 *                        (protected by @lock)
 257 * @internal_strong_refs: used to take strong references when
 258 *                        initiating a transaction
 259 *                        (protected by @proc->inner_lock if @proc
 260 *                        and by @lock)
 261 * @local_weak_refs:      weak user refs from local process
 262 *                        (protected by @proc->inner_lock if @proc
 263 *                        and by @lock)
 264 * @local_strong_refs:    strong user refs from local process
 265 *                        (protected by @proc->inner_lock if @proc
 266 *                        and by @lock)
 267 * @tmp_refs:             temporary kernel refs
 268 *                        (protected by @proc->inner_lock while @proc
 269 *                        is valid, and by binder_dead_nodes_lock
 270 *                        if @proc is NULL. During inc/dec and node release
 271 *                        it is also protected by @lock to provide safety
 272 *                        as the node dies and @proc becomes NULL)
 273 * @ptr:                  userspace pointer for node
 274 *                        (invariant, no lock needed)
 275 * @cookie:               userspace cookie for node
 276 *                        (invariant, no lock needed)
 277 * @has_strong_ref:       userspace notified of strong ref
 278 *                        (protected by @proc->inner_lock if @proc
 279 *                        and by @lock)
 280 * @pending_strong_ref:   userspace has acked notification of strong ref
 281 *                        (protected by @proc->inner_lock if @proc
 282 *                        and by @lock)
 283 * @has_weak_ref:         userspace notified of weak ref
 284 *                        (protected by @proc->inner_lock if @proc
 285 *                        and by @lock)
 286 * @pending_weak_ref:     userspace has acked notification of weak ref
 287 *                        (protected by @proc->inner_lock if @proc
 288 *                        and by @lock)
 289 * @has_async_transaction: async transaction to node in progress
 290 *                        (protected by @lock)
 291 * @accept_fds:           file descriptor operations supported for node
 292 *                        (invariant after initialized)
 293 * @min_priority:         minimum scheduling priority
 294 *                        (invariant after initialized)
 295 * @txn_security_ctx:     require sender's security context
 296 *                        (invariant after initialized)
 297 * @async_todo:           list of async work items
 298 *                        (protected by @proc->inner_lock)
 299 *
 300 * Bookkeeping structure for binder nodes.
 301 */
 302struct binder_node {
 303	int debug_id;
 304	spinlock_t lock;
 305	struct binder_work work;
 306	union {
 307		struct rb_node rb_node;
 308		struct hlist_node dead_node;
 309	};
 310	struct binder_proc *proc;
 311	struct hlist_head refs;
 312	int internal_strong_refs;
 313	int local_weak_refs;
 314	int local_strong_refs;
 315	int tmp_refs;
 316	binder_uintptr_t ptr;
 317	binder_uintptr_t cookie;
 318	struct {
 319		/*
 320		 * bitfield elements protected by
 321		 * proc inner_lock
 322		 */
 323		u8 has_strong_ref:1;
 324		u8 pending_strong_ref:1;
 325		u8 has_weak_ref:1;
 326		u8 pending_weak_ref:1;
 327	};
 328	struct {
 329		/*
 330		 * invariant after initialization
 331		 */
 332		u8 accept_fds:1;
 333		u8 txn_security_ctx:1;
 334		u8 min_priority;
 335	};
 336	bool has_async_transaction;
 337	struct list_head async_todo;
 338};
 339
 340struct binder_ref_death {
 341	/**
 342	 * @work: worklist element for death notifications
 343	 *        (protected by inner_lock of the proc that
 344	 *        this ref belongs to)
 345	 */
 346	struct binder_work work;
 347	binder_uintptr_t cookie;
 348};
 349
 350/**
 351 * struct binder_ref_data - binder_ref counts and id
 352 * @debug_id:        unique ID for the ref
 353 * @desc:            unique userspace handle for ref
 354 * @strong:          strong ref count (debugging only if not locked)
 355 * @weak:            weak ref count (debugging only if not locked)
 356 *
 357 * Structure to hold ref count and ref id information. Since
 358 * the actual ref can only be accessed with a lock, this structure
 359 * is used to return information about the ref to callers of
 360 * ref inc/dec functions.
 361 */
 362struct binder_ref_data {
 363	int debug_id;
 364	uint32_t desc;
 365	int strong;
 366	int weak;
 367};
 368
 369/**
 370 * struct binder_ref - struct to track references on nodes
 371 * @data:        binder_ref_data containing id, handle, and current refcounts
 372 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 373 * @rb_node_node: node for lookup by @node in proc's rb_tree
 374 * @node_entry:  list entry for node->refs list in target node
 375 *               (protected by @node->lock)
 376 * @proc:        binder_proc containing ref
 377 * @node:        binder_node of target node. When cleaning up a
 378 *               ref for deletion in binder_cleanup_ref, a non-NULL
 379 *               @node indicates the node must be freed
 380 * @death:       pointer to death notification (ref_death) if requested
 381 *               (protected by @node->lock)
 382 *
 383 * Structure to track references from procA to target node (on procB). This
 384 * structure is unsafe to access without holding @proc->outer_lock.
 385 */
 386struct binder_ref {
 387	/* Lookups needed: */
 388	/*   node + proc => ref (transaction) */
 389	/*   desc + proc => ref (transaction, inc/dec ref) */
 390	/*   node => refs + procs (proc exit) */
 391	struct binder_ref_data data;
 392	struct rb_node rb_node_desc;
 393	struct rb_node rb_node_node;
 394	struct hlist_node node_entry;
 395	struct binder_proc *proc;
 396	struct binder_node *node;
 397	struct binder_ref_death *death;
 398};
 399
 400enum binder_deferred_state {
 401	BINDER_DEFERRED_FLUSH        = 0x01,
 402	BINDER_DEFERRED_RELEASE      = 0x02,
 403};
 404
 405/**
 406 * struct binder_proc - binder process bookkeeping
 407 * @proc_node:            element for binder_procs list
 408 * @threads:              rbtree of binder_threads in this proc
 409 *                        (protected by @inner_lock)
 410 * @nodes:                rbtree of binder nodes associated with
 411 *                        this proc ordered by node->ptr
 412 *                        (protected by @inner_lock)
 413 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 414 *                        (protected by @outer_lock)
 415 * @refs_by_node:         rbtree of refs ordered by ref->node
 416 *                        (protected by @outer_lock)
 417 * @waiting_threads:      threads currently waiting for proc work
 418 *                        (protected by @inner_lock)
 419 * @pid:                  PID of group_leader of process
 420 *                        (invariant after initialized)
 421 * @tsk:                  task_struct for group_leader of process
 422 *                        (invariant after initialized)
 423 * @deferred_work_node:   element for binder_deferred_list
 424 *                        (protected by binder_deferred_lock)
 425 * @deferred_work:        bitmap of deferred work to perform
 426 *                        (protected by binder_deferred_lock)
 427 * @is_dead:              process is dead and awaiting free
 428 *                        when outstanding transactions are cleaned up
 429 *                        (protected by @inner_lock)
 430 * @todo:                 list of work for this process
 431 *                        (protected by @inner_lock)
 432 * @stats:                per-process binder statistics
 433 *                        (atomics, no lock needed)
 434 * @delivered_death:      list of delivered death notifications
 435 *                        (protected by @inner_lock)
 436 * @max_threads:          cap on number of binder threads
 437 *                        (protected by @inner_lock)
 438 * @requested_threads:    number of binder threads requested but not
 439 *                        yet started. In current implementation, can
 440 *                        only be 0 or 1.
 441 *                        (protected by @inner_lock)
 442 * @requested_threads_started: number of binder threads started
 443 *                        (protected by @inner_lock)
 444 * @tmp_ref:              temporary reference to indicate proc is in use
 445 *                        (protected by @inner_lock)
 446 * @default_priority:     default scheduler priority
 447 *                        (invariant after initialized)
 448 * @debugfs_entry:        debugfs node
 449 * @alloc:                binder allocator bookkeeping
 450 * @context:              binder_context for this proc
 451 *                        (invariant after initialized)
 452 * @inner_lock:           can nest under outer_lock and/or node lock
 453 * @outer_lock:           no nesting under inner or node lock
 454 *                        Lock order: 1) outer, 2) node, 3) inner
 455 * @binderfs_entry:       process-specific binderfs log file
 456 *
 457 * Bookkeeping structure for binder processes
 458 */
 459struct binder_proc {
 460	struct hlist_node proc_node;
 461	struct rb_root threads;
 462	struct rb_root nodes;
 463	struct rb_root refs_by_desc;
 464	struct rb_root refs_by_node;
 465	struct list_head waiting_threads;
 466	int pid;
 467	struct task_struct *tsk;
 468	struct hlist_node deferred_work_node;
 469	int deferred_work;
 470	bool is_dead;
 471
 472	struct list_head todo;
 473	struct binder_stats stats;
 474	struct list_head delivered_death;
 475	int max_threads;
 476	int requested_threads;
 477	int requested_threads_started;
 478	int tmp_ref;
 479	long default_priority;
 480	struct dentry *debugfs_entry;
 481	struct binder_alloc alloc;
 482	struct binder_context *context;
 483	spinlock_t inner_lock;
 484	spinlock_t outer_lock;
 485	struct dentry *binderfs_entry;
 486};
 487
 488enum {
 489	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
 490	BINDER_LOOPER_STATE_ENTERED     = 0x02,
 491	BINDER_LOOPER_STATE_EXITED      = 0x04,
 492	BINDER_LOOPER_STATE_INVALID     = 0x08,
 493	BINDER_LOOPER_STATE_WAITING     = 0x10,
 494	BINDER_LOOPER_STATE_POLL        = 0x20,
 495};
 496
 497/**
 498 * struct binder_thread - binder thread bookkeeping
 499 * @proc:                 binder process for this thread
 500 *                        (invariant after initialization)
 501 * @rb_node:              element for proc->threads rbtree
 502 *                        (protected by @proc->inner_lock)
 503 * @waiting_thread_node:  element for @proc->waiting_threads list
 504 *                        (protected by @proc->inner_lock)
 505 * @pid:                  PID for this thread
 506 *                        (invariant after initialization)
 507 * @looper:               bitmap of looping state
 508 *                        (only accessed by this thread)
 509 * @looper_need_return:   looping thread needs to exit driver
 510 *                        (no lock needed)
 511 * @transaction_stack:    stack of in-progress transactions for this thread
 512 *                        (protected by @proc->inner_lock)
 513 * @todo:                 list of work to do for this thread
 514 *                        (protected by @proc->inner_lock)
 515 * @process_todo:         whether work in @todo should be processed
 516 *                        (protected by @proc->inner_lock)
 517 * @return_error:         transaction errors reported by this thread
 518 *                        (only accessed by this thread)
 519 * @reply_error:          transaction errors reported by target thread
 520 *                        (protected by @proc->inner_lock)
 521 * @wait:                 wait queue for thread work
 522 * @stats:                per-thread statistics
 523 *                        (atomics, no lock needed)
 524 * @tmp_ref:              temporary reference to indicate thread is in use
 525 *                        (atomic since @proc->inner_lock cannot
 526 *                        always be acquired)
 527 * @is_dead:              thread is dead and awaiting free
 528 *                        when outstanding transactions are cleaned up
 529 *                        (protected by @proc->inner_lock)
 530 *
 531 * Bookkeeping structure for binder threads.
 532 */
 533struct binder_thread {
 534	struct binder_proc *proc;
 535	struct rb_node rb_node;
 536	struct list_head waiting_thread_node;
 537	int pid;
 538	int looper;              /* only modified by this thread */
 539	bool looper_need_return; /* can be written by other thread */
 540	struct binder_transaction *transaction_stack;
 541	struct list_head todo;
 542	bool process_todo;
 543	struct binder_error return_error;
 544	struct binder_error reply_error;
 545	wait_queue_head_t wait;
 546	struct binder_stats stats;
 547	atomic_t tmp_ref;
 548	bool is_dead;
 549};
 550
 551/**
 552 * struct binder_txn_fd_fixup - transaction fd fixup list element
 553 * @fixup_entry:          list entry
 554 * @file:                 struct file to be associated with new fd
 555 * @offset:               offset in buffer data to this fixup
 556 *
 557 * List element for fd fixups in a transaction. Since file
 558 * descriptors need to be allocated in the context of the
 559 * target process, we pass each fd to be processed in this
 560 * struct.
 561 */
 562struct binder_txn_fd_fixup {
 563	struct list_head fixup_entry;
 564	struct file *file;
 565	size_t offset;
 566};
 567
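/*
 * Processing sketch (editor's illustration, error handling omitted): in
 * the target process's context the queued fixups are turned into real
 * fds and patched into the transaction buffer at the recorded offsets,
 * roughly:
 *
 *	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
 *		int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *		fd_install(fd, fixup->file);
 *		...			// copy fd into the buffer at
 *					// fixup->offset
 *	}
 */
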
 568struct binder_transaction {
 569	int debug_id;
 570	struct binder_work work;
 571	struct binder_thread *from;
 572	struct binder_transaction *from_parent;
 573	struct binder_proc *to_proc;
 574	struct binder_thread *to_thread;
 575	struct binder_transaction *to_parent;
 576	unsigned need_reply:1;
 577	/* unsigned is_dead:1; */	/* not used at the moment */
 578
 579	struct binder_buffer *buffer;
 580	unsigned int	code;
 581	unsigned int	flags;
 582	long	priority;
 583	long	saved_priority;
 584	kuid_t	sender_euid;
 585	struct list_head fd_fixups;
 586	binder_uintptr_t security_ctx;
 587	/**
 588	 * @lock:  protects @from, @to_proc, and @to_thread
 589	 *
 590	 * @from, @to_proc, and @to_thread can be set to NULL
 591	 * during thread teardown
 592	 */
 593	spinlock_t lock;
 594};
 595
 596/**
 597 * struct binder_object - union of flat binder object types
 598 * @hdr:   generic object header
 599 * @fbo:   binder object (nodes and refs)
 600 * @fdo:   file descriptor object
 601 * @bbo:   binder buffer pointer
 602 * @fdao:  file descriptor array
 603 *
 604 * Used for type-independent object copies
 605 */
 606struct binder_object {
 607	union {
 608		struct binder_object_header hdr;
 609		struct flat_binder_object fbo;
 610		struct binder_fd_object fdo;
 611		struct binder_buffer_object bbo;
 612		struct binder_fd_array_object fdao;
 613	};
 614};
 615
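/*
 * Copy sketch (editor's illustration, error handling omitted): since
 * every member starts with struct binder_object_header, a caller can
 * read just the header first and then re-read the full object once the
 * type-specific size is known (read_buffer() is a hypothetical stand-in
 * for the driver's buffer-copy helper):
 *
 *	struct binder_object object;
 *
 *	read_buffer(&object.hdr, offset, sizeof(object.hdr));
 *	switch (object.hdr.type) {
 *	case BINDER_TYPE_FD:
 *		read_buffer(&object.fdo, offset, sizeof(object.fdo));
 *		break;
 *	...
 *	}
 */
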
 616/**
 617 * binder_proc_lock() - Acquire outer lock for given binder_proc
 618 * @proc:         struct binder_proc to acquire
 619 *
 620 * Acquires proc->outer_lock. Used to protect binder_ref
 621 * structures associated with the given proc.
 622 */
 623#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
 624static void
 625_binder_proc_lock(struct binder_proc *proc, int line)
 626	__acquires(&proc->outer_lock)
 627{
 628	binder_debug(BINDER_DEBUG_SPINLOCKS,
 629		     "%s: line=%d\n", __func__, line);
 630	spin_lock(&proc->outer_lock);
 631}
 632
 633/**
 634 * binder_proc_unlock() - Release outer lock for given binder_proc
 635 * @proc:         struct binder_proc to release
 636 *
 637 * Release lock acquired via binder_proc_lock()
 638 */
 639#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
 640static void
 641_binder_proc_unlock(struct binder_proc *proc, int line)
 642	__releases(&proc->outer_lock)
 643{
 644	binder_debug(BINDER_DEBUG_SPINLOCKS,
 645		     "%s: line=%d\n", __func__, line);
 646	spin_unlock(&proc->outer_lock);
 647}
 648
 649/**
 650 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 651 * @proc:         struct binder_proc to acquire
 652 *
 653 * Acquires proc->inner_lock. Used to protect todo lists
 654 */
 655#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
 656static void
 657_binder_inner_proc_lock(struct binder_proc *proc, int line)
 658	__acquires(&proc->inner_lock)
 659{
 660	binder_debug(BINDER_DEBUG_SPINLOCKS,
 661		     "%s: line=%d\n", __func__, line);
 662	spin_lock(&proc->inner_lock);
 663}
 664
 665/**
 666 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 667 * @proc:         struct binder_proc to release
 668 *
 669 * Release lock acquired via binder_inner_proc_lock()
 670 */
 671#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
 672static void
 673_binder_inner_proc_unlock(struct binder_proc *proc, int line)
 674	__releases(&proc->inner_lock)
 675{
 676	binder_debug(BINDER_DEBUG_SPINLOCKS,
 677		     "%s: line=%d\n", __func__, line);
 678	spin_unlock(&proc->inner_lock);
 679}
 680
 681/**
 682 * binder_node_lock() - Acquire spinlock for given binder_node
 683 * @node:         struct binder_node to acquire
 684 *
 685 * Acquires node->lock. Used to protect binder_node fields
 686 */
 687#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
 688static void
 689_binder_node_lock(struct binder_node *node, int line)
 690	__acquires(&node->lock)
 691{
 692	binder_debug(BINDER_DEBUG_SPINLOCKS,
 693		     "%s: line=%d\n", __func__, line);
 694	spin_lock(&node->lock);
 695}
 696
 697/**
 698 * binder_node_unlock() - Release spinlock for given binder_node
 699 * @node:         struct binder_node to release
 700 *
 701 * Release lock acquired via binder_node_lock()
 702 */
 703#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
 704static void
 705_binder_node_unlock(struct binder_node *node, int line)
 706	__releases(&node->lock)
 707{
 708	binder_debug(BINDER_DEBUG_SPINLOCKS,
 709		     "%s: line=%d\n", __func__, line);
 710	spin_unlock(&node->lock);
 711}
 712
 713/**
 714 * binder_node_inner_lock() - Acquire node and inner locks
 715 * @node:         struct binder_node to acquire
 716 *
 717 * Acquires node->lock. If node->proc is valid, also acquires
 718 * proc->inner_lock. Used to protect binder_node fields
 719 */
 720#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
 721static void
 722_binder_node_inner_lock(struct binder_node *node, int line)
 723	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
 724{
 725	binder_debug(BINDER_DEBUG_SPINLOCKS,
 726		     "%s: line=%d\n", __func__, line);
 727	spin_lock(&node->lock);
 728	if (node->proc)
 729		binder_inner_proc_lock(node->proc);
 730	else
 731		/* annotation for sparse */
 732		__acquire(&node->proc->inner_lock);
 733}
 734
 735/**
 736 * binder_node_inner_unlock() - Release node and inner locks
 737 * @node:         struct binder_node to release
 738 *
 739 * Release locks acquired via binder_node_inner_lock()
 740 */
 741#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
 742static void
 743_binder_node_inner_unlock(struct binder_node *node, int line)
 744	__releases(&node->lock) __releases(&node->proc->inner_lock)
 745{
 746	struct binder_proc *proc = node->proc;
 747
 748	binder_debug(BINDER_DEBUG_SPINLOCKS,
 749		     "%s: line=%d\n", __func__, line);
 750	if (proc)
 751		binder_inner_proc_unlock(proc);
 752	else
 753		/* annotation for sparse */
 754		__release(&node->proc->inner_lock);
 755	spin_unlock(&node->lock);
 756}
 757
 758static bool binder_worklist_empty_ilocked(struct list_head *list)
 759{
 760	return list_empty(list);
 761}
 762
 763/**
 764 * binder_worklist_empty() - Check if no items on the work list
 765 * @proc:       binder_proc associated with list
 766 * @list:	list to check
 767 *
 768 * Return: true if there are no items on list, else false
 769 */
 770static bool binder_worklist_empty(struct binder_proc *proc,
 771				  struct list_head *list)
 772{
 773	bool ret;
 774
 775	binder_inner_proc_lock(proc);
 776	ret = binder_worklist_empty_ilocked(list);
 777	binder_inner_proc_unlock(proc);
 778	return ret;
 779}
 780
 781/**
 782 * binder_enqueue_work_ilocked() - Add an item to the work list
 783 * @work:         struct binder_work to add to list
 784 * @target_list:  list to add work to
 785 *
 786 * Adds the work to the specified list. Asserts that work
 787 * is not already on a list.
 788 *
 789 * Requires the proc->inner_lock to be held.
 790 */
 791static void
 792binder_enqueue_work_ilocked(struct binder_work *work,
 793			   struct list_head *target_list)
 794{
 795	BUG_ON(target_list == NULL);
 796	BUG_ON(work->entry.next && !list_empty(&work->entry));
 797	list_add_tail(&work->entry, target_list);
 798}
 799
 800/**
 801 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 802 * @thread:       thread to queue work to
 803 * @work:         struct binder_work to add to list
 804 *
 805 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 806 * flag, which means that (if it wasn't already set) the thread will go to
 807 * sleep without handling this work when it calls read.
 808 *
 809 * Requires the proc->inner_lock to be held.
 810 */
 811static void
 812binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
 813					    struct binder_work *work)
 814{
 815	WARN_ON(!list_empty(&thread->waiting_thread_node));
 816	binder_enqueue_work_ilocked(work, &thread->todo);
 817}
 818
 819/**
 820 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 821 * @thread:       thread to queue work to
 822 * @work:         struct binder_work to add to list
 823 *
 824 * Adds the work to the todo list of the thread, and enables processing
 825 * of the todo queue.
 826 *
 827 * Requires the proc->inner_lock to be held.
 828 */
 829static void
 830binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
 831				   struct binder_work *work)
 832{
 833	WARN_ON(!list_empty(&thread->waiting_thread_node));
 834	binder_enqueue_work_ilocked(work, &thread->todo);
 835	thread->process_todo = true;
 836}
 837
 838/**
 839 * binder_enqueue_thread_work() - Add an item to the thread work list
 840 * @thread:       thread to queue work to
 841 * @work:         struct binder_work to add to list
 842 *
 843 * Adds the work to the todo list of the thread, and enables processing
 844 * of the todo queue.
 845 */
 846static void
 847binder_enqueue_thread_work(struct binder_thread *thread,
 848			   struct binder_work *work)
 849{
 850	binder_inner_proc_lock(thread->proc);
 851	binder_enqueue_thread_work_ilocked(thread, work);
 852	binder_inner_proc_unlock(thread->proc);
 853}
 854
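/*
 * Producer-side sketch (editor's illustration): work queued to a proc
 * (rather than to a specific thread) is typically paired with a wakeup
 * while the inner lock is still held, along the lines of:
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_work_ilocked(&t->work, &proc->todo);
 *	binder_wakeup_proc_ilocked(proc);	// defined later in this file
 *	binder_inner_proc_unlock(proc);
 */
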
 855static void
 856binder_dequeue_work_ilocked(struct binder_work *work)
 857{
 858	list_del_init(&work->entry);
 859}
 860
 861/**
 862 * binder_dequeue_work() - Removes an item from the work list
 863 * @proc:         binder_proc associated with list
 864 * @work:         struct binder_work to remove from list
 865 *
 866 * Removes the specified work item from whatever list it is on.
 867 * Can safely be called if work is not on any list.
 868 */
 869static void
 870binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
 871{
 872	binder_inner_proc_lock(proc);
 873	binder_dequeue_work_ilocked(work);
 874	binder_inner_proc_unlock(proc);
 875}
 876
 877static struct binder_work *binder_dequeue_work_head_ilocked(
 878					struct list_head *list)
 879{
 880	struct binder_work *w;
 881
 882	w = list_first_entry_or_null(list, struct binder_work, entry);
 883	if (w)
 884		list_del_init(&w->entry);
 885	return w;
 886}
 887
 888/**
 889 * binder_dequeue_work_head() - Dequeues the item at head of list
 890 * @proc:         binder_proc associated with list
 891 * @list:         list to dequeue head
 892 *
 893 * Removes the head of the list if there are items on the list
 894 *
 895 * Return: pointer to dequeued binder_work, NULL if list was empty
 896 */
 897static struct binder_work *binder_dequeue_work_head(
 898					struct binder_proc *proc,
 899					struct list_head *list)
 900{
 901	struct binder_work *w;
 902
 903	binder_inner_proc_lock(proc);
 904	w = binder_dequeue_work_head_ilocked(list);
 905	binder_inner_proc_unlock(proc);
 906	return w;
 907}
 908
 909static void
 910binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
 911static void binder_free_thread(struct binder_thread *thread);
 912static void binder_free_proc(struct binder_proc *proc);
 913static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 914
 915static bool binder_has_work_ilocked(struct binder_thread *thread,
 916				    bool do_proc_work)
 917{
 918	return thread->process_todo ||
 919		thread->looper_need_return ||
 920		(do_proc_work &&
 921		 !binder_worklist_empty_ilocked(&thread->proc->todo));
 922}
 923
 924static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
 925{
 926	bool has_work;
 927
 928	binder_inner_proc_lock(thread->proc);
 929	has_work = binder_has_work_ilocked(thread, do_proc_work);
 930	binder_inner_proc_unlock(thread->proc);
 931
 932	return has_work;
 933}
 934
 935static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
 936{
 937	return !thread->transaction_stack &&
 938		binder_worklist_empty_ilocked(&thread->todo) &&
 939		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
 940				   BINDER_LOOPER_STATE_REGISTERED));
 941}
 942
 943static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
 944					       bool sync)
 945{
 946	struct rb_node *n;
 947	struct binder_thread *thread;
 948
 949	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
 950		thread = rb_entry(n, struct binder_thread, rb_node);
 951		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
 952		    binder_available_for_proc_work_ilocked(thread)) {
 953			if (sync)
 954				wake_up_interruptible_sync(&thread->wait);
 955			else
 956				wake_up_interruptible(&thread->wait);
 957		}
 958	}
 959}
 960
 961/**
 962 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 963 * @proc:	process to select a thread from
 964 *
 965 * Note that calling this function moves the thread off the waiting_threads
 966 * list, so it can only be woken up by the caller of this function, or a
 967 * signal. Therefore, callers *should* always wake up the thread this function
 968 * returns.
 969 *
 970 * Return:	If there's a thread currently waiting for process work,
 971 *		returns that thread. Otherwise returns NULL.
 972 */
 973static struct binder_thread *
 974binder_select_thread_ilocked(struct binder_proc *proc)
 975{
 976	struct binder_thread *thread;
 977
 978	assert_spin_locked(&proc->inner_lock);
 979	thread = list_first_entry_or_null(&proc->waiting_threads,
 980					  struct binder_thread,
 981					  waiting_thread_node);
 982
 983	if (thread)
 984		list_del_init(&thread->waiting_thread_node);
 985
 986	return thread;
 987}
 988
 989/**
 990 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 991 * @proc:	process to wake up a thread in
 992 * @thread:	specific thread to wake-up (may be NULL)
 993 * @sync:	whether to do a synchronous wake-up
 994 *
 995 * This function wakes up a thread in the @proc process.
 996 * The caller may provide a specific thread to wake-up in
 997 * the @thread parameter. If @thread is NULL, this function
 998 * will wake up threads that have called poll().
 999 *
1000 * Note that for this function to work as expected, callers
1001 * should first call binder_select_thread() to find a thread
1002 * to handle the work (if they don't have a thread already),
1003 * and pass the result into the @thread parameter.
1004 */
1005static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1006					 struct binder_thread *thread,
1007					 bool sync)
1008{
1009	assert_spin_locked(&proc->inner_lock);
1010
1011	if (thread) {
1012		if (sync)
1013			wake_up_interruptible_sync(&thread->wait);
1014		else
1015			wake_up_interruptible(&thread->wait);
1016		return;
1017	}
1018
1019	/* Didn't find a thread waiting for proc work; this can happen
1020	 * in two scenarios:
1021	 * 1. All threads are busy handling transactions
1022	 *    In that case, one of those threads should call back into
1023	 *    the kernel driver soon and pick up this work.
1024	 * 2. Threads are using the (e)poll interface, in which case
1025	 *    they may be blocked on the waitqueue without having been
1026	 *    added to waiting_threads. For this case, we just iterate
1027	 *    over all threads not handling transaction work, and
1028	 *    wake them all up. We wake all because we don't know whether
1029	 *    a thread that called into (e)poll is handling non-binder
1030	 *    work currently.
1031	 */
1032	binder_wakeup_poll_threads_ilocked(proc, sync);
1033}
1034
1035static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1036{
1037	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1038
1039	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1040}
1041
1042static void binder_set_nice(long nice)
1043{
1044	long min_nice;
1045
1046	if (can_nice(current, nice)) {
1047		set_user_nice(current, nice);
1048		return;
1049	}
1050	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1051	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1052		     "%d: nice value %ld not allowed use %ld instead\n",
1053		      current->pid, nice, min_nice);
1054	set_user_nice(current, min_nice);
1055	if (min_nice <= MAX_NICE)
1056		return;
1057	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1058}
1059
1060static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1061						   binder_uintptr_t ptr)
1062{
1063	struct rb_node *n = proc->nodes.rb_node;
1064	struct binder_node *node;
1065
1066	assert_spin_locked(&proc->inner_lock);
1067
1068	while (n) {
1069		node = rb_entry(n, struct binder_node, rb_node);
1070
1071		if (ptr < node->ptr)
1072			n = n->rb_left;
1073		else if (ptr > node->ptr)
1074			n = n->rb_right;
1075		else {
1076			/*
1077			 * take an implicit weak reference
1078			 * to ensure node stays alive until
1079			 * call to binder_put_node()
1080			 */
1081			binder_inc_node_tmpref_ilocked(node);
1082			return node;
1083		}
1084	}
1085	return NULL;
1086}
1087
1088static struct binder_node *binder_get_node(struct binder_proc *proc,
1089					   binder_uintptr_t ptr)
1090{
1091	struct binder_node *node;
1092
1093	binder_inner_proc_lock(proc);
1094	node = binder_get_node_ilocked(proc, ptr);
1095	binder_inner_proc_unlock(proc);
1096	return node;
1097}
1098
1099static struct binder_node *binder_init_node_ilocked(
1100						struct binder_proc *proc,
1101						struct binder_node *new_node,
1102						struct flat_binder_object *fp)
1103{
1104	struct rb_node **p = &proc->nodes.rb_node;
1105	struct rb_node *parent = NULL;
1106	struct binder_node *node;
1107	binder_uintptr_t ptr = fp ? fp->binder : 0;
1108	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1109	__u32 flags = fp ? fp->flags : 0;
1110
1111	assert_spin_locked(&proc->inner_lock);
1112
1113	while (*p) {
1114
1115		parent = *p;
1116		node = rb_entry(parent, struct binder_node, rb_node);
1117
1118		if (ptr < node->ptr)
1119			p = &(*p)->rb_left;
1120		else if (ptr > node->ptr)
1121			p = &(*p)->rb_right;
1122		else {
1123			/*
1124			 * A matching node is already in
1125			 * the rb tree. Abandon the init
1126			 * and return it.
1127			 */
1128			binder_inc_node_tmpref_ilocked(node);
1129			return node;
1130		}
1131	}
1132	node = new_node;
1133	binder_stats_created(BINDER_STAT_NODE);
1134	node->tmp_refs++;
1135	rb_link_node(&node->rb_node, parent, p);
1136	rb_insert_color(&node->rb_node, &proc->nodes);
1137	node->debug_id = atomic_inc_return(&binder_last_id);
1138	node->proc = proc;
1139	node->ptr = ptr;
1140	node->cookie = cookie;
1141	node->work.type = BINDER_WORK_NODE;
1142	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1143	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1144	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1145	spin_lock_init(&node->lock);
1146	INIT_LIST_HEAD(&node->work.entry);
1147	INIT_LIST_HEAD(&node->async_todo);
1148	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1149		     "%d:%d node %d u%016llx c%016llx created\n",
1150		     proc->pid, current->pid, node->debug_id,
1151		     (u64)node->ptr, (u64)node->cookie);
1152
1153	return node;
1154}
1155
1156static struct binder_node *binder_new_node(struct binder_proc *proc,
1157					   struct flat_binder_object *fp)
1158{
1159	struct binder_node *node;
1160	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1161
1162	if (!new_node)
1163		return NULL;
1164	binder_inner_proc_lock(proc);
1165	node = binder_init_node_ilocked(proc, new_node, fp);
1166	binder_inner_proc_unlock(proc);
1167	if (node != new_node)
1168		/*
1169		 * The node was already added by another thread
1170		 */
1171		kfree(new_node);
1172
1173	return node;
1174}
1175
1176static void binder_free_node(struct binder_node *node)
1177{
1178	kfree(node);
1179	binder_stats_deleted(BINDER_STAT_NODE);
1180}
1181
1182static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1183				    int internal,
1184				    struct list_head *target_list)
1185{
1186	struct binder_proc *proc = node->proc;
1187
1188	assert_spin_locked(&node->lock);
1189	if (proc)
1190		assert_spin_locked(&proc->inner_lock);
1191	if (strong) {
1192		if (internal) {
1193			if (target_list == NULL &&
1194			    node->internal_strong_refs == 0 &&
1195			    !(node->proc &&
1196			      node == node->proc->context->binder_context_mgr_node &&
1197			      node->has_strong_ref)) {
1198				pr_err("invalid inc strong node for %d\n",
1199					node->debug_id);
1200				return -EINVAL;
1201			}
1202			node->internal_strong_refs++;
1203		} else
1204			node->local_strong_refs++;
1205		if (!node->has_strong_ref && target_list) {
1206			struct binder_thread *thread = container_of(target_list,
1207						    struct binder_thread, todo);
1208			binder_dequeue_work_ilocked(&node->work);
1209			BUG_ON(&thread->todo != target_list);
1210			binder_enqueue_deferred_thread_work_ilocked(thread,
1211								   &node->work);
1212		}
1213	} else {
1214		if (!internal)
1215			node->local_weak_refs++;
1216		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1217			if (target_list == NULL) {
1218				pr_err("invalid inc weak node for %d\n",
1219					node->debug_id);
1220				return -EINVAL;
1221			}
1222			/*
1223			 * See comment above
1224			 */
1225			binder_enqueue_work_ilocked(&node->work, target_list);
1226		}
1227	}
1228	return 0;
1229}
1230
1231static int binder_inc_node(struct binder_node *node, int strong, int internal,
1232			   struct list_head *target_list)
1233{
1234	int ret;
1235
1236	binder_node_inner_lock(node);
1237	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1238	binder_node_inner_unlock(node);
1239
1240	return ret;
1241}
1242
1243static bool binder_dec_node_nilocked(struct binder_node *node,
1244				     int strong, int internal)
1245{
1246	struct binder_proc *proc = node->proc;
1247
1248	assert_spin_locked(&node->lock);
1249	if (proc)
1250		assert_spin_locked(&proc->inner_lock);
1251	if (strong) {
1252		if (internal)
1253			node->internal_strong_refs--;
1254		else
1255			node->local_strong_refs--;
1256		if (node->local_strong_refs || node->internal_strong_refs)
1257			return false;
1258	} else {
1259		if (!internal)
1260			node->local_weak_refs--;
1261		if (node->local_weak_refs || node->tmp_refs ||
1262				!hlist_empty(&node->refs))
1263			return false;
1264	}
1265
1266	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1267		if (list_empty(&node->work.entry)) {
1268			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1269			binder_wakeup_proc_ilocked(proc);
1270		}
1271	} else {
1272		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1273		    !node->local_weak_refs && !node->tmp_refs) {
1274			if (proc) {
1275				binder_dequeue_work_ilocked(&node->work);
1276				rb_erase(&node->rb_node, &proc->nodes);
1277				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1278					     "refless node %d deleted\n",
1279					     node->debug_id);
1280			} else {
1281				BUG_ON(!list_empty(&node->work.entry));
1282				spin_lock(&binder_dead_nodes_lock);
1283				/*
1284				 * tmp_refs could have changed so
1285				 * check it again
1286				 */
1287				if (node->tmp_refs) {
1288					spin_unlock(&binder_dead_nodes_lock);
1289					return false;
1290				}
1291				hlist_del(&node->dead_node);
1292				spin_unlock(&binder_dead_nodes_lock);
1293				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1294					     "dead node %d deleted\n",
1295					     node->debug_id);
1296			}
1297			return true;
1298		}
1299	}
1300	return false;
1301}
1302
1303static void binder_dec_node(struct binder_node *node, int strong, int internal)
1304{
1305	bool free_node;
1306
1307	binder_node_inner_lock(node);
1308	free_node = binder_dec_node_nilocked(node, strong, internal);
1309	binder_node_inner_unlock(node);
1310	if (free_node)
1311		binder_free_node(node);
1312}
1313
1314static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1315{
1316	/*
1317	 * No call to binder_inc_node() is needed since we
1318	 * don't need to inform userspace of any changes to
1319	 * tmp_refs
1320	 */
1321	node->tmp_refs++;
1322}
1323
1324/**
1325 * binder_inc_node_tmpref() - take a temporary reference on node
1326 * @node:	node to reference
1327 *
1328 * Take reference on node to prevent the node from being freed
1329 * while referenced only by a local variable. The inner lock is
1330 * needed to serialize with the node work on the queue (which
1331 * isn't needed after the node is dead). If the node is dead
1332 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1333 * node->tmp_refs against dead-node-only cases where the node
1334 * lock cannot be acquired (e.g. traversing the dead node list to
1335 * print nodes)
1336 */
1337static void binder_inc_node_tmpref(struct binder_node *node)
1338{
1339	binder_node_lock(node);
1340	if (node->proc)
1341		binder_inner_proc_lock(node->proc);
1342	else
1343		spin_lock(&binder_dead_nodes_lock);
1344	binder_inc_node_tmpref_ilocked(node);
1345	if (node->proc)
1346		binder_inner_proc_unlock(node->proc);
1347	else
1348		spin_unlock(&binder_dead_nodes_lock);
1349	binder_node_unlock(node);
1350}
1351
1352/**
1353 * binder_dec_node_tmpref() - remove a temporary reference on node
1354 * @node:	node to reference
1355 *
1356 * Release temporary reference on node taken via binder_inc_node_tmpref()
1357 */
1358static void binder_dec_node_tmpref(struct binder_node *node)
1359{
1360	bool free_node;
1361
1362	binder_node_inner_lock(node);
1363	if (!node->proc)
1364		spin_lock(&binder_dead_nodes_lock);
1365	else
1366		__acquire(&binder_dead_nodes_lock);
1367	node->tmp_refs--;
1368	BUG_ON(node->tmp_refs < 0);
1369	if (!node->proc)
1370		spin_unlock(&binder_dead_nodes_lock);
1371	else
1372		__release(&binder_dead_nodes_lock);
1373	/*
1374	 * Call binder_dec_node() to check if all refcounts are 0
1375	 * and cleanup is needed. Calling with strong=0 and internal=1
1376	 * causes no actual reference to be released in binder_dec_node().
1377	 * If that changes, a change is needed here too.
1378	 */
1379	free_node = binder_dec_node_nilocked(node, 0, 1);
1380	binder_node_inner_unlock(node);
1381	if (free_node)
1382		binder_free_node(node);
1383}
1384
1385static void binder_put_node(struct binder_node *node)
1386{
1387	binder_dec_node_tmpref(node);
1388}
1389
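/*
 * Lifetime sketch (editor's illustration): a tmpref brackets any use of
 * a node through a local pointer; lookup helpers such as
 * binder_get_node() take it implicitly:
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmpref
 *	if (node) {
 *		...				// node cannot be freed here
 *		binder_put_node(node);		// drops the tmpref
 *	}
 */
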
1390static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1391						 u32 desc, bool need_strong_ref)
1392{
1393	struct rb_node *n = proc->refs_by_desc.rb_node;
1394	struct binder_ref *ref;
1395
1396	while (n) {
1397		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1398
1399		if (desc < ref->data.desc) {
1400			n = n->rb_left;
1401		} else if (desc > ref->data.desc) {
1402			n = n->rb_right;
1403		} else if (need_strong_ref && !ref->data.strong) {
1404			binder_user_error("tried to use weak ref as strong ref\n");
1405			return NULL;
1406		} else {
1407			return ref;
1408		}
1409	}
1410	return NULL;
1411}
1412
1413/**
1414 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1415 * @proc:	binder_proc that owns the ref
1416 * @node:	binder_node of target
1417 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1418 *
1419 * Look up the ref for the given node and return it if it exists
1420 *
1421 * If it doesn't exist and the caller provides a newly allocated
1422 * ref, initialize the fields of the newly allocated ref and insert
1423 * into the given proc rb_trees and node refs list.
1424 *
1425 * Return:	the ref for node. It is possible that another thread
1426 *		allocated/initialized the ref first in which case the
1427 *		returned ref would be different than the passed-in
1428 *		new_ref. new_ref must be kfree'd by the caller in
1429 *		this case.
1430 */
1431static struct binder_ref *binder_get_ref_for_node_olocked(
1432					struct binder_proc *proc,
1433					struct binder_node *node,
1434					struct binder_ref *new_ref)
1435{
1436	struct binder_context *context = proc->context;
1437	struct rb_node **p = &proc->refs_by_node.rb_node;
1438	struct rb_node *parent = NULL;
1439	struct binder_ref *ref;
1440	struct rb_node *n;
1441
1442	while (*p) {
1443		parent = *p;
1444		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1445
1446		if (node < ref->node)
1447			p = &(*p)->rb_left;
1448		else if (node > ref->node)
1449			p = &(*p)->rb_right;
1450		else
1451			return ref;
1452	}
1453	if (!new_ref)
1454		return NULL;
1455
1456	binder_stats_created(BINDER_STAT_REF);
1457	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1458	new_ref->proc = proc;
1459	new_ref->node = node;
1460	rb_link_node(&new_ref->rb_node_node, parent, p);
1461	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1462
1463	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1464	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1465		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1466		if (ref->data.desc > new_ref->data.desc)
1467			break;
1468		new_ref->data.desc = ref->data.desc + 1;
1469	}
1470
1471	p = &proc->refs_by_desc.rb_node;
1472	while (*p) {
1473		parent = *p;
1474		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1475
1476		if (new_ref->data.desc < ref->data.desc)
1477			p = &(*p)->rb_left;
1478		else if (new_ref->data.desc > ref->data.desc)
1479			p = &(*p)->rb_right;
1480		else
1481			BUG();
1482	}
1483	rb_link_node(&new_ref->rb_node_desc, parent, p);
1484	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1485
1486	binder_node_lock(node);
1487	hlist_add_head(&new_ref->node_entry, &node->refs);
1488
1489	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1490		     "%d new ref %d desc %d for node %d\n",
1491		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1492		      node->debug_id);
1493	binder_node_unlock(node);
1494	return new_ref;
1495}
1496
1497static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1498{
1499	bool delete_node = false;
1500
1501	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1502		     "%d delete ref %d desc %d for node %d\n",
1503		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1504		      ref->node->debug_id);
1505
1506	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1507	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1508
1509	binder_node_inner_lock(ref->node);
1510	if (ref->data.strong)
1511		binder_dec_node_nilocked(ref->node, 1, 1);
1512
1513	hlist_del(&ref->node_entry);
1514	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1515	binder_node_inner_unlock(ref->node);
1516	/*
1517	 * Clear ref->node unless we want the caller to free the node
1518	 */
1519	if (!delete_node) {
1520		/*
1521		 * The caller uses ref->node to determine
1522		 * whether the node needs to be freed. Clear
1523		 * it since the node is still alive.
1524		 */
1525		ref->node = NULL;
1526	}
1527
1528	if (ref->death) {
1529		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1530			     "%d delete ref %d desc %d has death notification\n",
1531			      ref->proc->pid, ref->data.debug_id,
1532			      ref->data.desc);
1533		binder_dequeue_work(ref->proc, &ref->death->work);
1534		binder_stats_deleted(BINDER_STAT_DEATH);
1535	}
1536	binder_stats_deleted(BINDER_STAT_REF);
1537}
1538
1539/**
1540 * binder_inc_ref_olocked() - increment the ref for given handle
1541 * @ref:         ref to be incremented
1542 * @strong:      if true, strong increment, else weak
1543 * @target_list: list to queue node work on
1544 *
1545 * Increment the ref. @ref->proc->outer_lock must be held on entry
1546 *
1547 * Return: 0, if successful, else errno
1548 */
1549static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1550				  struct list_head *target_list)
1551{
1552	int ret;
1553
1554	if (strong) {
1555		if (ref->data.strong == 0) {
1556			ret = binder_inc_node(ref->node, 1, 1, target_list);
1557			if (ret)
1558				return ret;
1559		}
1560		ref->data.strong++;
1561	} else {
1562		if (ref->data.weak == 0) {
1563			ret = binder_inc_node(ref->node, 0, 1, target_list);
1564			if (ret)
1565				return ret;
1566		}
1567		ref->data.weak++;
1568	}
1569	return 0;
1570}
1571
1572/**
1573 * binder_dec_ref_olocked() - dec the ref for given handle
1574 * @ref:	ref to be decremented
1575 * @strong:	if true, strong decrement, else weak
1576 *
1577 * Decrement the ref.
1578 *
1579 * Return: true if ref is cleaned up and ready to be freed
1580 */
1581static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1582{
1583	if (strong) {
1584		if (ref->data.strong == 0) {
1585			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1586					  ref->proc->pid, ref->data.debug_id,
1587					  ref->data.desc, ref->data.strong,
1588					  ref->data.weak);
1589			return false;
1590		}
1591		ref->data.strong--;
1592		if (ref->data.strong == 0)
1593			binder_dec_node(ref->node, strong, 1);
1594	} else {
1595		if (ref->data.weak == 0) {
1596			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1597					  ref->proc->pid, ref->data.debug_id,
1598					  ref->data.desc, ref->data.strong,
1599					  ref->data.weak);
1600			return false;
1601		}
1602		ref->data.weak--;
1603	}
1604	if (ref->data.strong == 0 && ref->data.weak == 0) {
1605		binder_cleanup_ref_olocked(ref);
1606		return true;
1607	}
1608	return false;
1609}
1610
1611/**
1612 * binder_get_node_from_ref() - get the node from the given proc/desc
1613 * @proc:	proc containing the ref
1614 * @desc:	the handle associated with the ref
1615 * @need_strong_ref: if true, only return node if ref is strong
1616 * @rdata:	the id/refcount data for the ref
1617 *
1618 * Given a proc and ref handle, return the associated binder_node
1619 *
1620 * Return: a binder_node or NULL if not found or not strong when strong required
1621 */
1622static struct binder_node *binder_get_node_from_ref(
1623		struct binder_proc *proc,
1624		u32 desc, bool need_strong_ref,
1625		struct binder_ref_data *rdata)
1626{
1627	struct binder_node *node;
1628	struct binder_ref *ref;
1629
1630	binder_proc_lock(proc);
1631	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1632	if (!ref)
1633		goto err_no_ref;
1634	node = ref->node;
1635	/*
1636	 * Take an implicit reference on the node to ensure
1637	 * it stays alive until the call to binder_put_node()
1638	 */
1639	binder_inc_node_tmpref(node);
1640	if (rdata)
1641		*rdata = ref->data;
1642	binder_proc_unlock(proc);
1643
1644	return node;
1645
1646err_no_ref:
1647	binder_proc_unlock(proc);
1648	return NULL;
1649}
1650
1651/**
1652 * binder_free_ref() - free the binder_ref
1653 * @ref:	ref to free
1654 *
1655 * Free the binder_ref. Free the binder_node indicated by ref->node
1656 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1657 */
1658static void binder_free_ref(struct binder_ref *ref)
1659{
1660	if (ref->node)
1661		binder_free_node(ref->node);
1662	kfree(ref->death);
1663	kfree(ref);
1664}
1665
1666/**
1667 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1668 * @proc:	proc containing the ref
1669 * @desc:	the handle associated with the ref
1670 * @increment:	true=inc reference, false=dec reference
1671 * @strong:	true=strong reference, false=weak reference
1672 * @rdata:	the id/refcount data for the ref
1673 *
1674 * Given a proc and ref handle, increment or decrement the ref
1675 * according to "increment" arg.
1676 *
1677 * Return: 0 if successful, else errno
1678 */
1679static int binder_update_ref_for_handle(struct binder_proc *proc,
1680		uint32_t desc, bool increment, bool strong,
1681		struct binder_ref_data *rdata)
1682{
1683	int ret = 0;
1684	struct binder_ref *ref;
1685	bool delete_ref = false;
1686
1687	binder_proc_lock(proc);
1688	ref = binder_get_ref_olocked(proc, desc, strong);
1689	if (!ref) {
1690		ret = -EINVAL;
1691		goto err_no_ref;
1692	}
1693	if (increment)
1694		ret = binder_inc_ref_olocked(ref, strong, NULL);
1695	else
1696		delete_ref = binder_dec_ref_olocked(ref, strong);
1697
1698	if (rdata)
1699		*rdata = ref->data;
1700	binder_proc_unlock(proc);
1701
1702	if (delete_ref)
1703		binder_free_ref(ref);
1704	return ret;
1705
1706err_no_ref:
1707	binder_proc_unlock(proc);
1708	return ret;
1709}
1710
1711/**
1712 * binder_dec_ref_for_handle() - dec the ref for given handle
1713 * @proc:	proc containing the ref
1714 * @desc:	the handle associated with the ref
1715 * @strong:	true=strong reference, false=weak reference
1716 * @rdata:	the id/refcount data for the ref
1717 *
1718 * Just calls binder_update_ref_for_handle() to decrement the ref.
1719 *
1720 * Return: 0 if successful, else errno
1721 */
1722static int binder_dec_ref_for_handle(struct binder_proc *proc,
1723		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1724{
1725	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1726}
1727
1728
1729/**
1730 * binder_inc_ref_for_node() - increment the ref for given proc/node
1731 * @proc:	 proc containing the ref
1732 * @node:	 target node
1733 * @strong:	 true=strong reference, false=weak reference
1734 * @target_list: worklist to use if node is incremented
1735 * @rdata:	 the id/refcount data for the ref
1736 *
1737 * Given a proc and node, increment the ref. Create the ref if it
1738 * doesn't already exist
1739 *
1740 * Return: 0 if successful, else errno
1741 */
1742static int binder_inc_ref_for_node(struct binder_proc *proc,
1743			struct binder_node *node,
1744			bool strong,
1745			struct list_head *target_list,
1746			struct binder_ref_data *rdata)
1747{
1748	struct binder_ref *ref;
1749	struct binder_ref *new_ref = NULL;
1750	int ret = 0;
1751
1752	binder_proc_lock(proc);
1753	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1754	if (!ref) {
1755		binder_proc_unlock(proc);
1756		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1757		if (!new_ref)
1758			return -ENOMEM;
1759		binder_proc_lock(proc);
1760		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1761	}
1762	ret = binder_inc_ref_olocked(ref, strong, target_list);
1763	*rdata = ref->data;
1764	binder_proc_unlock(proc);
1765	if (new_ref && ref != new_ref)
1766		/*
1767		 * Another thread created the ref first so
1768		 * free the one we allocated
1769		 */
1770		kfree(new_ref);
1771	return ret;
1772}
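
/*
 * Commentary on the allocation dance above (not driver code): kzalloc()
 * may sleep and so cannot run under the proc spinlock. The lock is
 * therefore dropped for the allocation and retaken, and the lookup is
 * repeated; if another thread inserted a ref for this node in the
 * window, the speculatively allocated ref is simply freed. In outline:
 *
 *	lock(); ref = lookup(node);
 *	if (!ref) { unlock(); new_ref = alloc(); lock();
 *		    ref = lookup_or_insert(node, new_ref); }
 *	...; unlock();
 *	if (new_ref && ref != new_ref) kfree(new_ref);
 */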
1773
1774static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1775					   struct binder_transaction *t)
1776{
1777	BUG_ON(!target_thread);
1778	assert_spin_locked(&target_thread->proc->inner_lock);
1779	BUG_ON(target_thread->transaction_stack != t);
1780	BUG_ON(target_thread->transaction_stack->from != target_thread);
1781	target_thread->transaction_stack =
1782		target_thread->transaction_stack->from_parent;
1783	t->from = NULL;
1784}
1785
1786/**
1787 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1788 * @thread:	thread to decrement
1789 *
1790 * A thread needs to be kept alive while being used to create or
1791 * handle a transaction. binder_get_txn_from() is used to safely
1792 * extract t->from from a binder_transaction and keep the thread
1793 * indicated by t->from from being freed. When done with that
1794 * binder_thread, this function is called to decrement the
1795 * tmp_ref and free if appropriate (thread has been released
1796 * and no transaction being processed by the driver)
1797 */
1798static void binder_thread_dec_tmpref(struct binder_thread *thread)
1799{
1800	/*
1801	 * The atomic alone protects the counter while it cannot reach zero or
1802	 * thread->is_dead is false; the inner lock serializes the check-and-free.
1803	 */
1804	binder_inner_proc_lock(thread->proc);
1805	atomic_dec(&thread->tmp_ref);
1806	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1807		binder_inner_proc_unlock(thread->proc);
1808		binder_free_thread(thread);
1809		return;
1810	}
1811	binder_inner_proc_unlock(thread->proc);
1812}
1813
1814/**
1815 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1816 * @proc:	proc to decrement
1817 *
1818 * A binder_proc needs to be kept alive while being used to create or
1819 * handle a transaction. proc->tmp_ref is incremented when
1820 * creating a new transaction or the binder_proc is currently in-use
1821 * by threads that are being released. When done with the binder_proc,
1822 * this function is called to decrement the counter and free the
1823 * proc if appropriate (proc has been released, all threads have
1824 * been released and not currently in use to process a transaction).
1825 */
1826static void binder_proc_dec_tmpref(struct binder_proc *proc)
1827{
1828	binder_inner_proc_lock(proc);
1829	proc->tmp_ref--;
1830	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1831			!proc->tmp_ref) {
1832		binder_inner_proc_unlock(proc);
1833		binder_free_proc(proc);
1834		return;
1835	}
1836	binder_inner_proc_unlock(proc);
1837}
1838
1839/**
1840 * binder_get_txn_from() - safely extract the "from" thread in transaction
1841 * @t:	binder transaction for t->from
1842 *
1843 * Atomically return the "from" thread and increment the tmp_ref
1844 * count for the thread to ensure it stays alive until
1845 * binder_thread_dec_tmpref() is called.
1846 *
1847 * Return: the value of t->from
1848 */
1849static struct binder_thread *binder_get_txn_from(
1850		struct binder_transaction *t)
1851{
1852	struct binder_thread *from;
1853
1854	spin_lock(&t->lock);
1855	from = t->from;
1856	if (from)
1857		atomic_inc(&from->tmp_ref);
1858	spin_unlock(&t->lock);
1859	return from;
1860}
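
/*
 * Pairing sketch (illustrative only): the tmpref taken above must be
 * dropped with binder_thread_dec_tmpref() once the thread is no longer
 * needed:
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		... use from ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */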
1861
1862/**
1863 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1864 * @t:	binder transaction for t->from
1865 *
1866 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1867 * to guarantee that the thread cannot be released while operating on it.
1868 * The caller must call binder_inner_proc_unlock() to release the inner lock
1869 * as well as call binder_thread_dec_tmpref() to release the reference.
1870 *
1871 * Return: the value of t->from
1872 */
1873static struct binder_thread *binder_get_txn_from_and_acq_inner(
1874		struct binder_transaction *t)
1875	__acquires(&t->from->proc->inner_lock)
1876{
1877	struct binder_thread *from;
1878
1879	from = binder_get_txn_from(t);
1880	if (!from) {
1881		__acquire(&from->proc->inner_lock);
1882		return NULL;
1883	}
1884	binder_inner_proc_lock(from->proc);
1885	if (t->from) {
1886		BUG_ON(from != t->from);
1887		return from;
1888	}
1889	binder_inner_proc_unlock(from->proc);
1890	__acquire(&from->proc->inner_lock);
1891	binder_thread_dec_tmpref(from);
1892	return NULL;
1893}
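
/*
 * Caller obligations (illustrative sketch): a non-NULL return means the
 * caller holds both a tmpref and from->proc->inner_lock, so the usual
 * shape is:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... work under the inner lock ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 *
 * binder_send_failed_reply() below follows this pattern.
 */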
1894
1895/**
1896 * binder_free_txn_fixups() - free unprocessed fd fixups
1897 * @t:	binder transaction whose fd fixups are to be freed
1898 *
1899 * If the transaction is being torn down prior to being
1900 * processed by the target process, free all of the
1901 * fd fixups and fput the file structs. It is safe to
1902 * call this function after the fixups have been
1903 * processed -- in that case, the list will be empty.
1904 */
1905static void binder_free_txn_fixups(struct binder_transaction *t)
1906{
1907	struct binder_txn_fd_fixup *fixup, *tmp;
1908
1909	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1910		fput(fixup->file);
1911		list_del(&fixup->fixup_entry);
1912		kfree(fixup);
1913	}
1914}
1915
1916static void binder_free_transaction(struct binder_transaction *t)
1917{
1918	struct binder_proc *target_proc = t->to_proc;
1919
1920	if (target_proc) {
1921		binder_inner_proc_lock(target_proc);
1922		if (t->buffer)
1923			t->buffer->transaction = NULL;
1924		binder_inner_proc_unlock(target_proc);
1925	}
1926	/*
1927	 * If the transaction has no target_proc, then
1928	 * t->buffer->transaction has already been cleared.
1929	 */
1930	binder_free_txn_fixups(t);
1931	kfree(t);
1932	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1933}
1934
1935static void binder_send_failed_reply(struct binder_transaction *t,
1936				     uint32_t error_code)
1937{
1938	struct binder_thread *target_thread;
1939	struct binder_transaction *next;
1940
1941	BUG_ON(t->flags & TF_ONE_WAY);
1942	while (1) {
1943		target_thread = binder_get_txn_from_and_acq_inner(t);
1944		if (target_thread) {
1945			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1946				     "send failed reply for transaction %d to %d:%d\n",
1947				      t->debug_id,
1948				      target_thread->proc->pid,
1949				      target_thread->pid);
1950
1951			binder_pop_transaction_ilocked(target_thread, t);
1952			if (target_thread->reply_error.cmd == BR_OK) {
1953				target_thread->reply_error.cmd = error_code;
1954				binder_enqueue_thread_work_ilocked(
1955					target_thread,
1956					&target_thread->reply_error.work);
1957				wake_up_interruptible(&target_thread->wait);
1958			} else {
1959				/*
1960				 * Cannot get here for normal operation, but
1961				 * we can if multiple synchronous transactions
1962				 * are sent without blocking for responses.
1963				 * Just ignore the 2nd error in this case.
1964				 */
1965				pr_warn("Unexpected reply error: %u\n",
1966					target_thread->reply_error.cmd);
1967			}
1968			binder_inner_proc_unlock(target_thread->proc);
1969			binder_thread_dec_tmpref(target_thread);
1970			binder_free_transaction(t);
1971			return;
1972		}
1973		__release(&target_thread->proc->inner_lock);
1974		next = t->from_parent;
1975
1976		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1977			     "send failed reply for transaction %d, target dead\n",
1978			     t->debug_id);
1979
1980		binder_free_transaction(t);
1981		if (next == NULL) {
1982			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1983				     "reply failed, no target thread at root\n");
1984			return;
1985		}
1986		t = next;
1987		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1988			     "reply failed, no target thread -- retry %d\n",
1989			      t->debug_id);
1990	}
1991}
1992
1993/**
1994 * binder_cleanup_transaction() - cleans up undelivered transaction
1995 * @t:		transaction that needs to be cleaned up
1996 * @reason:	reason the transaction wasn't delivered
1997 * @error_code:	error to return to caller (if synchronous call)
1998 */
1999static void binder_cleanup_transaction(struct binder_transaction *t,
2000				       const char *reason,
2001				       uint32_t error_code)
2002{
2003	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2004		binder_send_failed_reply(t, error_code);
2005	} else {
2006		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2007			"undelivered transaction %d, %s\n",
2008			t->debug_id, reason);
2009		binder_free_transaction(t);
2010	}
2011}
2012
2013/**
2014 * binder_get_object() - gets object and checks for valid metadata
2015 * @proc:	binder_proc owning the buffer
2016 * @buffer:	binder_buffer that we're parsing.
2017 * @offset:	offset in the @buffer at which to validate an object.
2018 * @object:	struct binder_object to read into
2019 *
2020 * Return:	If there's a valid metadata object at @offset in @buffer, the
2021 *		size of that object. Otherwise, it returns zero. The object
2022 *		is read into the struct binder_object pointed to by @object.
2023 */
2024static size_t binder_get_object(struct binder_proc *proc,
2025				struct binder_buffer *buffer,
2026				unsigned long offset,
2027				struct binder_object *object)
2028{
2029	size_t read_size;
2030	struct binder_object_header *hdr;
2031	size_t object_size = 0;
2032
2033	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2034	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2035	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2036					  offset, read_size))
2037		return 0;
2038
2039	/* Ok, now see if we read a complete object. */
2040	hdr = &object->hdr;
2041	switch (hdr->type) {
2042	case BINDER_TYPE_BINDER:
2043	case BINDER_TYPE_WEAK_BINDER:
2044	case BINDER_TYPE_HANDLE:
2045	case BINDER_TYPE_WEAK_HANDLE:
2046		object_size = sizeof(struct flat_binder_object);
2047		break;
2048	case BINDER_TYPE_FD:
2049		object_size = sizeof(struct binder_fd_object);
2050		break;
2051	case BINDER_TYPE_PTR:
2052		object_size = sizeof(struct binder_buffer_object);
2053		break;
2054	case BINDER_TYPE_FDA:
2055		object_size = sizeof(struct binder_fd_array_object);
2056		break;
2057	default:
2058		return 0;
2059	}
2060	if (offset <= buffer->data_size - object_size &&
2061	    buffer->data_size >= object_size)
2062		return object_size;
2063	else
2064		return 0;
2065}
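
/*
 * Worked example (commentary only, assuming a 64-bit kernel where
 * struct flat_binder_object is 24 bytes): with buffer->data_size == 64,
 * a flat_binder_object at offset 40 is accepted (40 <= 64 - 24) while
 * one at offset 48 is rejected (48 > 40), so an object truncated by the
 * end of the buffer never passes validation.
 */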
2066
2067/**
2068 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2069 * @proc:	binder_proc owning the buffer
2070 * @b:		binder_buffer containing the object
2071 * @object:	struct binder_object to read into
2072 * @index:	index in offset array at which the binder_buffer_object is
2073 *		located
2074 * @start_offset: points to the start of the offset array
2075 * @object_offsetp: offset of @object read from @b
2076 * @num_valid:	the number of valid offsets in the offset array
2077 *
2078 * Return:	If @index is within the valid range of the offset array
2079 *		described by @start_offset and @num_valid, and if there's a valid
2080 *		binder_buffer_object at the offset found in index @index
2081 *		of the offset array, that object is returned. Otherwise,
2082 *		%NULL is returned.
2083 *		Note that the offset found in index @index itself is not
2084 *		verified; this function assumes that @num_valid elements
2085 * from @start_offset were previously verified to have valid offsets.
2086 *		If @object_offsetp is non-NULL, then the offset within
2087 *		@b is written to it.
2088 */
2089static struct binder_buffer_object *binder_validate_ptr(
2090						struct binder_proc *proc,
2091						struct binder_buffer *b,
2092						struct binder_object *object,
2093						binder_size_t index,
2094						binder_size_t start_offset,
2095						binder_size_t *object_offsetp,
2096						binder_size_t num_valid)
2097{
2098	size_t object_size;
2099	binder_size_t object_offset;
2100	unsigned long buffer_offset;
2101
2102	if (index >= num_valid)
2103		return NULL;
2104
2105	buffer_offset = start_offset + sizeof(binder_size_t) * index;
2106	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2107					  b, buffer_offset,
2108					  sizeof(object_offset)))
2109		return NULL;
2110	object_size = binder_get_object(proc, b, object_offset, object);
2111	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2112		return NULL;
2113	if (object_offsetp)
2114		*object_offsetp = object_offset;
2115
2116	return &object->bbo;
2117}
2118
2119/**
2120 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2121 * @proc:		binder_proc owning the buffer
2122 * @b:			transaction buffer
2123 * @objects_start_offset: offset to start of objects buffer
2124 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
2125 * @fixup_offset:	start offset in @buffer to fix up
2126 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
2127 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
2128 *
2129 * Return:		%true if a fixup at @fixup_offset in buffer @b is
2130 *			allowed.
2131 *
2132 * For safety reasons, we only allow fixups inside a buffer to happen
2133 * at increasing offsets; additionally, we only allow fixup on the last
2134 * buffer object that was verified, or one of its parents.
2135 *
2136 * Example of what is allowed:
2137 *
2138 * A
2139 *   B (parent = A, offset = 0)
2140 *   C (parent = A, offset = 16)
2141 *     D (parent = C, offset = 0)
2142 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2143 *
2144 * Examples of what is not allowed:
2145 *
2146 * Decreasing offsets within the same parent:
2147 * A
2148 *   C (parent = A, offset = 16)
2149 *   B (parent = A, offset = 0) // decreasing offset within A
2150 *
2151 * Referring to a parent that wasn't the last object or any of its parents:
2152 * A
2153 *   B (parent = A, offset = 0)
2154 *   C (parent = A, offset = 0)
2155 *   C (parent = A, offset = 16)
2156 *     D (parent = B, offset = 0) // B is not A or any of A's parents
2157 */
2158static bool binder_validate_fixup(struct binder_proc *proc,
2159				  struct binder_buffer *b,
2160				  binder_size_t objects_start_offset,
2161				  binder_size_t buffer_obj_offset,
2162				  binder_size_t fixup_offset,
2163				  binder_size_t last_obj_offset,
2164				  binder_size_t last_min_offset)
2165{
2166	if (!last_obj_offset) {
2167		/* No object has been verified yet, so no fixup is allowed */
2168		return false;
2169	}
2170
2171	while (last_obj_offset != buffer_obj_offset) {
2172		unsigned long buffer_offset;
2173		struct binder_object last_object;
2174		struct binder_buffer_object *last_bbo;
2175		size_t object_size = binder_get_object(proc, b, last_obj_offset,
2176						       &last_object);
2177		if (object_size != sizeof(*last_bbo))
2178			return false;
2179
2180		last_bbo = &last_object.bbo;
2181		/*
2182		 * Safe to retrieve the parent of last_obj, since it
2183		 * was already previously verified by the driver.
2184		 */
2185		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2186			return false;
2187		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2188		buffer_offset = objects_start_offset +
2189			sizeof(binder_size_t) * last_bbo->parent;
2190		if (binder_alloc_copy_from_buffer(&proc->alloc,
2191						  &last_obj_offset,
2192						  b, buffer_offset,
2193						  sizeof(last_obj_offset)))
2194			return false;
2195	}
2196	return (fixup_offset >= last_min_offset);
2197}
2198
2199/**
2200 * struct binder_task_work_cb - for deferred close
2201 *
2202 * @twork:                callback_head for task work
2203 * @file:                 file to close
2204 *
2205 * Structure to pass task work to be handled after
2206 * returning from binder_ioctl() via task_work_add().
2207 */
2208struct binder_task_work_cb {
2209	struct callback_head twork;
2210	struct file *file;
2211};
2212
2213/**
2214 * binder_do_fd_close() - close list of file descriptors
2215 * @twork:	callback head for task work
2216 *
2217 * It is not safe to call ksys_close() during the binder_ioctl()
2218 * function if there is a chance that binder's own file descriptor
2219 * might be closed. This is to meet the requirements for using
2220 * fdget() (see comments for __fget_light()). Therefore use
2221 * task_work_add() to schedule the close operation once we have
2222 * returned from binder_ioctl(). This function is a callback
2223 * for that mechanism and drops the final reference to the
2224 * given file via fput().
2225 */
2226static void binder_do_fd_close(struct callback_head *twork)
2227{
2228	struct binder_task_work_cb *twcb = container_of(twork,
2229			struct binder_task_work_cb, twork);
2230
2231	fput(twcb->file);
2232	kfree(twcb);
2233}
2234
2235/**
2236 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2237 * @fd:		file-descriptor to close
2238 *
2239 * See comments in binder_do_fd_close(). This function is used to schedule
2240 * a file-descriptor to be closed after returning from binder_ioctl().
2241 */
2242static void binder_deferred_fd_close(int fd)
2243{
2244	struct binder_task_work_cb *twcb;
2245
2246	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2247	if (!twcb)
2248		return;
2249	init_task_work(&twcb->twork, binder_do_fd_close);
2250	__close_fd_get_file(fd, &twcb->file);
2251	if (twcb->file) {
2252		filp_close(twcb->file, current->files);
2253		task_work_add(current, &twcb->twork, true);
2254	} else {
2255		kfree(twcb);
2256	}
2257}
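
/*
 * Usage note (commentary only): this is called from binder_ioctl()
 * context; for example, binder_transaction_buffer_release() below uses
 * binder_deferred_fd_close() for each fd of an undelivered fd array.
 * The final fput() then runs from task work after the ioctl returns.
 */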
2258
2259static void binder_transaction_buffer_release(struct binder_proc *proc,
2260					      struct binder_buffer *buffer,
2261					      binder_size_t failed_at,
2262					      bool is_failure)
2263{
2264	int debug_id = buffer->debug_id;
2265	binder_size_t off_start_offset, buffer_offset, off_end_offset;
2266
2267	binder_debug(BINDER_DEBUG_TRANSACTION,
2268		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2269		     proc->pid, buffer->debug_id,
2270		     buffer->data_size, buffer->offsets_size,
2271		     (unsigned long long)failed_at);
2272
2273	if (buffer->target_node)
2274		binder_dec_node(buffer->target_node, 1, 0);
2275
2276	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2277	off_end_offset = is_failure ? failed_at :
2278				off_start_offset + buffer->offsets_size;
2279	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2280	     buffer_offset += sizeof(binder_size_t)) {
2281		struct binder_object_header *hdr;
2282		size_t object_size = 0;
2283		struct binder_object object;
2284		binder_size_t object_offset;
2285
2286		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2287						   buffer, buffer_offset,
2288						   sizeof(object_offset)))
2289			object_size = binder_get_object(proc, buffer,
2290							object_offset, &object);
2291		if (object_size == 0) {
2292			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2293			       debug_id, (u64)object_offset, buffer->data_size);
2294			continue;
2295		}
2296		hdr = &object.hdr;
2297		switch (hdr->type) {
2298		case BINDER_TYPE_BINDER:
2299		case BINDER_TYPE_WEAK_BINDER: {
2300			struct flat_binder_object *fp;
2301			struct binder_node *node;
2302
2303			fp = to_flat_binder_object(hdr);
2304			node = binder_get_node(proc, fp->binder);
2305			if (node == NULL) {
2306				pr_err("transaction release %d bad node %016llx\n",
2307				       debug_id, (u64)fp->binder);
2308				break;
2309			}
2310			binder_debug(BINDER_DEBUG_TRANSACTION,
2311				     "        node %d u%016llx\n",
2312				     node->debug_id, (u64)node->ptr);
2313			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2314					0);
2315			binder_put_node(node);
2316		} break;
2317		case BINDER_TYPE_HANDLE:
2318		case BINDER_TYPE_WEAK_HANDLE: {
2319			struct flat_binder_object *fp;
2320			struct binder_ref_data rdata;
2321			int ret;
2322
2323			fp = to_flat_binder_object(hdr);
2324			ret = binder_dec_ref_for_handle(proc, fp->handle,
2325				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2326
2327			if (ret) {
2328				pr_err("transaction release %d bad handle %d, ret = %d\n",
2329				 debug_id, fp->handle, ret);
2330				break;
2331			}
2332			binder_debug(BINDER_DEBUG_TRANSACTION,
2333				     "        ref %d desc %d\n",
2334				     rdata.debug_id, rdata.desc);
2335		} break;
2336
2337		case BINDER_TYPE_FD: {
2338			/*
2339			 * No need to close the file here since user-space
2340 * closes it for successfully delivered
2341			 * transactions. For transactions that weren't
2342			 * delivered, the new fd was never allocated so
2343			 * there is no need to close and the fput on the
2344			 * file is done when the transaction is torn
2345			 * down.
2346			 */
2347			WARN_ON(failed_at &&
2348				proc->tsk == current->group_leader);
2349		} break;
2350		case BINDER_TYPE_PTR:
2351			/*
2352			 * Nothing to do here, this will get cleaned up when the
2353			 * transaction buffer gets freed
2354			 */
2355			break;
2356		case BINDER_TYPE_FDA: {
2357			struct binder_fd_array_object *fda;
2358			struct binder_buffer_object *parent;
2359			struct binder_object ptr_object;
2360			binder_size_t fda_offset;
2361			size_t fd_index;
2362			binder_size_t fd_buf_size;
2363			binder_size_t num_valid;
2364
2365			if (proc->tsk != current->group_leader) {
2366				/*
2367				 * Nothing to do if running in sender context
2368				 * The fd fixups have not been applied so no
2369				 * fds need to be closed.
2370				 */
2371				continue;
2372			}
2373
2374			num_valid = (buffer_offset - off_start_offset) /
2375						sizeof(binder_size_t);
2376			fda = to_binder_fd_array_object(hdr);
2377			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2378						     fda->parent,
2379						     off_start_offset,
2380						     NULL,
2381						     num_valid);
2382			if (!parent) {
2383				pr_err("transaction release %d bad parent offset\n",
2384				       debug_id);
2385				continue;
2386			}
2387			fd_buf_size = sizeof(u32) * fda->num_fds;
2388			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2389				pr_err("transaction release %d invalid number of fds (%lld)\n",
2390				       debug_id, (u64)fda->num_fds);
2391				continue;
2392			}
2393			if (fd_buf_size > parent->length ||
2394			    fda->parent_offset > parent->length - fd_buf_size) {
2395				/* No space for all file descriptors here. */
2396				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2397				       debug_id, (u64)fda->num_fds);
2398				continue;
2399			}
2400			/*
2401			 * the source data for binder_buffer_object is visible
2402			 * to user-space and the @buffer element is the user
2403			 * pointer to the buffer_object containing the fd_array.
2404			 * Convert the address to an offset relative to
2405			 * the base of the transaction buffer.
2406			 */
2407			fda_offset =
2408			    (parent->buffer - (uintptr_t)buffer->user_data) +
2409			    fda->parent_offset;
2410			for (fd_index = 0; fd_index < fda->num_fds;
2411			     fd_index++) {
2412				u32 fd;
2413				int err;
2414				binder_size_t offset = fda_offset +
2415					fd_index * sizeof(fd);
2416
2417				err = binder_alloc_copy_from_buffer(
2418						&proc->alloc, &fd, buffer,
2419						offset, sizeof(fd));
2420				WARN_ON(err);
2421				if (!err)
2422					binder_deferred_fd_close(fd);
2423			}
2424		} break;
2425		default:
2426			pr_err("transaction release %d bad object type %x\n",
2427				debug_id, hdr->type);
2428			break;
2429		}
2430	}
2431}
2432
2433static int binder_translate_binder(struct flat_binder_object *fp,
2434				   struct binder_transaction *t,
2435				   struct binder_thread *thread)
2436{
2437	struct binder_node *node;
2438	struct binder_proc *proc = thread->proc;
2439	struct binder_proc *target_proc = t->to_proc;
2440	struct binder_ref_data rdata;
2441	int ret = 0;
2442
2443	node = binder_get_node(proc, fp->binder);
2444	if (!node) {
2445		node = binder_new_node(proc, fp);
2446		if (!node)
2447			return -ENOMEM;
2448	}
2449	if (fp->cookie != node->cookie) {
2450		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2451				  proc->pid, thread->pid, (u64)fp->binder,
2452				  node->debug_id, (u64)fp->cookie,
2453				  (u64)node->cookie);
2454		ret = -EINVAL;
2455		goto done;
2456	}
2457	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2458		ret = -EPERM;
2459		goto done;
2460	}
2461
2462	ret = binder_inc_ref_for_node(target_proc, node,
2463			fp->hdr.type == BINDER_TYPE_BINDER,
2464			&thread->todo, &rdata);
2465	if (ret)
2466		goto done;
2467
2468	if (fp->hdr.type == BINDER_TYPE_BINDER)
2469		fp->hdr.type = BINDER_TYPE_HANDLE;
2470	else
2471		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2472	fp->binder = 0;
2473	fp->handle = rdata.desc;
2474	fp->cookie = 0;
2475
2476	trace_binder_transaction_node_to_ref(t, node, &rdata);
2477	binder_debug(BINDER_DEBUG_TRANSACTION,
2478		     "        node %d u%016llx -> ref %d desc %d\n",
2479		     node->debug_id, (u64)node->ptr,
2480		     rdata.debug_id, rdata.desc);
2481done:
2482	binder_put_node(node);
2483	return ret;
2484}
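
/*
 * Translation sketch (commentary only): a BINDER_TYPE_BINDER object
 * arriving as { .binder = <sender pointer>, .cookie = <sender cookie> }
 * leaves the function above as a BINDER_TYPE_HANDLE object
 * { .handle = rdata.desc, .binder = 0, .cookie = 0 }, so the receiver
 * only ever sees a ref descriptor, never the sender's raw addresses.
 */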
2485
2486static int binder_translate_handle(struct flat_binder_object *fp,
2487				   struct binder_transaction *t,
2488				   struct binder_thread *thread)
2489{
2490	struct binder_proc *proc = thread->proc;
2491	struct binder_proc *target_proc = t->to_proc;
2492	struct binder_node *node;
2493	struct binder_ref_data src_rdata;
2494	int ret = 0;
2495
2496	node = binder_get_node_from_ref(proc, fp->handle,
2497			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2498	if (!node) {
2499		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2500				  proc->pid, thread->pid, fp->handle);
2501		return -EINVAL;
2502	}
2503	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2504		ret = -EPERM;
2505		goto done;
2506	}
2507
2508	binder_node_lock(node);
2509	if (node->proc == target_proc) {
2510		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2511			fp->hdr.type = BINDER_TYPE_BINDER;
2512		else
2513			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2514		fp->binder = node->ptr;
2515		fp->cookie = node->cookie;
2516		if (node->proc)
2517			binder_inner_proc_lock(node->proc);
2518		else
2519			__acquire(&node->proc->inner_lock);
2520		binder_inc_node_nilocked(node,
2521					 fp->hdr.type == BINDER_TYPE_BINDER,
2522					 0, NULL);
2523		if (node->proc)
2524			binder_inner_proc_unlock(node->proc);
2525		else
2526			__release(&node->proc->inner_lock);
2527		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2528		binder_debug(BINDER_DEBUG_TRANSACTION,
2529			     "        ref %d desc %d -> node %d u%016llx\n",
2530			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2531			     (u64)node->ptr);
2532		binder_node_unlock(node);
2533	} else {
2534		struct binder_ref_data dest_rdata;
2535
2536		binder_node_unlock(node);
2537		ret = binder_inc_ref_for_node(target_proc, node,
2538				fp->hdr.type == BINDER_TYPE_HANDLE,
2539				NULL, &dest_rdata);
2540		if (ret)
2541			goto done;
2542
2543		fp->binder = 0;
2544		fp->handle = dest_rdata.desc;
2545		fp->cookie = 0;
2546		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2547						    &dest_rdata);
2548		binder_debug(BINDER_DEBUG_TRANSACTION,
2549			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2550			     src_rdata.debug_id, src_rdata.desc,
2551			     dest_rdata.debug_id, dest_rdata.desc,
2552			     node->debug_id);
2553	}
2554done:
2555	binder_put_node(node);
2556	return ret;
2557}
2558
2559static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2560			       struct binder_transaction *t,
2561			       struct binder_thread *thread,
2562			       struct binder_transaction *in_reply_to)
2563{
2564	struct binder_proc *proc = thread->proc;
2565	struct binder_proc *target_proc = t->to_proc;
2566	struct binder_txn_fd_fixup *fixup;
2567	struct file *file;
2568	int ret = 0;
2569	bool target_allows_fd;
2570
2571	if (in_reply_to)
2572		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2573	else
2574		target_allows_fd = t->buffer->target_node->accept_fds;
2575	if (!target_allows_fd) {
2576		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2577				  proc->pid, thread->pid,
2578				  in_reply_to ? "reply" : "transaction",
2579				  fd);
2580		ret = -EPERM;
2581		goto err_fd_not_accepted;
2582	}
2583
2584	file = fget(fd);
2585	if (!file) {
2586		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2587				  proc->pid, thread->pid, fd);
2588		ret = -EBADF;
2589		goto err_fget;
2590	}
2591	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2592	if (ret < 0) {
2593		ret = -EPERM;
2594		goto err_security;
2595	}
2596
2597	/*
2598	 * Add fixup record for this transaction. The allocation
2599	 * of the fd in the target needs to be done from a
2600	 * target thread.
2601	 */
2602	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2603	if (!fixup) {
2604		ret = -ENOMEM;
2605		goto err_alloc;
2606	}
2607	fixup->file = file;
2608	fixup->offset = fd_offset;
2609	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2610	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2611
2612	return ret;
2613
2614err_alloc:
2615err_security:
2616	fput(file);
2617err_fget:
2618err_fd_not_accepted:
2619	return ret;
2620}
2621
2622static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2623				     struct binder_buffer_object *parent,
2624				     struct binder_transaction *t,
2625				     struct binder_thread *thread,
2626				     struct binder_transaction *in_reply_to)
2627{
2628	binder_size_t fdi, fd_buf_size;
2629	binder_size_t fda_offset;
2630	struct binder_proc *proc = thread->proc;
2631	struct binder_proc *target_proc = t->to_proc;
2632
2633	fd_buf_size = sizeof(u32) * fda->num_fds;
2634	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2635		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2636				  proc->pid, thread->pid, (u64)fda->num_fds);
2637		return -EINVAL;
2638	}
2639	if (fd_buf_size > parent->length ||
2640	    fda->parent_offset > parent->length - fd_buf_size) {
2641		/* No space for all file descriptors here. */
2642		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2643				  proc->pid, thread->pid, (u64)fda->num_fds);
2644		return -EINVAL;
2645	}
2646	/*
2647	 * the source data for binder_buffer_object is visible
2648	 * to user-space and the @buffer element is the user
2649	 * pointer to the buffer_object containing the fd_array.
2650	 * Convert the address to an offset relative to
2651	 * the base of the transaction buffer.
2652	 */
2653	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2654		fda->parent_offset;
2655	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2656		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2657				  proc->pid, thread->pid);
2658		return -EINVAL;
2659	}
2660	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2661		u32 fd;
2662		int ret;
2663		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2664
2665		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2666						    &fd, t->buffer,
2667						    offset, sizeof(fd));
2668		if (!ret)
2669			ret = binder_translate_fd(fd, offset, t, thread,
2670						  in_reply_to);
2671		if (ret < 0)
2672			return ret;
2673	}
2674	return 0;
2675}
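
/*
 * Offset arithmetic example (commentary only, hypothetical values): if
 * t->buffer->user_data maps at user address 0x7f10000000, the parent's
 * @buffer field is 0x7f10000100 and fda->parent_offset is 16, then
 *
 *	fda_offset = (0x7f10000100 - 0x7f10000000) + 16 = 0x110
 *
 * and fd number i is copied from buffer offset 0x110 + i * sizeof(u32).
 */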
2676
2677static int binder_fixup_parent(struct binder_transaction *t,
2678			       struct binder_thread *thread,
2679			       struct binder_buffer_object *bp,
2680			       binder_size_t off_start_offset,
2681			       binder_size_t num_valid,
2682			       binder_size_t last_fixup_obj_off,
2683			       binder_size_t last_fixup_min_off)
2684{
2685	struct binder_buffer_object *parent;
2686	struct binder_buffer *b = t->buffer;
2687	struct binder_proc *proc = thread->proc;
2688	struct binder_proc *target_proc = t->to_proc;
2689	struct binder_object object;
2690	binder_size_t buffer_offset;
2691	binder_size_t parent_offset;
2692
2693	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2694		return 0;
2695
2696	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2697				     off_start_offset, &parent_offset,
2698				     num_valid);
2699	if (!parent) {
2700		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2701				  proc->pid, thread->pid);
2702		return -EINVAL;
2703	}
2704
2705	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2706				   parent_offset, bp->parent_offset,
2707				   last_fixup_obj_off,
2708				   last_fixup_min_off)) {
2709		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2710				  proc->pid, thread->pid);
2711		return -EINVAL;
2712	}
2713
2714	if (parent->length < sizeof(binder_uintptr_t) ||
2715	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2716		/* No space for a pointer here! */
2717		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2718				  proc->pid, thread->pid);
2719		return -EINVAL;
2720	}
2721	buffer_offset = bp->parent_offset +
2722			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2723	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2724					&bp->buffer, sizeof(bp->buffer))) {
2725		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2726				  proc->pid, thread->pid);
2727		return -EINVAL;
2728	}
2729
2730	return 0;
2731}
2732
2733/**
2734 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2735 * @t:		transaction to send
2736 * @proc:	process to send the transaction to
2737 * @thread:	thread in @proc to send the transaction to (may be NULL)
2738 *
2739 * This function queues a transaction to the specified process. It will try
2740 * to find a thread in the target process to handle the transaction and
2741 * wake it up. If no thread is found, the work is queued to the proc
2742 * waitqueue.
2743 *
2744 * If the @thread parameter is not NULL, the transaction is always queued
2745 * to the waitlist of that specific thread.
2746 *
2747 * Return:	true if the transaction was successfully queued
2748 *		false if the target process or thread is dead
2749 */
2750static bool binder_proc_transaction(struct binder_transaction *t,
2751				    struct binder_proc *proc,
2752				    struct binder_thread *thread)
2753{
2754	struct binder_node *node = t->buffer->target_node;
2755	bool oneway = !!(t->flags & TF_ONE_WAY);
2756	bool pending_async = false;
2757
2758	BUG_ON(!node);
2759	binder_node_lock(node);
2760	if (oneway) {
2761		BUG_ON(thread);
2762		if (node->has_async_transaction)
2763			pending_async = true;
2764		else
2765			node->has_async_transaction = true;
2766	}
2767
2768	binder_inner_proc_lock(proc);
2769
2770	if (proc->is_dead || (thread && thread->is_dead)) {
2771		binder_inner_proc_unlock(proc);
2772		binder_node_unlock(node);
2773		return false;
2774	}
2775
2776	if (!thread && !pending_async)
2777		thread = binder_select_thread_ilocked(proc);
2778
2779	if (thread)
2780		binder_enqueue_thread_work_ilocked(thread, &t->work);
2781	else if (!pending_async)
2782		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2783	else
2784		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2785
2786	if (!pending_async)
2787		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2788
2789	binder_inner_proc_unlock(proc);
2790	binder_node_unlock(node);
2791
2792	return true;
2793}
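
/*
 * Queueing summary for the function above (commentary only):
 *
 *	sync, @thread given        -> thread->todo, wake that thread
 *	sync, no @thread           -> selected waiting thread or proc->todo
 *	oneway, no async pending   -> selected thread or proc->todo
 *	oneway, async pending      -> node->async_todo, no wakeup here
 */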
2794
2795/**
2796 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2797 * @node:         struct binder_node for which to get refs
2798 * @procp:        returns @node->proc if valid
2799 * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2800 *
2801 * User-space normally keeps the node alive when creating a transaction
2802 * since it has a reference to the target. The local strong ref keeps it
2803 * alive if the sending process dies before the target process processes
2804 * the transaction. If the source process is malicious or has a reference
2805 * counting bug, relying on the local strong ref can fail.
2806 *
2807 * Since user-space can cause the local strong ref to go away, we also take
2808 * a tmpref on the node to ensure it survives while we are constructing
2809 * the transaction. We also need a tmpref on the proc while we are
2810 * constructing the transaction, so we take that here as well.
2811 *
2812 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2813 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2814 * target proc has died, @error is set to BR_DEAD_REPLY.
2815 */
2816static struct binder_node *binder_get_node_refs_for_txn(
2817		struct binder_node *node,
2818		struct binder_proc **procp,
2819		uint32_t *error)
2820{
2821	struct binder_node *target_node = NULL;
2822
2823	binder_node_inner_lock(node);
2824	if (node->proc) {
2825		target_node = node;
2826		binder_inc_node_nilocked(node, 1, 0, NULL);
2827		binder_inc_node_tmpref_ilocked(node);
2828		node->proc->tmp_ref++;
2829		*procp = node->proc;
2830	} else
2831		*error = BR_DEAD_REPLY;
2832	binder_node_inner_unlock(node);
2833
2834	return target_node;
2835}
2836
2837static void binder_transaction(struct binder_proc *proc,
2838			       struct binder_thread *thread,
2839			       struct binder_transaction_data *tr, int reply,
2840			       binder_size_t extra_buffers_size)
2841{
2842	int ret;
2843	struct binder_transaction *t;
2844	struct binder_work *w;
2845	struct binder_work *tcomplete;
2846	binder_size_t buffer_offset = 0;
2847	binder_size_t off_start_offset, off_end_offset;
2848	binder_size_t off_min;
2849	binder_size_t sg_buf_offset, sg_buf_end_offset;
2850	struct binder_proc *target_proc = NULL;
2851	struct binder_thread *target_thread = NULL;
2852	struct binder_node *target_node = NULL;
2853	struct binder_transaction *in_reply_to = NULL;
2854	struct binder_transaction_log_entry *e;
2855	uint32_t return_error = 0;
2856	uint32_t return_error_param = 0;
2857	uint32_t return_error_line = 0;
2858	binder_size_t last_fixup_obj_off = 0;
2859	binder_size_t last_fixup_min_off = 0;
2860	struct binder_context *context = proc->context;
2861	int t_debug_id = atomic_inc_return(&binder_last_id);
2862	char *secctx = NULL;
2863	u32 secctx_sz = 0;
2864
2865	e = binder_transaction_log_add(&binder_transaction_log);
2866	e->debug_id = t_debug_id;
2867	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2868	e->from_proc = proc->pid;
2869	e->from_thread = thread->pid;
2870	e->target_handle = tr->target.handle;
2871	e->data_size = tr->data_size;
2872	e->offsets_size = tr->offsets_size;
2873	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2874
2875	if (reply) {
2876		binder_inner_proc_lock(proc);
2877		in_reply_to = thread->transaction_stack;
2878		if (in_reply_to == NULL) {
2879			binder_inner_proc_unlock(proc);
2880			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2881					  proc->pid, thread->pid);
2882			return_error = BR_FAILED_REPLY;
2883			return_error_param = -EPROTO;
2884			return_error_line = __LINE__;
2885			goto err_empty_call_stack;
2886		}
2887		if (in_reply_to->to_thread != thread) {
2888			spin_lock(&in_reply_to->lock);
2889			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2890				proc->pid, thread->pid, in_reply_to->debug_id,
2891				in_reply_to->to_proc ?
2892				in_reply_to->to_proc->pid : 0,
2893				in_reply_to->to_thread ?
2894				in_reply_to->to_thread->pid : 0);
2895			spin_unlock(&in_reply_to->lock);
2896			binder_inner_proc_unlock(proc);
2897			return_error = BR_FAILED_REPLY;
2898			return_error_param = -EPROTO;
2899			return_error_line = __LINE__;
2900			in_reply_to = NULL;
2901			goto err_bad_call_stack;
2902		}
2903		thread->transaction_stack = in_reply_to->to_parent;
2904		binder_inner_proc_unlock(proc);
2905		binder_set_nice(in_reply_to->saved_priority);
2906		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2907		if (target_thread == NULL) {
2908			/* annotation for sparse */
2909			__release(&target_thread->proc->inner_lock);
2910			return_error = BR_DEAD_REPLY;
2911			return_error_line = __LINE__;
2912			goto err_dead_binder;
2913		}
2914		if (target_thread->transaction_stack != in_reply_to) {
2915			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2916				proc->pid, thread->pid,
2917				target_thread->transaction_stack ?
2918				target_thread->transaction_stack->debug_id : 0,
2919				in_reply_to->debug_id);
2920			binder_inner_proc_unlock(target_thread->proc);
2921			return_error = BR_FAILED_REPLY;
2922			return_error_param = -EPROTO;
2923			return_error_line = __LINE__;
2924			in_reply_to = NULL;
2925			target_thread = NULL;
2926			goto err_dead_binder;
2927		}
2928		target_proc = target_thread->proc;
2929		target_proc->tmp_ref++;
2930		binder_inner_proc_unlock(target_thread->proc);
2931	} else {
2932		if (tr->target.handle) {
2933			struct binder_ref *ref;
2934
2935			/*
2936			 * There must already be a strong ref
2937			 * on this node. If so, do a strong
2938			 * increment on the node to ensure it
2939			 * stays alive until the transaction is
2940			 * done.
2941			 */
2942			binder_proc_lock(proc);
2943			ref = binder_get_ref_olocked(proc, tr->target.handle,
2944						     true);
2945			if (ref) {
2946				target_node = binder_get_node_refs_for_txn(
2947						ref->node, &target_proc,
2948						&return_error);
2949			} else {
2950				binder_user_error("%d:%d got transaction to invalid handle\n",
2951						  proc->pid, thread->pid);
2952				return_error = BR_FAILED_REPLY;
2953			}
2954			binder_proc_unlock(proc);
2955		} else {
2956			mutex_lock(&context->context_mgr_node_lock);
2957			target_node = context->binder_context_mgr_node;
2958			if (target_node)
2959				target_node = binder_get_node_refs_for_txn(
2960						target_node, &target_proc,
2961						&return_error);
2962			else
2963				return_error = BR_DEAD_REPLY;
2964			mutex_unlock(&context->context_mgr_node_lock);
2965			if (target_node && target_proc->pid == proc->pid) {
2966				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2967						  proc->pid, thread->pid);
2968				return_error = BR_FAILED_REPLY;
2969				return_error_param = -EINVAL;
2970				return_error_line = __LINE__;
2971				goto err_invalid_target_handle;
2972			}
2973		}
2974		if (!target_node) {
2975			/*
2976			 * return_error is set above
2977			 */
2978			return_error_param = -EINVAL;
2979			return_error_line = __LINE__;
2980			goto err_dead_binder;
2981		}
2982		e->to_node = target_node->debug_id;
2983		if (WARN_ON(proc == target_proc)) {
2984			return_error = BR_FAILED_REPLY;
2985			return_error_param = -EINVAL;
2986			return_error_line = __LINE__;
2987			goto err_invalid_target_handle;
2988		}
2989		if (security_binder_transaction(proc->tsk,
2990						target_proc->tsk) < 0) {
2991			return_error = BR_FAILED_REPLY;
2992			return_error_param = -EPERM;
2993			return_error_line = __LINE__;
2994			goto err_invalid_target_handle;
2995		}
2996		binder_inner_proc_lock(proc);
2997
2998		w = list_first_entry_or_null(&thread->todo,
2999					     struct binder_work, entry);
3000		if (!(tr->flags & TF_ONE_WAY) && w &&
3001		    w->type == BINDER_WORK_TRANSACTION) {
3002			/*
3003			 * Do not allow new outgoing transaction from a
3004			 * thread that has a transaction at the head of
3005			 * its todo list. Only need to check the head
3006			 * because binder_select_thread_ilocked picks a
3007			 * thread from proc->waiting_threads to enqueue
3008			 * the transaction, and nothing is queued to the
3009			 * todo list while the thread is on waiting_threads.
3010			 */
3011			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3012					  proc->pid, thread->pid);
3013			binder_inner_proc_unlock(proc);
3014			return_error = BR_FAILED_REPLY;
3015			return_error_param = -EPROTO;
3016			return_error_line = __LINE__;
3017			goto err_bad_todo_list;
3018		}
3019
3020		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3021			struct binder_transaction *tmp;
3022
3023			tmp = thread->transaction_stack;
3024			if (tmp->to_thread != thread) {
3025				spin_lock(&tmp->lock);
3026				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3027					proc->pid, thread->pid, tmp->debug_id,
3028					tmp->to_proc ? tmp->to_proc->pid : 0,
3029					tmp->to_thread ?
3030					tmp->to_thread->pid : 0);
3031				spin_unlock(&tmp->lock);
3032				binder_inner_proc_unlock(proc);
3033				return_error = BR_FAILED_REPLY;
3034				return_error_param = -EPROTO;
3035				return_error_line = __LINE__;
3036				goto err_bad_call_stack;
3037			}
3038			while (tmp) {
3039				struct binder_thread *from;
3040
3041				spin_lock(&tmp->lock);
3042				from = tmp->from;
3043				if (from && from->proc == target_proc) {
3044					atomic_inc(&from->tmp_ref);
3045					target_thread = from;
3046					spin_unlock(&tmp->lock);
3047					break;
3048				}
3049				spin_unlock(&tmp->lock);
3050				tmp = tmp->from_parent;
3051			}
3052		}
3053		binder_inner_proc_unlock(proc);
3054	}
3055	if (target_thread)
3056		e->to_thread = target_thread->pid;
3057	e->to_proc = target_proc->pid;
3058
3059	/* TODO: reuse incoming transaction for reply */
3060	t = kzalloc(sizeof(*t), GFP_KERNEL);
3061	if (t == NULL) {
3062		return_error = BR_FAILED_REPLY;
3063		return_error_param = -ENOMEM;
3064		return_error_line = __LINE__;
3065		goto err_alloc_t_failed;
3066	}
3067	INIT_LIST_HEAD(&t->fd_fixups);
3068	binder_stats_created(BINDER_STAT_TRANSACTION);
3069	spin_lock_init(&t->lock);
3070
3071	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3072	if (tcomplete == NULL) {
3073		return_error = BR_FAILED_REPLY;
3074		return_error_param = -ENOMEM;
3075		return_error_line = __LINE__;
3076		goto err_alloc_tcomplete_failed;
3077	}
3078	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3079
3080	t->debug_id = t_debug_id;
3081
3082	if (reply)
3083		binder_debug(BINDER_DEBUG_TRANSACTION,
3084			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3085			     proc->pid, thread->pid, t->debug_id,
3086			     target_proc->pid, target_thread->pid,
3087			     (u64)tr->data.ptr.buffer,
3088			     (u64)tr->data.ptr.offsets,
3089			     (u64)tr->data_size, (u64)tr->offsets_size,
3090			     (u64)extra_buffers_size);
3091	else
3092		binder_debug(BINDER_DEBUG_TRANSACTION,
3093			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3094			     proc->pid, thread->pid, t->debug_id,
3095			     target_proc->pid, target_node->debug_id,
3096			     (u64)tr->data.ptr.buffer,
3097			     (u64)tr->data.ptr.offsets,
3098			     (u64)tr->data_size, (u64)tr->offsets_size,
3099			     (u64)extra_buffers_size);
3100
3101	if (!reply && !(tr->flags & TF_ONE_WAY))
3102		t->from = thread;
3103	else
3104		t->from = NULL;
3105	t->sender_euid = task_euid(proc->tsk);
3106	t->to_proc = target_proc;
3107	t->to_thread = target_thread;
3108	t->code = tr->code;
3109	t->flags = tr->flags;
3110	t->priority = task_nice(current);
3111
3112	if (target_node && target_node->txn_security_ctx) {
3113		u32 secid;
3114		size_t added_size;
3115
3116		security_task_getsecid(proc->tsk, &secid);
3117		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3118		if (ret) {
3119			return_error = BR_FAILED_REPLY;
3120			return_error_param = ret;
3121			return_error_line = __LINE__;
3122			goto err_get_secctx_failed;
3123		}
3124		added_size = ALIGN(secctx_sz, sizeof(u64));
3125		extra_buffers_size += added_size;
3126		if (extra_buffers_size < added_size) {
3127			/* integer overflow of extra_buffers_size */
3128			return_error = BR_FAILED_REPLY;
3129			return_error_param = -EINVAL;
3130			return_error_line = __LINE__;
3131			goto err_bad_extra_size;
3132		}
3133	}
3134
3135	trace_binder_transaction(reply, t, target_node);
3136
3137	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3138		tr->offsets_size, extra_buffers_size,
3139		!reply && (t->flags & TF_ONE_WAY));
3140	if (IS_ERR(t->buffer)) {
3141		/*
3142		 * -ESRCH indicates VMA cleared. The target is dying.
3143		 */
3144		return_error_param = PTR_ERR(t->buffer);
3145		return_error = return_error_param == -ESRCH ?
3146			BR_DEAD_REPLY : BR_FAILED_REPLY;
3147		return_error_line = __LINE__;
3148		t->buffer = NULL;
3149		goto err_binder_alloc_buf_failed;
3150	}
3151	if (secctx) {
3152		int err;
3153		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3154				    ALIGN(tr->offsets_size, sizeof(void *)) +
3155				    ALIGN(extra_buffers_size, sizeof(void *)) -
3156				    ALIGN(secctx_sz, sizeof(u64));
3157
3158		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3159		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3160						  t->buffer, buf_offset,
3161						  secctx, secctx_sz);
3162		if (err) {
3163			t->security_ctx = 0;
3164			WARN_ON(1);
3165		}
3166		security_release_secctx(secctx, secctx_sz);
3167		secctx = NULL;
3168	}
3169	t->buffer->debug_id = t->debug_id;
3170	t->buffer->transaction = t;
3171	t->buffer->target_node = target_node;
3172	trace_binder_transaction_alloc_buf(t->buffer);
3173
3174	if (binder_alloc_copy_user_to_buffer(
3175				&target_proc->alloc,
3176				t->buffer, 0,
3177				(const void __user *)
3178					(uintptr_t)tr->data.ptr.buffer,
3179				tr->data_size)) {
3180		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3181				proc->pid, thread->pid);
3182		return_error = BR_FAILED_REPLY;
3183		return_error_param = -EFAULT;
3184		return_error_line = __LINE__;
3185		goto err_copy_data_failed;
3186	}
3187	if (binder_alloc_copy_user_to_buffer(
3188				&target_proc->alloc,
3189				t->buffer,
3190				ALIGN(tr->data_size, sizeof(void *)),
3191				(const void __user *)
3192					(uintptr_t)tr->data.ptr.offsets,
3193				tr->offsets_size)) {
3194		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3195				proc->pid, thread->pid);
3196		return_error = BR_FAILED_REPLY;
3197		return_error_param = -EFAULT;
3198		return_error_line = __LINE__;
3199		goto err_copy_data_failed;
3200	}
3201	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3202		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3203				proc->pid, thread->pid, (u64)tr->offsets_size);
3204		return_error = BR_FAILED_REPLY;
3205		return_error_param = -EINVAL;
3206		return_error_line = __LINE__;
3207		goto err_bad_offset;
3208	}
3209	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3210		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3211				  proc->pid, thread->pid,
3212				  (u64)extra_buffers_size);
3213		return_error = BR_FAILED_REPLY;
3214		return_error_param = -EINVAL;
3215		return_error_line = __LINE__;
3216		goto err_bad_offset;
3217	}
3218	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3219	buffer_offset = off_start_offset;
3220	off_end_offset = off_start_offset + tr->offsets_size;
3221	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3222	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3223		ALIGN(secctx_sz, sizeof(u64));
3224	off_min = 0;
3225	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3226	     buffer_offset += sizeof(binder_size_t)) {
3227		struct binder_object_header *hdr;
3228		size_t object_size;
3229		struct binder_object object;
3230		binder_size_t object_offset;
3231
3232		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3233						  &object_offset,
3234						  t->buffer,
3235						  buffer_offset,
3236						  sizeof(object_offset))) {
3237			return_error = BR_FAILED_REPLY;
3238			return_error_param = -EINVAL;
3239			return_error_line = __LINE__;
3240			goto err_bad_offset;
3241		}
3242		object_size = binder_get_object(target_proc, t->buffer,
3243						object_offset, &object);
3244		if (object_size == 0 || object_offset < off_min) {
3245			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3246					  proc->pid, thread->pid,
3247					  (u64)object_offset,
3248					  (u64)off_min,
3249					  (u64)t->buffer->data_size);
3250			return_error = BR_FAILED_REPLY;
3251			return_error_param = -EINVAL;
3252			return_error_line = __LINE__;
3253			goto err_bad_offset;
3254		}
3255
3256		hdr = &object.hdr;
3257		off_min = object_offset + object_size;
3258		switch (hdr->type) {
3259		case BINDER_TYPE_BINDER:
3260		case BINDER_TYPE_WEAK_BINDER: {
3261			struct flat_binder_object *fp;
3262
3263			fp = to_flat_binder_object(hdr);
3264			ret = binder_translate_binder(fp, t, thread);
3265
3266			if (ret < 0 ||
3267			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3268							t->buffer,
3269							object_offset,
3270							fp, sizeof(*fp))) {
3271				return_error = BR_FAILED_REPLY;
3272				return_error_param = ret;
3273				return_error_line = __LINE__;
3274				goto err_translate_failed;
3275			}
3276		} break;
3277		case BINDER_TYPE_HANDLE:
3278		case BINDER_TYPE_WEAK_HANDLE: {
3279			struct flat_binder_object *fp;
3280
3281			fp = to_flat_binder_object(hdr);
3282			ret = binder_translate_handle(fp, t, thread);
3283			if (ret < 0 ||
3284			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3285							t->buffer,
3286							object_offset,
3287							fp, sizeof(*fp))) {
3288				return_error = BR_FAILED_REPLY;
3289				return_error_param = ret;
3290				return_error_line = __LINE__;
3291				goto err_translate_failed;
3292			}
3293		} break;
3294
3295		case BINDER_TYPE_FD: {
3296			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3297			binder_size_t fd_offset = object_offset +
3298				(uintptr_t)&fp->fd - (uintptr_t)fp;
3299			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3300						      thread, in_reply_to);
3301
3302			fp->pad_binder = 0;
3303			if (ret < 0 ||
3304			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3305							t->buffer,
3306							object_offset,
3307							fp, sizeof(*fp))) {
3308				return_error = BR_FAILED_REPLY;
3309				return_error_param = ret;
3310				return_error_line = __LINE__;
3311				goto err_translate_failed;
3312			}
3313		} break;
3314		case BINDER_TYPE_FDA: {
3315			struct binder_object ptr_object;
3316			binder_size_t parent_offset;
3317			struct binder_fd_array_object *fda =
3318				to_binder_fd_array_object(hdr);
3319			size_t num_valid = (buffer_offset - off_start_offset) /
3320						sizeof(binder_size_t);
3321			struct binder_buffer_object *parent =
3322				binder_validate_ptr(target_proc, t->buffer,
3323						    &ptr_object, fda->parent,
3324						    off_start_offset,
3325						    &parent_offset,
3326						    num_valid);
3327			if (!parent) {
3328				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3329						  proc->pid, thread->pid);
3330				return_error = BR_FAILED_REPLY;
3331				return_error_param = -EINVAL;
3332				return_error_line = __LINE__;
3333				goto err_bad_parent;
3334			}
3335			if (!binder_validate_fixup(target_proc, t->buffer,
3336						   off_start_offset,
3337						   parent_offset,
3338						   fda->parent_offset,
3339						   last_fixup_obj_off,
3340						   last_fixup_min_off)) {
3341				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3342						  proc->pid, thread->pid);
3343				return_error = BR_FAILED_REPLY;
3344				return_error_param = -EINVAL;
3345				return_error_line = __LINE__;
3346				goto err_bad_parent;
3347			}
3348			ret = binder_translate_fd_array(fda, parent, t, thread,
3349							in_reply_to);
3350			if (ret < 0) {
3351				return_error = BR_FAILED_REPLY;
3352				return_error_param = ret;
3353				return_error_line = __LINE__;
3354				goto err_translate_failed;
3355			}
3356			last_fixup_obj_off = parent_offset;
3357			last_fixup_min_off =
3358				fda->parent_offset + sizeof(u32) * fda->num_fds;
3359		} break;
3360		case BINDER_TYPE_PTR: {
3361			struct binder_buffer_object *bp =
3362				to_binder_buffer_object(hdr);
3363			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3364			size_t num_valid;
3365
3366			if (bp->length > buf_left) {
3367				binder_user_error("%d:%d got transaction with too large buffer\n",
3368						  proc->pid, thread->pid);
3369				return_error = BR_FAILED_REPLY;
3370				return_error_param = -EINVAL;
3371				return_error_line = __LINE__;
3372				goto err_bad_offset;
3373			}
3374			if (binder_alloc_copy_user_to_buffer(
3375						&target_proc->alloc,
3376						t->buffer,
3377						sg_buf_offset,
3378						(const void __user *)
3379							(uintptr_t)bp->buffer,
3380						bp->length)) {
3381				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3382						  proc->pid, thread->pid);
3383				return_error_param = -EFAULT;
3384				return_error = BR_FAILED_REPLY;
3385				return_error_line = __LINE__;
3386				goto err_copy_data_failed;
3387			}
3388			/* Fixup buffer pointer to target proc address space */
3389			bp->buffer = (uintptr_t)
3390				t->buffer->user_data + sg_buf_offset;
3391			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3392
3393			num_valid = (buffer_offset - off_start_offset) /
3394					sizeof(binder_size_t);
3395			ret = binder_fixup_parent(t, thread, bp,
3396						  off_start_offset,
3397						  num_valid,
3398						  last_fixup_obj_off,
3399						  last_fixup_min_off);
3400			if (ret < 0 ||
3401			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3402							t->buffer,
3403							object_offset,
3404							bp, sizeof(*bp))) {
3405				return_error = BR_FAILED_REPLY;
3406				return_error_param = ret;
3407				return_error_line = __LINE__;
3408				goto err_translate_failed;
3409			}
3410			last_fixup_obj_off = object_offset;
3411			last_fixup_min_off = 0;
3412		} break;
3413		default:
3414			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3415				proc->pid, thread->pid, hdr->type);
3416			return_error = BR_FAILED_REPLY;
3417			return_error_param = -EINVAL;
3418			return_error_line = __LINE__;
3419			goto err_bad_object_type;
3420		}
3421	}
3422	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3423	t->work.type = BINDER_WORK_TRANSACTION;
3424
3425	if (reply) {
3426		binder_enqueue_thread_work(thread, tcomplete);
3427		binder_inner_proc_lock(target_proc);
3428		if (target_thread->is_dead) {
3429			binder_inner_proc_unlock(target_proc);
3430			goto err_dead_proc_or_thread;
3431		}
3432		BUG_ON(t->buffer->async_transaction != 0);
3433		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3434		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3435		binder_inner_proc_unlock(target_proc);
3436		wake_up_interruptible_sync(&target_thread->wait);
3437		binder_free_transaction(in_reply_to);
3438	} else if (!(t->flags & TF_ONE_WAY)) {
3439		BUG_ON(t->buffer->async_transaction != 0);
3440		binder_inner_proc_lock(proc);
3441		/*
3442		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3443		 * userspace immediately; this allows the target process to
3444		 * immediately start processing this transaction, reducing
3445		 * latency. We will then return the TRANSACTION_COMPLETE when
3446		 * the target replies (or there is an error).
3447		 */
3448		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3449		t->need_reply = 1;
3450		t->from_parent = thread->transaction_stack;
3451		thread->transaction_stack = t;
3452		binder_inner_proc_unlock(proc);
3453		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3454			binder_inner_proc_lock(proc);
3455			binder_pop_transaction_ilocked(thread, t);
3456			binder_inner_proc_unlock(proc);
3457			goto err_dead_proc_or_thread;
3458		}
3459	} else {
3460		BUG_ON(target_node == NULL);
3461		BUG_ON(t->buffer->async_transaction != 1);
3462		binder_enqueue_thread_work(thread, tcomplete);
3463		if (!binder_proc_transaction(t, target_proc, NULL))
3464			goto err_dead_proc_or_thread;
3465	}
3466	if (target_thread)
3467		binder_thread_dec_tmpref(target_thread);
3468	binder_proc_dec_tmpref(target_proc);
3469	if (target_node)
3470		binder_dec_node_tmpref(target_node);
3471	/*
3472	 * write barrier to synchronize with initialization
3473	 * of log entry
3474	 */
3475	smp_wmb();
3476	WRITE_ONCE(e->debug_id_done, t_debug_id);
3477	return;
3478
3479err_dead_proc_or_thread:
3480	return_error = BR_DEAD_REPLY;
3481	return_error_line = __LINE__;
3482	binder_dequeue_work(proc, tcomplete);
3483err_translate_failed:
3484err_bad_object_type:
3485err_bad_offset:
3486err_bad_parent:
3487err_copy_data_failed:
3488	binder_free_txn_fixups(t);
3489	trace_binder_transaction_failed_buffer_release(t->buffer);
3490	binder_transaction_buffer_release(target_proc, t->buffer,
3491					  buffer_offset, true);
3492	if (target_node)
3493		binder_dec_node_tmpref(target_node);
3494	target_node = NULL;
3495	t->buffer->transaction = NULL;
3496	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3497err_binder_alloc_buf_failed:
3498err_bad_extra_size:
3499	if (secctx)
3500		security_release_secctx(secctx, secctx_sz);
3501err_get_secctx_failed:
3502	kfree(tcomplete);
3503	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3504err_alloc_tcomplete_failed:
3505	kfree(t);
3506	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3507err_alloc_t_failed:
3508err_bad_todo_list:
3509err_bad_call_stack:
3510err_empty_call_stack:
3511err_dead_binder:
3512err_invalid_target_handle:
3513	if (target_thread)
3514		binder_thread_dec_tmpref(target_thread);
3515	if (target_proc)
3516		binder_proc_dec_tmpref(target_proc);
3517	if (target_node) {
3518		binder_dec_node(target_node, 1, 0);
3519		binder_dec_node_tmpref(target_node);
3520	}
3521
3522	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3523		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3524		     proc->pid, thread->pid, return_error, return_error_param,
3525		     (u64)tr->data_size, (u64)tr->offsets_size,
3526		     return_error_line);
3527
3528	{
3529		struct binder_transaction_log_entry *fe;
3530
3531		e->return_error = return_error;
3532		e->return_error_param = return_error_param;
3533		e->return_error_line = return_error_line;
3534		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3535		*fe = *e;
3536		/*
3537		 * write barrier to synchronize with initialization
3538		 * of log entry
3539		 */
3540		smp_wmb();
3541		WRITE_ONCE(e->debug_id_done, t_debug_id);
3542		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3543	}
3544
3545	BUG_ON(thread->return_error.cmd != BR_OK);
3546	if (in_reply_to) {
3547		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3548		binder_enqueue_thread_work(thread, &thread->return_error.work);
3549		binder_send_failed_reply(in_reply_to, return_error);
3550	} else {
3551		thread->return_error.cmd = return_error;
3552		binder_enqueue_thread_work(thread, &thread->return_error.work);
3553	}
3554}
3555
3556/**
3557 * binder_free_buf() - free the specified buffer
3558 * @proc:	binder proc that owns buffer
3559 * @buffer:	buffer to be freed
3560 *
3561 * If the buffer is for an async transaction, enqueue the next async
3562 * transaction from the node.
3563 *
3564 * Clean up the buffer and free it.
3565 */
3566static void
3567binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3568{
3569	binder_inner_proc_lock(proc);
3570	if (buffer->transaction) {
3571		buffer->transaction->buffer = NULL;
3572		buffer->transaction = NULL;
3573	}
3574	binder_inner_proc_unlock(proc);
3575	if (buffer->async_transaction && buffer->target_node) {
3576		struct binder_node *buf_node;
3577		struct binder_work *w;
3578
3579		buf_node = buffer->target_node;
3580		binder_node_inner_lock(buf_node);
3581		BUG_ON(!buf_node->has_async_transaction);
3582		BUG_ON(buf_node->proc != proc);
3583		w = binder_dequeue_work_head_ilocked(
3584				&buf_node->async_todo);
3585		if (!w) {
3586			buf_node->has_async_transaction = false;
3587		} else {
3588			binder_enqueue_work_ilocked(
3589					w, &proc->todo);
3590			binder_wakeup_proc_ilocked(proc);
3591		}
3592		binder_node_inner_unlock(buf_node);
3593	}
3594	trace_binder_transaction_buffer_release(buffer);
3595	binder_transaction_buffer_release(proc, buffer, 0, false);
3596	binder_alloc_free_buf(&proc->alloc, buffer);
3597}
3598
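/**
 * binder_thread_write() - handle the write half of BINDER_WRITE_READ
 * @proc:	binder proc issuing the commands
 * @thread:	binder thread issuing the commands
 * @binder_buffer:	userspace address of the BC_* command stream
 * @size:	size of the command stream in bytes
 * @consumed:	bytes already consumed; updated as commands are processed
 *
 * Process BC_* commands (refcount changes, buffer frees, transactions,
 * looper registration, death notifications) until the stream is
 * exhausted or a pending return error stops the loop.
 *
 * The stream is a sequence of 32-bit command codes, each followed by
 * its fixed-size payload, for example (illustrative only):
 *
 *	BC_FREE_BUFFER	<binder_uintptr_t data_ptr>
 *	BC_TRANSACTION	<struct binder_transaction_data>
 *
 * Return: 0 on success, negative errno on failure.
 */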
3599static int binder_thread_write(struct binder_proc *proc,
3600			struct binder_thread *thread,
3601			binder_uintptr_t binder_buffer, size_t size,
3602			binder_size_t *consumed)
3603{
3604	uint32_t cmd;
3605	struct binder_context *context = proc->context;
3606	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3607	void __user *ptr = buffer + *consumed;
3608	void __user *end = buffer + size;
3609
3610	while (ptr < end && thread->return_error.cmd == BR_OK) {
3611		int ret;
3612
3613		if (get_user(cmd, (uint32_t __user *)ptr))
3614			return -EFAULT;
3615		ptr += sizeof(uint32_t);
3616		trace_binder_command(cmd);
3617		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3618			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3619			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3620			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3621		}
3622		switch (cmd) {
3623		case BC_INCREFS:
3624		case BC_ACQUIRE:
3625		case BC_RELEASE:
3626		case BC_DECREFS: {
3627			uint32_t target;
3628			const char *debug_string;
3629			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3630			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3631			struct binder_ref_data rdata;
3632
3633			if (get_user(target, (uint32_t __user *)ptr))
3634				return -EFAULT;
3635
3636			ptr += sizeof(uint32_t);
3637			ret = -1;
3638			if (increment && !target) {
3639				struct binder_node *ctx_mgr_node;
3640				mutex_lock(&context->context_mgr_node_lock);
3641				ctx_mgr_node = context->binder_context_mgr_node;
3642				if (ctx_mgr_node) {
3643					if (ctx_mgr_node->proc == proc) {
3644						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3645								  proc->pid, thread->pid);
3646						mutex_unlock(&context->context_mgr_node_lock);
3647						return -EINVAL;
3648					}
3649					ret = binder_inc_ref_for_node(
3650							proc, ctx_mgr_node,
3651							strong, NULL, &rdata);
3652				}
3653				mutex_unlock(&context->context_mgr_node_lock);
3654			}
3655			if (ret)
3656				ret = binder_update_ref_for_handle(
3657						proc, target, increment, strong,
3658						&rdata);
3659			if (!ret && rdata.desc != target) {
3660				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3661					proc->pid, thread->pid,
3662					target, rdata.desc);
3663			}
3664			switch (cmd) {
3665			case BC_INCREFS:
3666				debug_string = "IncRefs";
3667				break;
3668			case BC_ACQUIRE:
3669				debug_string = "Acquire";
3670				break;
3671			case BC_RELEASE:
3672				debug_string = "Release";
3673				break;
3674			case BC_DECREFS:
3675			default:
3676				debug_string = "DecRefs";
3677				break;
3678			}
3679			if (ret) {
3680				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3681					proc->pid, thread->pid, debug_string,
3682					strong, target, ret);
3683				break;
3684			}
3685			binder_debug(BINDER_DEBUG_USER_REFS,
3686				     "%d:%d %s ref %d desc %d s %d w %d\n",
3687				     proc->pid, thread->pid, debug_string,
3688				     rdata.debug_id, rdata.desc, rdata.strong,
3689				     rdata.weak);
3690			break;
3691		}
3692		case BC_INCREFS_DONE:
3693		case BC_ACQUIRE_DONE: {
3694			binder_uintptr_t node_ptr;
3695			binder_uintptr_t cookie;
3696			struct binder_node *node;
3697			bool free_node;
3698
3699			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3700				return -EFAULT;
3701			ptr += sizeof(binder_uintptr_t);
3702			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3703				return -EFAULT;
3704			ptr += sizeof(binder_uintptr_t);
3705			node = binder_get_node(proc, node_ptr);
3706			if (node == NULL) {
3707				binder_user_error("%d:%d %s u%016llx no match\n",
3708					proc->pid, thread->pid,
3709					cmd == BC_INCREFS_DONE ?
3710					"BC_INCREFS_DONE" :
3711					"BC_ACQUIRE_DONE",
3712					(u64)node_ptr);
3713				break;
3714			}
3715			if (cookie != node->cookie) {
3716				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3717					proc->pid, thread->pid,
3718					cmd == BC_INCREFS_DONE ?
3719					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3720					(u64)node_ptr, node->debug_id,
3721					(u64)cookie, (u64)node->cookie);
3722				binder_put_node(node);
3723				break;
3724			}
3725			binder_node_inner_lock(node);
3726			if (cmd == BC_ACQUIRE_DONE) {
3727				if (node->pending_strong_ref == 0) {
3728					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3729						proc->pid, thread->pid,
3730						node->debug_id);
3731					binder_node_inner_unlock(node);
3732					binder_put_node(node);
3733					break;
3734				}
3735				node->pending_strong_ref = 0;
3736			} else {
3737				if (node->pending_weak_ref == 0) {
3738					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3739						proc->pid, thread->pid,
3740						node->debug_id);
3741					binder_node_inner_unlock(node);
3742					binder_put_node(node);
3743					break;
3744				}
3745				node->pending_weak_ref = 0;
3746			}
3747			free_node = binder_dec_node_nilocked(node,
3748					cmd == BC_ACQUIRE_DONE, 0);
3749			WARN_ON(free_node);
3750			binder_debug(BINDER_DEBUG_USER_REFS,
3751				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3752				     proc->pid, thread->pid,
3753				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3754				     node->debug_id, node->local_strong_refs,
3755				     node->local_weak_refs, node->tmp_refs);
3756			binder_node_inner_unlock(node);
3757			binder_put_node(node);
3758			break;
3759		}
3760		case BC_ATTEMPT_ACQUIRE:
3761			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3762			return -EINVAL;
3763		case BC_ACQUIRE_RESULT:
3764			pr_err("BC_ACQUIRE_RESULT not supported\n");
3765			return -EINVAL;
3766
3767		case BC_FREE_BUFFER: {
3768			binder_uintptr_t data_ptr;
3769			struct binder_buffer *buffer;
3770
3771			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3772				return -EFAULT;
3773			ptr += sizeof(binder_uintptr_t);
3774
3775			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3776							      data_ptr);
3777			if (IS_ERR_OR_NULL(buffer)) {
3778				if (PTR_ERR(buffer) == -EPERM) {
3779					binder_user_error(
3780						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3781						proc->pid, thread->pid,
3782						(u64)data_ptr);
3783				} else {
3784					binder_user_error(
3785						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3786						proc->pid, thread->pid,
3787						(u64)data_ptr);
3788				}
3789				break;
3790			}
3791			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3792				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3793				     proc->pid, thread->pid, (u64)data_ptr,
3794				     buffer->debug_id,
3795				     buffer->transaction ? "active" : "finished");
3796			binder_free_buf(proc, buffer);
3797			break;
3798		}
3799
3800		case BC_TRANSACTION_SG:
3801		case BC_REPLY_SG: {
3802			struct binder_transaction_data_sg tr;
3803
3804			if (copy_from_user(&tr, ptr, sizeof(tr)))
3805				return -EFAULT;
3806			ptr += sizeof(tr);
3807			binder_transaction(proc, thread, &tr.transaction_data,
3808					   cmd == BC_REPLY_SG, tr.buffers_size);
3809			break;
3810		}
3811		case BC_TRANSACTION:
3812		case BC_REPLY: {
3813			struct binder_transaction_data tr;
3814
3815			if (copy_from_user(&tr, ptr, sizeof(tr)))
3816				return -EFAULT;
3817			ptr += sizeof(tr);
3818			binder_transaction(proc, thread, &tr,
3819					   cmd == BC_REPLY, 0);
3820			break;
3821		}
3822
3823		case BC_REGISTER_LOOPER:
3824			binder_debug(BINDER_DEBUG_THREADS,
3825				     "%d:%d BC_REGISTER_LOOPER\n",
3826				     proc->pid, thread->pid);
3827			binder_inner_proc_lock(proc);
3828			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3829				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3830				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3831					proc->pid, thread->pid);
3832			} else if (proc->requested_threads == 0) {
3833				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3834				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3835					proc->pid, thread->pid);
3836			} else {
3837				proc->requested_threads--;
3838				proc->requested_threads_started++;
3839			}
3840			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3841			binder_inner_proc_unlock(proc);
3842			break;
3843		case BC_ENTER_LOOPER:
3844			binder_debug(BINDER_DEBUG_THREADS,
3845				     "%d:%d BC_ENTER_LOOPER\n",
3846				     proc->pid, thread->pid);
3847			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3848				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3849				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3850					proc->pid, thread->pid);
3851			}
3852			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3853			break;
3854		case BC_EXIT_LOOPER:
3855			binder_debug(BINDER_DEBUG_THREADS,
3856				     "%d:%d BC_EXIT_LOOPER\n",
3857				     proc->pid, thread->pid);
3858			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3859			break;
3860
3861		case BC_REQUEST_DEATH_NOTIFICATION:
3862		case BC_CLEAR_DEATH_NOTIFICATION: {
3863			uint32_t target;
3864			binder_uintptr_t cookie;
3865			struct binder_ref *ref;
3866			struct binder_ref_death *death = NULL;
3867
3868			if (get_user(target, (uint32_t __user *)ptr))
3869				return -EFAULT;
3870			ptr += sizeof(uint32_t);
3871			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3872				return -EFAULT;
3873			ptr += sizeof(binder_uintptr_t);
3874			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3875				/*
3876				 * Allocate memory for death notification
3877				 * before taking lock
3878				 */
3879				death = kzalloc(sizeof(*death), GFP_KERNEL);
3880				if (death == NULL) {
3881					WARN_ON(thread->return_error.cmd !=
3882						BR_OK);
3883					thread->return_error.cmd = BR_ERROR;
3884					binder_enqueue_thread_work(
3885						thread,
3886						&thread->return_error.work);
3887					binder_debug(
3888						BINDER_DEBUG_FAILED_TRANSACTION,
3889						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3890						proc->pid, thread->pid);
3891					break;
3892				}
3893			}
3894			binder_proc_lock(proc);
3895			ref = binder_get_ref_olocked(proc, target, false);
3896			if (ref == NULL) {
3897				binder_user_error("%d:%d %s invalid ref %d\n",
3898					proc->pid, thread->pid,
3899					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3900					"BC_REQUEST_DEATH_NOTIFICATION" :
3901					"BC_CLEAR_DEATH_NOTIFICATION",
3902					target);
3903				binder_proc_unlock(proc);
3904				kfree(death);
3905				break;
3906			}
3907
3908			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3909				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3910				     proc->pid, thread->pid,
3911				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3912				     "BC_REQUEST_DEATH_NOTIFICATION" :
3913				     "BC_CLEAR_DEATH_NOTIFICATION",
3914				     (u64)cookie, ref->data.debug_id,
3915				     ref->data.desc, ref->data.strong,
3916				     ref->data.weak, ref->node->debug_id);
3917
3918			binder_node_lock(ref->node);
3919			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3920				if (ref->death) {
3921					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3922						proc->pid, thread->pid);
3923					binder_node_unlock(ref->node);
3924					binder_proc_unlock(proc);
3925					kfree(death);
3926					break;
3927				}
3928				binder_stats_created(BINDER_STAT_DEATH);
3929				INIT_LIST_HEAD(&death->work.entry);
3930				death->cookie = cookie;
3931				ref->death = death;
3932				if (ref->node->proc == NULL) {
3933					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3934
3935					binder_inner_proc_lock(proc);
3936					binder_enqueue_work_ilocked(
3937						&ref->death->work, &proc->todo);
3938					binder_wakeup_proc_ilocked(proc);
3939					binder_inner_proc_unlock(proc);
3940				}
3941			} else {
3942				if (ref->death == NULL) {
3943					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3944						proc->pid, thread->pid);
3945					binder_node_unlock(ref->node);
3946					binder_proc_unlock(proc);
3947					break;
3948				}
3949				death = ref->death;
3950				if (death->cookie != cookie) {
3951					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3952						proc->pid, thread->pid,
3953						(u64)death->cookie,
3954						(u64)cookie);
3955					binder_node_unlock(ref->node);
3956					binder_proc_unlock(proc);
3957					break;
3958				}
3959				ref->death = NULL;
3960				binder_inner_proc_lock(proc);
3961				if (list_empty(&death->work.entry)) {
3962					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3963					if (thread->looper &
3964					    (BINDER_LOOPER_STATE_REGISTERED |
3965					     BINDER_LOOPER_STATE_ENTERED))
3966						binder_enqueue_thread_work_ilocked(
3967								thread,
3968								&death->work);
3969					else {
3970						binder_enqueue_work_ilocked(
3971								&death->work,
3972								&proc->todo);
3973						binder_wakeup_proc_ilocked(
3974								proc);
3975					}
3976				} else {
3977					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3978					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3979				}
3980				binder_inner_proc_unlock(proc);
3981			}
3982			binder_node_unlock(ref->node);
3983			binder_proc_unlock(proc);
3984		} break;
3985		case BC_DEAD_BINDER_DONE: {
3986			struct binder_work *w;
3987			binder_uintptr_t cookie;
3988			struct binder_ref_death *death = NULL;
3989
3990			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3991				return -EFAULT;
3992
3993			ptr += sizeof(cookie);
3994			binder_inner_proc_lock(proc);
3995			list_for_each_entry(w, &proc->delivered_death,
3996					    entry) {
3997				struct binder_ref_death *tmp_death =
3998					container_of(w,
3999						     struct binder_ref_death,
4000						     work);
4001
4002				if (tmp_death->cookie == cookie) {
4003					death = tmp_death;
4004					break;
4005				}
4006			}
4007			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4008				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4009				     proc->pid, thread->pid, (u64)cookie,
4010				     death);
4011			if (death == NULL) {
4012				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4013					proc->pid, thread->pid, (u64)cookie);
4014				binder_inner_proc_unlock(proc);
4015				break;
4016			}
4017			binder_dequeue_work_ilocked(&death->work);
4018			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4019				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4020				if (thread->looper &
4021					(BINDER_LOOPER_STATE_REGISTERED |
4022					 BINDER_LOOPER_STATE_ENTERED))
4023					binder_enqueue_thread_work_ilocked(
4024						thread, &death->work);
4025				else {
4026					binder_enqueue_work_ilocked(
4027							&death->work,
4028							&proc->todo);
4029					binder_wakeup_proc_ilocked(proc);
4030				}
4031			}
4032			binder_inner_proc_unlock(proc);
4033		} break;
4034
4035		default:
4036			pr_err("%d:%d unknown command %d\n",
4037			       proc->pid, thread->pid, cmd);
4038			return -EINVAL;
4039		}
4040		*consumed = ptr - buffer;
4041	}
4042	return 0;
4043}
4044
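/**
 * binder_stat_br() - update statistics for a BR_* return command
 * @proc:	binder proc the command is delivered to
 * @thread:	binder thread the command is delivered to
 * @cmd:	the BR_* command being delivered
 *
 * Trace the command and bump the global, per-proc and per-thread
 * counters for it.
 */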
4045static void binder_stat_br(struct binder_proc *proc,
4046			   struct binder_thread *thread, uint32_t cmd)
4047{
4048	trace_binder_return(cmd);
4049	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4050		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4051		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4052		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4053	}
4054}
4055
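/**
 * binder_put_node_cmd() - write a node refcount command to userspace
 * @proc:	binder proc owning the node
 * @thread:	binder thread doing the read
 * @ptrp:	pointer into the userspace read buffer; advanced on success
 * @node_ptr:	userspace address (ptr) of the node's binder object
 * @node_cookie:	userspace cookie of the node
 * @node_debug_id:	debug id of the node (for logging only)
 * @cmd:	BR_INCREFS, BR_ACQUIRE, BR_RELEASE or BR_DECREFS
 * @cmd_name:	string name of @cmd (for logging only)
 *
 * Return: 0 on success, -EFAULT if the userspace buffer could not
 * be written.
 */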
4056static int binder_put_node_cmd(struct binder_proc *proc,
4057			       struct binder_thread *thread,
4058			       void __user **ptrp,
4059			       binder_uintptr_t node_ptr,
4060			       binder_uintptr_t node_cookie,
4061			       int node_debug_id,
4062			       uint32_t cmd, const char *cmd_name)
4063{
4064	void __user *ptr = *ptrp;
4065
4066	if (put_user(cmd, (uint32_t __user *)ptr))
4067		return -EFAULT;
4068	ptr += sizeof(uint32_t);
4069
4070	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4071		return -EFAULT;
4072	ptr += sizeof(binder_uintptr_t);
4073
4074	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4075		return -EFAULT;
4076	ptr += sizeof(binder_uintptr_t);
4077
4078	binder_stat_br(proc, thread, cmd);
4079	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4080		     proc->pid, thread->pid, cmd_name, node_debug_id,
4081		     (u64)node_ptr, (u64)node_cookie);
4082
4083	*ptrp = ptr;
4084	return 0;
4085}
4086
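/**
 * binder_wait_for_work() - block until there is work for this thread
 * @thread:	binder thread waiting for work
 * @do_proc_work:	also wait for work on the proc todo list
 *
 * Sleep (freezer-aware) until binder_has_work_ilocked() reports work
 * for this thread. If @do_proc_work is true, the thread is added to
 * proc->waiting_threads while it sleeps.
 *
 * Return: 0 when work is available, -ERESTARTSYS if interrupted by
 * a signal.
 */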
4087static int binder_wait_for_work(struct binder_thread *thread,
4088				bool do_proc_work)
4089{
4090	DEFINE_WAIT(wait);
4091	struct binder_proc *proc = thread->proc;
4092	int ret = 0;
4093
4094	freezer_do_not_count();
4095	binder_inner_proc_lock(proc);
4096	for (;;) {
4097		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4098		if (binder_has_work_ilocked(thread, do_proc_work))
4099			break;
4100		if (do_proc_work)
4101			list_add(&thread->waiting_thread_node,
4102				 &proc->waiting_threads);
4103		binder_inner_proc_unlock(proc);
4104		schedule();
4105		binder_inner_proc_lock(proc);
4106		list_del_init(&thread->waiting_thread_node);
4107		if (signal_pending(current)) {
4108			ret = -ERESTARTSYS;
4109			break;
4110		}
4111	}
4112	finish_wait(&thread->wait, &wait);
4113	binder_inner_proc_unlock(proc);
4114	freezer_count();
4115
4116	return ret;
4117}
4118
4119/**
4120 * binder_apply_fd_fixups() - finish fd translation
4121 * @proc:         binder_proc associated with @t->buffer
4122 * @t:	binder transaction with list of fd fixups
4123 *
4124 * Now that we are in the context of the transaction target
4125 * process, we can allocate and install fds. Process the
4126 * list of fds to translate and fixup the buffer with the
4127 * new fds.
4128 *
4129 * If we fail to allocate an fd, then free the resources by
4130 * fput'ing files that have not been processed and ksys_close'ing
4131 * any fds that have already been allocated.
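 *
 * Return: 0 on success, or a negative errno if an fd could not be
 * allocated or the translated fd could not be written back.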
4132 */
4133static int binder_apply_fd_fixups(struct binder_proc *proc,
4134				  struct binder_transaction *t)
4135{
4136	struct binder_txn_fd_fixup *fixup, *tmp;
4137	int ret = 0;
4138
4139	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4140		int fd = get_unused_fd_flags(O_CLOEXEC);
4141
4142		if (fd < 0) {
4143			binder_debug(BINDER_DEBUG_TRANSACTION,
4144				     "failed fd fixup txn %d fd %d\n",
4145				     t->debug_id, fd);
4146			ret = -ENOMEM;
4147			break;
4148		}
4149		binder_debug(BINDER_DEBUG_TRANSACTION,
4150			     "fd fixup txn %d fd %d\n",
4151			     t->debug_id, fd);
4152		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4153		fd_install(fd, fixup->file);
4154		fixup->file = NULL;
4155		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4156						fixup->offset, &fd,
4157						sizeof(u32))) {
4158			ret = -EINVAL;
4159			break;
4160		}
4161	}
4162	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4163		if (fixup->file) {
4164			fput(fixup->file);
4165		} else if (ret) {
4166			u32 fd;
4167			int err;
4168
4169			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4170							    t->buffer,
4171							    fixup->offset,
4172							    sizeof(fd));
4173			WARN_ON(err);
4174			if (!err)
4175				binder_deferred_fd_close(fd);
4176		}
4177		list_del(&fixup->fixup_entry);
4178		kfree(fixup);
4179	}
4180
4181	return ret;
4182}
4183
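/**
 * binder_thread_read() - handle the read half of BINDER_WRITE_READ
 * @proc:	binder proc doing the read
 * @thread:	binder thread doing the read
 * @binder_buffer:	userspace address of the read buffer
 * @size:	size of the read buffer in bytes
 * @consumed:	bytes already filled; updated as BR_* commands are written
 * @non_block:	non-zero if the read should not block
 *
 * Wait for work on the thread (and optionally proc) todo lists and
 * convert it into BR_* commands in the userspace buffer. May also
 * append BR_SPAWN_LOOPER to ask userspace to start another thread.
 *
 * Return: 0 on success, negative errno on failure.
 */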
4184static int binder_thread_read(struct binder_proc *proc,
4185			      struct binder_thread *thread,
4186			      binder_uintptr_t binder_buffer, size_t size,
4187			      binder_size_t *consumed, int non_block)
4188{
4189	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4190	void __user *ptr = buffer + *consumed;
4191	void __user *end = buffer + size;
4192
4193	int ret = 0;
4194	int wait_for_proc_work;
4195
4196	if (*consumed == 0) {
4197		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4198			return -EFAULT;
4199		ptr += sizeof(uint32_t);
4200	}
4201
4202retry:
4203	binder_inner_proc_lock(proc);
4204	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4205	binder_inner_proc_unlock(proc);
4206
4207	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4208
4209	trace_binder_wait_for_work(wait_for_proc_work,
4210				   !!thread->transaction_stack,
4211				   !binder_worklist_empty(proc, &thread->todo));
4212	if (wait_for_proc_work) {
4213		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4214					BINDER_LOOPER_STATE_ENTERED))) {
4215			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4216				proc->pid, thread->pid, thread->looper);
4217			wait_event_interruptible(binder_user_error_wait,
4218						 binder_stop_on_user_error < 2);
4219		}
4220		binder_set_nice(proc->default_priority);
4221	}
4222
4223	if (non_block) {
4224		if (!binder_has_work(thread, wait_for_proc_work))
4225			ret = -EAGAIN;
4226	} else {
4227		ret = binder_wait_for_work(thread, wait_for_proc_work);
4228	}
4229
4230	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4231
4232	if (ret)
4233		return ret;
4234
4235	while (1) {
4236		uint32_t cmd;
4237		struct binder_transaction_data_secctx tr;
4238		struct binder_transaction_data *trd = &tr.transaction_data;
4239		struct binder_work *w = NULL;
4240		struct list_head *list = NULL;
4241		struct binder_transaction *t = NULL;
4242		struct binder_thread *t_from;
4243		size_t trsize = sizeof(*trd);
4244
4245		binder_inner_proc_lock(proc);
4246		if (!binder_worklist_empty_ilocked(&thread->todo))
4247			list = &thread->todo;
4248		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4249			   wait_for_proc_work)
4250			list = &proc->todo;
4251		else {
4252			binder_inner_proc_unlock(proc);
4253
4254			/* no data added (buffer holds only the BR_NOOP) */
4255			if (ptr - buffer == 4 && !thread->looper_need_return)
4256				goto retry;
4257			break;
4258		}
4259
4260		if (end - ptr < sizeof(tr) + 4) {
4261			binder_inner_proc_unlock(proc);
4262			break;
4263		}
4264		w = binder_dequeue_work_head_ilocked(list);
4265		if (binder_worklist_empty_ilocked(&thread->todo))
4266			thread->process_todo = false;
4267
4268		switch (w->type) {
4269		case BINDER_WORK_TRANSACTION: {
4270			binder_inner_proc_unlock(proc);
4271			t = container_of(w, struct binder_transaction, work);
4272		} break;
4273		case BINDER_WORK_RETURN_ERROR: {
4274			struct binder_error *e = container_of(
4275					w, struct binder_error, work);
4276
4277			WARN_ON(e->cmd == BR_OK);
4278			binder_inner_proc_unlock(proc);
4279			if (put_user(e->cmd, (uint32_t __user *)ptr))
4280				return -EFAULT;
4281			cmd = e->cmd;
4282			e->cmd = BR_OK;
4283			ptr += sizeof(uint32_t);
4284
4285			binder_stat_br(proc, thread, cmd);
4286		} break;
4287		case BINDER_WORK_TRANSACTION_COMPLETE: {
4288			binder_inner_proc_unlock(proc);
4289			cmd = BR_TRANSACTION_COMPLETE;
4290			kfree(w);
4291			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4292			if (put_user(cmd, (uint32_t __user *)ptr))
4293				return -EFAULT;
4294			ptr += sizeof(uint32_t);
4295
4296			binder_stat_br(proc, thread, cmd);
4297			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4298				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4299				     proc->pid, thread->pid);
4300		} break;
4301		case BINDER_WORK_NODE: {
4302			struct binder_node *node = container_of(w, struct binder_node, work);
4303			int strong, weak;
4304			binder_uintptr_t node_ptr = node->ptr;
4305			binder_uintptr_t node_cookie = node->cookie;
4306			int node_debug_id = node->debug_id;
4307			int has_weak_ref;
4308			int has_strong_ref;
4309			void __user *orig_ptr = ptr;
4310
4311			BUG_ON(proc != node->proc);
4312			strong = node->internal_strong_refs ||
4313					node->local_strong_refs;
4314			weak = !hlist_empty(&node->refs) ||
4315					node->local_weak_refs ||
4316					node->tmp_refs || strong;
4317			has_strong_ref = node->has_strong_ref;
4318			has_weak_ref = node->has_weak_ref;
4319
4320			if (weak && !has_weak_ref) {
4321				node->has_weak_ref = 1;
4322				node->pending_weak_ref = 1;
4323				node->local_weak_refs++;
4324			}
4325			if (strong && !has_strong_ref) {
4326				node->has_strong_ref = 1;
4327				node->pending_strong_ref = 1;
4328				node->local_strong_refs++;
4329			}
4330			if (!strong && has_strong_ref)
4331				node->has_strong_ref = 0;
4332			if (!weak && has_weak_ref)
4333				node->has_weak_ref = 0;
4334			if (!weak && !strong) {
4335				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4336					     "%d:%d node %d u%016llx c%016llx deleted\n",
4337					     proc->pid, thread->pid,
4338					     node_debug_id,
4339					     (u64)node_ptr,
4340					     (u64)node_cookie);
4341				rb_erase(&node->rb_node, &proc->nodes);
4342				binder_inner_proc_unlock(proc);
4343				binder_node_lock(node);
4344				/*
4345				 * Acquire the node lock before freeing the
4346				 * node to serialize with other threads that
4347				 * may have been holding the node lock while
4348				 * decrementing this node (avoids race where
4349				 * this thread frees while the other thread
4350				 * is unlocking the node after the final
4351				 * decrement)
4352				 */
4353				binder_node_unlock(node);
4354				binder_free_node(node);
4355			} else
4356				binder_inner_proc_unlock(proc);
4357
4358			if (weak && !has_weak_ref)
4359				ret = binder_put_node_cmd(
4360						proc, thread, &ptr, node_ptr,
4361						node_cookie, node_debug_id,
4362						BR_INCREFS, "BR_INCREFS");
4363			if (!ret && strong && !has_strong_ref)
4364				ret = binder_put_node_cmd(
4365						proc, thread, &ptr, node_ptr,
4366						node_cookie, node_debug_id,
4367						BR_ACQUIRE, "BR_ACQUIRE");
4368			if (!ret && !strong && has_strong_ref)
4369				ret = binder_put_node_cmd(
4370						proc, thread, &ptr, node_ptr,
4371						node_cookie, node_debug_id,
4372						BR_RELEASE, "BR_RELEASE");
4373			if (!ret && !weak && has_weak_ref)
4374				ret = binder_put_node_cmd(
4375						proc, thread, &ptr, node_ptr,
4376						node_cookie, node_debug_id,
4377						BR_DECREFS, "BR_DECREFS");
4378			if (orig_ptr == ptr)
4379				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4380					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4381					     proc->pid, thread->pid,
4382					     node_debug_id,
4383					     (u64)node_ptr,
4384					     (u64)node_cookie);
4385			if (ret)
4386				return ret;
4387		} break;
4388		case BINDER_WORK_DEAD_BINDER:
4389		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4390		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4391			struct binder_ref_death *death;
4392			uint32_t cmd;
4393			binder_uintptr_t cookie;
4394
4395			death = container_of(w, struct binder_ref_death, work);
4396			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4397				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4398			else
4399				cmd = BR_DEAD_BINDER;
4400			cookie = death->cookie;
4401
4402			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4403				     "%d:%d %s %016llx\n",
4404				      proc->pid, thread->pid,
4405				      cmd == BR_DEAD_BINDER ?
4406				      "BR_DEAD_BINDER" :
4407				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4408				      (u64)cookie);
4409			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4410				binder_inner_proc_unlock(proc);
4411				kfree(death);
4412				binder_stats_deleted(BINDER_STAT_DEATH);
4413			} else {
4414				binder_enqueue_work_ilocked(
4415						w, &proc->delivered_death);
4416				binder_inner_proc_unlock(proc);
4417			}
4418			if (put_user(cmd, (uint32_t __user *)ptr))
4419				return -EFAULT;
4420			ptr += sizeof(uint32_t);
4421			if (put_user(cookie,
4422				     (binder_uintptr_t __user *)ptr))
4423				return -EFAULT;
4424			ptr += sizeof(binder_uintptr_t);
4425			binder_stat_br(proc, thread, cmd);
4426			if (cmd == BR_DEAD_BINDER)
4427				goto done; /* DEAD_BINDER notifications can cause transactions */
4428		} break;
4429		default:
4430			binder_inner_proc_unlock(proc);
4431			pr_err("%d:%d: bad work type %d\n",
4432			       proc->pid, thread->pid, w->type);
4433			break;
4434		}
4435
4436		if (!t)
4437			continue;
4438
4439		BUG_ON(t->buffer == NULL);
4440		if (t->buffer->target_node) {
4441			struct binder_node *target_node = t->buffer->target_node;
4442
4443			trd->target.ptr = target_node->ptr;
4444			trd->cookie =  target_node->cookie;
4445			t->saved_priority = task_nice(current);
4446			if (t->priority < target_node->min_priority &&
4447			    !(t->flags & TF_ONE_WAY))
4448				binder_set_nice(t->priority);
4449			else if (!(t->flags & TF_ONE_WAY) ||
4450				 t->saved_priority > target_node->min_priority)
4451				binder_set_nice(target_node->min_priority);
4452			cmd = BR_TRANSACTION;
4453		} else {
4454			trd->target.ptr = 0;
4455			trd->cookie = 0;
4456			cmd = BR_REPLY;
4457		}
4458		trd->code = t->code;
4459		trd->flags = t->flags;
4460		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4461
4462		t_from = binder_get_txn_from(t);
4463		if (t_from) {
4464			struct task_struct *sender = t_from->proc->tsk;
4465
4466			trd->sender_pid =
4467				task_tgid_nr_ns(sender,
4468						task_active_pid_ns(current));
4469		} else {
4470			trd->sender_pid = 0;
4471		}
4472
4473		ret = binder_apply_fd_fixups(proc, t);
4474		if (ret) {
4475			struct binder_buffer *buffer = t->buffer;
4476			bool oneway = !!(t->flags & TF_ONE_WAY);
4477			int tid = t->debug_id;
4478
4479			if (t_from)
4480				binder_thread_dec_tmpref(t_from);
4481			buffer->transaction = NULL;
4482			binder_cleanup_transaction(t, "fd fixups failed",
4483						   BR_FAILED_REPLY);
4484			binder_free_buf(proc, buffer);
4485			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4486				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4487				     proc->pid, thread->pid,
4488				     oneway ? "async " :
4489					(cmd == BR_REPLY ? "reply " : ""),
4490				     tid, BR_FAILED_REPLY, ret, __LINE__);
4491			if (cmd == BR_REPLY) {
4492				cmd = BR_FAILED_REPLY;
4493				if (put_user(cmd, (uint32_t __user *)ptr))
4494					return -EFAULT;
4495				ptr += sizeof(uint32_t);
4496				binder_stat_br(proc, thread, cmd);
4497				break;
4498			}
4499			continue;
4500		}
4501		trd->data_size = t->buffer->data_size;
4502		trd->offsets_size = t->buffer->offsets_size;
4503		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4504		trd->data.ptr.offsets = trd->data.ptr.buffer +
4505					ALIGN(t->buffer->data_size,
4506					    sizeof(void *));
4507
4508		tr.secctx = t->security_ctx;
4509		if (t->security_ctx) {
4510			cmd = BR_TRANSACTION_SEC_CTX;
4511			trsize = sizeof(tr);
4512		}
4513		if (put_user(cmd, (uint32_t __user *)ptr)) {
4514			if (t_from)
4515				binder_thread_dec_tmpref(t_from);
4516
4517			binder_cleanup_transaction(t, "put_user failed",
4518						   BR_FAILED_REPLY);
4519
4520			return -EFAULT;
4521		}
4522		ptr += sizeof(uint32_t);
4523		if (copy_to_user(ptr, &tr, trsize)) {
4524			if (t_from)
4525				binder_thread_dec_tmpref(t_from);
4526
4527			binder_cleanup_transaction(t, "copy_to_user failed",
4528						   BR_FAILED_REPLY);
4529
4530			return -EFAULT;
4531		}
4532		ptr += trsize;
4533
4534		trace_binder_transaction_received(t);
4535		binder_stat_br(proc, thread, cmd);
4536		binder_debug(BINDER_DEBUG_TRANSACTION,
4537			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4538			     proc->pid, thread->pid,
4539			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4540				(cmd == BR_TRANSACTION_SEC_CTX) ?
4541				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4542			     t->debug_id, t_from ? t_from->proc->pid : 0,
4543			     t_from ? t_from->pid : 0, cmd,
4544			     t->buffer->data_size, t->buffer->offsets_size,
4545			     (u64)trd->data.ptr.buffer,
4546			     (u64)trd->data.ptr.offsets);
4547
4548		if (t_from)
4549			binder_thread_dec_tmpref(t_from);
4550		t->buffer->allow_user_free = 1;
4551		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4552			binder_inner_proc_lock(thread->proc);
4553			t->to_parent = thread->transaction_stack;
4554			t->to_thread = thread;
4555			thread->transaction_stack = t;
4556			binder_inner_proc_unlock(thread->proc);
4557		} else {
4558			binder_free_transaction(t);
4559		}
4560		break;
4561	}
4562
4563done:
4564
4565	*consumed = ptr - buffer;
4566	binder_inner_proc_lock(proc);
4567	if (proc->requested_threads == 0 &&
4568	    list_empty(&thread->proc->waiting_threads) &&
4569	    proc->requested_threads_started < proc->max_threads &&
4570	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4571	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
4572	      * spawn a new thread if we leave this out */) {
4573		proc->requested_threads++;
4574		binder_inner_proc_unlock(proc);
4575		binder_debug(BINDER_DEBUG_THREADS,
4576			     "%d:%d BR_SPAWN_LOOPER\n",
4577			     proc->pid, thread->pid);
4578		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4579			return -EFAULT;
4580		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4581	} else
4582		binder_inner_proc_unlock(proc);
4583	return 0;
4584}
4585
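/**
 * binder_release_work() - release all work items on a list
 * @proc:	binder proc owning @list
 * @list:	list of binder_work items to drain
 *
 * Dequeue and free every pending work item, failing undelivered
 * transactions with BR_DEAD_REPLY. Used when a thread or process
 * goes away.
 */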
4586static void binder_release_work(struct binder_proc *proc,
4587				struct list_head *list)
4588{
4589	struct binder_work *w;
4590
4591	while (1) {
4592		w = binder_dequeue_work_head(proc, list);
4593		if (!w)
4594			return;
4595
4596		switch (w->type) {
4597		case BINDER_WORK_TRANSACTION: {
4598			struct binder_transaction *t;
4599
4600			t = container_of(w, struct binder_transaction, work);
4601
4602			binder_cleanup_transaction(t, "process died.",
4603						   BR_DEAD_REPLY);
4604		} break;
4605		case BINDER_WORK_RETURN_ERROR: {
4606			struct binder_error *e = container_of(
4607					w, struct binder_error, work);
4608
4609			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4610				"undelivered TRANSACTION_ERROR: %u\n",
4611				e->cmd);
4612		} break;
4613		case BINDER_WORK_TRANSACTION_COMPLETE: {
4614			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4615				"undelivered TRANSACTION_COMPLETE\n");
4616			kfree(w);
4617			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4618		} break;
4619		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4620		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4621			struct binder_ref_death *death;
4622
4623			death = container_of(w, struct binder_ref_death, work);
4624			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4625				"undelivered death notification, %016llx\n",
4626				(u64)death->cookie);
4627			kfree(death);
4628			binder_stats_deleted(BINDER_STAT_DEATH);
4629		} break;
4630		default:
4631			pr_err("unexpected work type, %d, not freed\n",
4632			       w->type);
4633			break;
4634		}
4635	}
4636
4637}
4638
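/**
 * binder_get_thread_ilocked() - find or insert the current thread
 * @proc:	binder proc to search
 * @new_thread:	preallocated thread struct to insert on a miss, or NULL
 *
 * Look up current->pid in proc->threads. If not found and @new_thread
 * is non-NULL, initialize it and insert it into the rbtree.
 * Requires proc->inner_lock.
 *
 * Return: the existing or newly inserted thread, or NULL on a miss
 * when @new_thread is NULL.
 */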
4639static struct binder_thread *binder_get_thread_ilocked(
4640		struct binder_proc *proc, struct binder_thread *new_thread)
4641{
4642	struct binder_thread *thread = NULL;
4643	struct rb_node *parent = NULL;
4644	struct rb_node **p = &proc->threads.rb_node;
4645
4646	while (*p) {
4647		parent = *p;
4648		thread = rb_entry(parent, struct binder_thread, rb_node);
4649
4650		if (current->pid < thread->pid)
4651			p = &(*p)->rb_left;
4652		else if (current->pid > thread->pid)
4653			p = &(*p)->rb_right;
4654		else
4655			return thread;
4656	}
4657	if (!new_thread)
4658		return NULL;
4659	thread = new_thread;
4660	binder_stats_created(BINDER_STAT_THREAD);
4661	thread->proc = proc;
4662	thread->pid = current->pid;
4663	atomic_set(&thread->tmp_ref, 0);
4664	init_waitqueue_head(&thread->wait);
4665	INIT_LIST_HEAD(&thread->todo);
4666	rb_link_node(&thread->rb_node, parent, p);
4667	rb_insert_color(&thread->rb_node, &proc->threads);
4668	thread->looper_need_return = true;
4669	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4670	thread->return_error.cmd = BR_OK;
4671	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4672	thread->reply_error.cmd = BR_OK;
4673	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4674	return thread;
4675}
4676
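/**
 * binder_get_thread() - find or create the binder thread for current
 * @proc:	binder proc to search
 *
 * Fast path looks up the thread under the inner lock; on a miss, a
 * new struct is allocated outside the lock and the lookup is retried
 * with the preallocated struct.
 *
 * Return: the thread, or NULL if allocation failed.
 */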
4677static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4678{
4679	struct binder_thread *thread;
4680	struct binder_thread *new_thread;
4681
4682	binder_inner_proc_lock(proc);
4683	thread = binder_get_thread_ilocked(proc, NULL);
4684	binder_inner_proc_unlock(proc);
4685	if (!thread) {
4686		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4687		if (new_thread == NULL)
4688			return NULL;
4689		binder_inner_proc_lock(proc);
4690		thread = binder_get_thread_ilocked(proc, new_thread);
4691		binder_inner_proc_unlock(proc);
4692		if (thread != new_thread)
4693			kfree(new_thread);
4694	}
4695	return thread;
4696}
4697
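/**
 * binder_free_proc() - free a binder_proc with no remaining references
 * @proc:	binder proc to free
 *
 * Drops the ref on the underlying binder device (freeing it and the
 * context name if this was the last ref), releases the allocator and
 * the task struct, and frees the proc itself.
 */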
4698static void binder_free_proc(struct binder_proc *proc)
4699{
4700	struct binder_device *device;
4701
4702	BUG_ON(!list_empty(&proc->todo));
4703	BUG_ON(!list_empty(&proc->delivered_death));
4704	device = container_of(proc->context, struct binder_device, context);
4705	if (refcount_dec_and_test(&device->ref)) {
4706		kfree(proc->context->name);
4707		kfree(device);
4708	}
4709	binder_alloc_deferred_release(&proc->alloc);
4710	put_task_struct(proc->tsk);
4711	binder_stats_deleted(BINDER_STAT_PROC);
4712	kfree(proc);
4713}
4714
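/**
 * binder_free_thread() - free a binder_thread with no remaining work
 * @thread:	binder thread to free
 *
 * Drops the temporary reference held on the owning proc and frees
 * the thread.
 */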
4715static void binder_free_thread(struct binder_thread *thread)
4716{
4717	BUG_ON(!list_empty(&thread->todo));
4718	binder_stats_deleted(BINDER_STAT_THREAD);
4719	binder_proc_dec_tmpref(thread->proc);
4720	kfree(thread);
4721}
4722
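/**
 * binder_thread_release() - release a thread that is exiting
 * @proc:	binder proc owning @thread
 * @thread:	binder thread being released
 *
 * Remove the thread from proc->threads, mark it dead, unwind its
 * transaction stack (failing any reply owed to a caller with
 * BR_DEAD_REPLY), wake up pollers with POLLFREE and release its
 * pending work.
 *
 * Return: the number of transactions that were still active.
 */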
4723static int binder_thread_release(struct binder_proc *proc,
4724				 struct binder_thread *thread)
4725{
4726	struct binder_transaction *t;
4727	struct binder_transaction *send_reply = NULL;
4728	int active_transactions = 0;
4729	struct binder_transaction *last_t = NULL;
4730
4731	binder_inner_proc_lock(thread->proc);
4732	/*
4733	 * take a ref on the proc so it survives
4734	 * after we remove this thread from proc->threads.
4735	 * The corresponding dec is when we actually
4736	 * free the thread in binder_free_thread()
4737	 */
4738	proc->tmp_ref++;
4739	/*
4740	 * take a ref on this thread to ensure it
4741	 * survives while we are releasing it
4742	 */
4743	atomic_inc(&thread->tmp_ref);
4744	rb_erase(&thread->rb_node, &proc->threads);
4745	t = thread->transaction_stack;
4746	if (t) {
4747		spin_lock(&t->lock);
4748		if (t->to_thread == thread)
4749			send_reply = t;
4750	} else {
4751		__acquire(&t->lock);
4752	}
4753	thread->is_dead = true;
4754
4755	while (t) {
4756		last_t = t;
4757		active_transactions++;
4758		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4759			     "release %d:%d transaction %d %s, still active\n",
4760			      proc->pid, thread->pid,
4761			     t->debug_id,
4762			     (t->to_thread == thread) ? "in" : "out");
4763
4764		if (t->to_thread == thread) {
4765			t->to_proc = NULL;
4766			t->to_thread = NULL;
4767			if (t->buffer) {
4768				t->buffer->transaction = NULL;
4769				t->buffer = NULL;
4770			}
4771			t = t->to_parent;
4772		} else if (t->from == thread) {
4773			t->from = NULL;
4774			t = t->from_parent;
4775		} else
4776			BUG();
4777		spin_unlock(&last_t->lock);
4778		if (t)
4779			spin_lock(&t->lock);
4780		else
4781			__acquire(&t->lock);
4782	}
4783	/* annotation for sparse, lock not acquired in last iteration above */
4784	__release(&t->lock);
4785
4786	/*
4787	 * If this thread used poll, make sure we remove the waitqueue
4788	 * from any epoll data structures holding it with POLLFREE.
4789	 * waitqueue_active() is safe to use here because we're holding
4790	 * the inner lock.
4791	 */
4792	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4793	    waitqueue_active(&thread->wait)) {
4794		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4795	}
4796
4797	binder_inner_proc_unlock(thread->proc);
4798
4799	/*
4800	 * This is needed to avoid races between wake_up_poll() above and
4801	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4802	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4803	 * lock, so we can be sure it's done after calling synchronize_rcu().
4804	 */
4805	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4806		synchronize_rcu();
4807
4808	if (send_reply)
4809		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4810	binder_release_work(proc, &thread->todo);
4811	binder_thread_dec_tmpref(thread);
4812	return active_transactions;
4813}
4814
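/**
 * binder_poll() - poll callback for binder fds
 * @filp:	file being polled
 * @wait:	poll table
 *
 * Marks the calling thread as a poll waiter before checking for work.
 *
 * Return: EPOLLIN if the calling thread has work pending, POLLERR if
 * no binder_thread could be created, 0 otherwise.
 */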
4815static __poll_t binder_poll(struct file *filp,
4816				struct poll_table_struct *wait)
4817{
4818	struct binder_proc *proc = filp->private_data;
4819	struct binder_thread *thread = NULL;
4820	bool wait_for_proc_work;
4821
4822	thread = binder_get_thread(proc);
4823	if (!thread)
4824		return POLLERR;
4825
4826	binder_inner_proc_lock(thread->proc);
4827	thread->looper |= BINDER_LOOPER_STATE_POLL;
4828	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4829
4830	binder_inner_proc_unlock(thread->proc);
4831
4832	poll_wait(filp, &thread->wait, wait);
4833
4834	if (binder_has_work(thread, wait_for_proc_work))
4835		return EPOLLIN;
4836
4837	return 0;
4838}
4839
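/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:	binder file
 * @cmd:	ioctl command (used for size validation)
 * @arg:	userspace pointer to a struct binder_write_read
 * @thread:	binder thread issuing the ioctl
 *
 * Copy in the binder_write_read descriptor, process the write buffer
 * via binder_thread_write(), fill the read buffer via
 * binder_thread_read(), and copy the updated consumed counts back.
 *
 * Userspace typically drives this roughly as follows (illustrative
 * sketch only, not code from this file):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size = cmds_len,
 *		.read_buffer = (binder_uintptr_t)readbuf,
 *		.read_size = sizeof(readbuf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * Return: 0 on success, negative errno on failure.
 */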
4840static int binder_ioctl_write_read(struct file *filp,
4841				unsigned int cmd, unsigned long arg,
4842				struct binder_thread *thread)
4843{
4844	int ret = 0;
4845	struct binder_proc *proc = filp->private_data;
4846	unsigned int size = _IOC_SIZE(cmd);
4847	void __user *ubuf = (void __user *)arg;
4848	struct binder_write_read bwr;
4849
4850	if (size != sizeof(struct binder_write_read)) {
4851		ret = -EINVAL;
4852		goto out;
4853	}
4854	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4855		ret = -EFAULT;
4856		goto out;
4857	}
4858	binder_debug(BINDER_DEBUG_READ_WRITE,
4859		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4860		     proc->pid, thread->pid,
4861		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4862		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4863
4864	if (bwr.write_size > 0) {
4865		ret = binder_thread_write(proc, thread,
4866					  bwr.write_buffer,
4867					  bwr.write_size,
4868					  &bwr.write_consumed);
4869		trace_binder_write_done(ret);
4870		if (ret < 0) {
4871			bwr.read_consumed = 0;
4872			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4873				ret = -EFAULT;
4874			goto out;
4875		}
4876	}
4877	if (bwr.read_size > 0) {
4878		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4879					 bwr.read_size,
4880					 &bwr.read_consumed,
4881					 filp->f_flags & O_NONBLOCK);
4882		trace_binder_read_done(ret);
4883		binder_inner_proc_lock(proc);
4884		if (!binder_worklist_empty_ilocked(&proc->todo))
4885			binder_wakeup_proc_ilocked(proc);
4886		binder_inner_proc_unlock(proc);
4887		if (ret < 0) {
4888			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4889				ret = -EFAULT;
4890			goto out;
4891		}
4892	}
4893	binder_debug(BINDER_DEBUG_READ_WRITE,
4894		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4895		     proc->pid, thread->pid,
4896		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4897		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4898	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4899		ret = -EFAULT;
4900		goto out;
4901	}
4902out:
4903	return ret;
4904}
4905
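/**
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR(_EXT)
 * @filp:	binder file
 * @fbo:	flat_binder_object describing the context manager node,
 *		or NULL for the legacy BINDER_SET_CONTEXT_MGR
 *
 * After a security check, create the context manager node for this
 * binder context. Fails with -EBUSY if a context manager is already
 * registered, or -EPERM if the caller's euid does not match a
 * previously recorded context manager uid.
 *
 * Return: 0 on success, negative errno on failure.
 */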
4906static int binder_ioctl_set_ctx_mgr(struct file *filp,
4907				    struct flat_binder_object *fbo)
4908{
4909	int ret = 0;
4910	struct binder_proc *proc = filp->private_data;
4911	struct binder_context *context = proc->context;
4912	struct binder_node *new_node;
4913	kuid_t curr_euid = current_euid();
4914
4915	mutex_lock(&context->context_mgr_node_lock);
4916	if (context->binder_context_mgr_node) {
4917		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4918		ret = -EBUSY;
4919		goto out;
4920	}
4921	ret = security_binder_set_context_mgr(proc->tsk);
4922	if (ret < 0)
4923		goto out;
4924	if (uid_valid(context->binder_context_mgr_uid)) {
4925		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4926			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4927			       from_kuid(&init_user_ns, curr_euid),
4928			       from_kuid(&init_user_ns,
4929					 context->binder_context_mgr_uid));
4930			ret = -EPERM;
4931			goto out;
4932		}
4933	} else {
4934		context->binder_context_mgr_uid = curr_euid;
4935	}
4936	new_node = binder_new_node(proc, fbo);
4937	if (!new_node) {
4938		ret = -ENOMEM;
4939		goto out;
4940	}
4941	binder_node_lock(new_node);
4942	new_node->local_weak_refs++;
4943	new_node->local_strong_refs++;
4944	new_node->has_strong_ref = 1;
4945	new_node->has_weak_ref = 1;
4946	context->binder_context_mgr_node = new_node;
4947	binder_node_unlock(new_node);
4948	binder_put_node(new_node);
4949out:
4950	mutex_unlock(&context->context_mgr_node_lock);
4951	return ret;
4952}
4953
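/**
 * binder_ioctl_get_node_info_for_ref() - handle BINDER_GET_NODE_INFO_FOR_REF
 * @proc:	binder proc issuing the ioctl (must be the context manager)
 * @info:	in/out node info; only @info->handle may be set on entry
 *
 * Look up the node behind @info->handle and report its strong and
 * weak reference counts.
 *
 * Return: 0 on success, -EINVAL on bad arguments or an unknown
 * handle, -EPERM if @proc is not the context manager.
 */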
4954static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4955		struct binder_node_info_for_ref *info)
4956{
4957	struct binder_node *node;
4958	struct binder_context *context = proc->context;
4959	__u32 handle = info->handle;
4960
4961	if (info->strong_count || info->weak_count || info->reserved1 ||
4962	    info->reserved2 || info->reserved3) {
4963		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4964				  proc->pid);
4965		return -EINVAL;
4966	}
4967
4968	/* This ioctl may only be used by the context manager */
4969	mutex_lock(&context->context_mgr_node_lock);
4970	if (!context->binder_context_mgr_node ||
4971		context->binder_context_mgr_node->proc != proc) {
4972		mutex_unlock(&context->context_mgr_node_lock);
4973		return -EPERM;
4974	}
4975	mutex_unlock(&context->context_mgr_node_lock);
4976
4977	node = binder_get_node_from_ref(proc, handle, true, NULL);
4978	if (!node)
4979		return -EINVAL;
4980
4981	info->strong_count = node->local_strong_refs +
4982		node->internal_strong_refs;
4983	info->weak_count = node->local_weak_refs;
4984
4985	binder_put_node(node);
4986
4987	return 0;
4988}
4989
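/**
 * binder_ioctl_get_node_debug_info() - handle BINDER_GET_NODE_DEBUG_INFO
 * @proc:	binder proc issuing the ioctl
 * @info:	on entry, @info->ptr is the lower bound for the search;
 *		on exit, describes the first node with a greater ptr
 *
 * Used to iterate over the nodes of @proc: each call returns the next
 * node after @info->ptr, or a zeroed @info when there are no more.
 *
 * Return: 0 (always succeeds).
 */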
4990static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4991				struct binder_node_debug_info *info)
4992{
4993	struct rb_node *n;
4994	binder_uintptr_t ptr = info->ptr;
4995
4996	memset(info, 0, sizeof(*info));
4997
4998	binder_inner_proc_lock(proc);
4999	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5000		struct binder_node *node = rb_entry(n, struct binder_node,
5001						    rb_node);
5002		if (node->ptr > ptr) {
5003			info->ptr = node->ptr;
5004			info->cookie = node->cookie;
5005			info->has_strong_ref = node->has_strong_ref;
5006			info->has_weak_ref = node->has_weak_ref;
5007			break;
5008		}
5009	}
5010	binder_inner_proc_unlock(proc);
5011
5012	return 0;
5013}
5014
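/*
 * binder_ioctl() - top-level ioctl dispatcher for a binder fd
 *
 * Looks up (creating it if needed) the binder_thread for the calling
 * thread, then dispatches on the command. BINDER_WRITE_READ is the
 * main transaction path; the remaining commands configure the process
 * or report driver state. A minimal userspace sketch (assuming the
 * default device name):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	ioctl(fd, BINDER_VERSION, &vers);  /* check protocol_version */
 */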
5015static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5016{
5017	int ret;
5018	struct binder_proc *proc = filp->private_data;
5019	struct binder_thread *thread;
5020	unsigned int size = _IOC_SIZE(cmd);
5021	void __user *ubuf = (void __user *)arg;
5022
5023	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5024			proc->pid, current->pid, cmd, arg);*/
5025
5026	binder_selftest_alloc(&proc->alloc);
5027
5028	trace_binder_ioctl(cmd, arg);
5029
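	/*
	 * Debug aid: a prior user error can set the stop_on_user_error
	 * module parameter to 2 (see binder_user_error()); in that case
	 * block here until it is reset below 2.
	 */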
5030	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5031	if (ret)
5032		goto err_unlocked;
5033
5034	thread = binder_get_thread(proc);
5035	if (thread == NULL) {
5036		ret = -ENOMEM;
5037		goto err;
5038	}
5039
5040	switch (cmd) {
5041	case BINDER_WRITE_READ:
5042		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5043		if (ret)
5044			goto err;
5045		break;
5046	case BINDER_SET_MAX_THREADS: {
5047		int max_threads;
5048
5049		if (copy_from_user(&max_threads, ubuf,
5050				   sizeof(max_threads))) {
5051			ret = -EINVAL;
5052			goto err;
5053		}
5054		binder_inner_proc_lock(proc);
5055		proc->max_threads = max_threads;
5056		binder_inner_proc_unlock(proc);
5057		break;
5058	}
5059	case BINDER_SET_CONTEXT_MGR_EXT: {
5060		struct flat_binder_object fbo;
5061
5062		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5063			ret = -EINVAL;
5064			goto err;
5065		}
5066		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5067		if (ret)
5068			goto err;
5069		break;
5070	}
5071	case BINDER_SET_CONTEXT_MGR:
5072		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5073		if (ret)
5074			goto err;
5075		break;
5076	case BINDER_THREAD_EXIT:
5077		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5078			     proc->pid, thread->pid);
5079		binder_thread_release(proc, thread);
5080		thread = NULL;
5081		break;
5082	case BINDER_VERSION: {
5083		struct binder_version __user *ver = ubuf;
5084
5085		if (size != sizeof(struct binder_version)) {
5086			ret = -EINVAL;
5087			goto err;
5088		}
5089		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5090			     &ver->protocol_version)) {
5091			ret = -EINVAL;
5092			goto err;
5093		}
5094		break;
5095	}
5096	case BINDER_GET_NODE_INFO_FOR_REF: {
5097		struct binder_node_info_for_ref info;
5098
5099		if (copy_from_user(&info, ubuf, sizeof(info))) {
5100			ret = -EFAULT;
5101			goto err;
5102		}
5103
5104		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5105		if (ret < 0)
5106			goto err;
5107
5108		if (copy_to_user(ubuf, &info, sizeof(info))) {
5109			ret = -EFAULT;
5110			goto err;
5111		}
5112
5113		break;
5114	}
5115	case BINDER_GET_NODE_DEBUG_INFO: {
5116		struct binder_node_debug_info info;
5117
5118		if (copy_from_user(&info, ubuf, sizeof(info))) {
5119			ret = -EFAULT;
5120			goto err;
5121		}
5122
5123		ret = binder_ioctl_get_node_debug_info(proc, &info);
5124		if (ret < 0)
5125			goto err;
5126
5127		if (copy_to_user(ubuf, &info, sizeof(info))) {
5128			ret = -EFAULT;
5129			goto err;
5130		}
5131		break;
5132	}
5133	default:
5134		ret = -EINVAL;
5135		goto err;
5136	}
5137	ret = 0;
5138err:
5139	if (thread)
5140		thread->looper_need_return = false;
5141	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5142	if (ret && ret != -ERESTARTSYS)
5143		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5144err_unlocked:
5145	trace_binder_ioctl_done(ret);
5146	return ret;
5147}
5148
5149static void binder_vma_open(struct vm_area_struct *vma)
5150{
5151	struct binder_proc *proc = vma->vm_private_data;
5152
5153	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5154		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5155		     proc->pid, vma->vm_start, vma->vm_end,
5156		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5157		     (unsigned long)pgprot_val(vma->vm_page_prot));
5158}
5159
5160static void binder_vma_close(struct vm_area_struct *vma)
5161{
5162	struct binder_proc *proc = vma->vm_private_data;
5163
5164	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5165		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5166		     proc->pid, vma->vm_start, vma->vm_end,
5167		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5168		     (unsigned long)pgprot_val(vma->vm_page_prot));
5169	binder_alloc_vma_close(&proc->alloc);
5170}
5171
5172static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5173{
5174	return VM_FAULT_SIGBUS;
5175}
5176
5177static const struct vm_operations_struct binder_vm_ops = {
5178	.open = binder_vma_open,
5179	.close = binder_vma_close,
5180	.fault = binder_vm_fault,
5181};
5182
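/*
 * binder_mmap() - map the binder buffer space into the caller
 *
 * The mapping is read-only for userspace: VM_WRITE is rejected
 * outright and VM_MAYWRITE is cleared so the region cannot later be
 * mprotect()ed to writable. The kernel fills these pages with
 * transaction data via binder_alloc. Userspace therefore maps with
 * something like:
 *
 *	mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
 */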
5183static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5184{
5185	int ret;
5186	struct binder_proc *proc = filp->private_data;
5187	const char *failure_string;
5188
5189	if (proc->tsk != current->group_leader)
5190		return -EINVAL;
5191
5192	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5193		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5194		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5195		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5196		     (unsigned long)pgprot_val(vma->vm_page_prot));
5197
5198	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5199		ret = -EPERM;
5200		failure_string = "bad vm_flags";
5201		goto err_bad_arg;
5202	}
5203	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5204	vma->vm_flags &= ~VM_MAYWRITE;
5205
5206	vma->vm_ops = &binder_vm_ops;
5207	vma->vm_private_data = proc;
5208
5209	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5210	if (ret)
5211		return ret;
5212	return 0;
5213
5214err_bad_arg:
5215	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5216	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5217	return ret;
5218}
5219
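/*
 * binder_open() - set up per-process state for a new opener
 *
 * Allocates the binder_proc, resolves which device is being opened
 * (binderfs stashes it in i_private, otherwise it is derived from the
 * miscdevice), and publishes the proc on the global binder_procs
 * list. The debugfs/binderfs log entries are only created on the
 * first open for a given PID, since contexts share them.
 */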
5220static int binder_open(struct inode *nodp, struct file *filp)
5221{
5222	struct binder_proc *proc, *itr;
5223	struct binder_device *binder_dev;
5224	struct binderfs_info *info;
5225	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5226	bool existing_pid = false;
5227
5228	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5229		     current->group_leader->pid, current->pid);
5230
5231	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5232	if (proc == NULL)
5233		return -ENOMEM;
5234	spin_lock_init(&proc->inner_lock);
5235	spin_lock_init(&proc->outer_lock);
5236	get_task_struct(current->group_leader);
5237	proc->tsk = current->group_leader;
5238	INIT_LIST_HEAD(&proc->todo);
5239	proc->default_priority = task_nice(current);
5240	/* binderfs stashes devices in i_private */
5241	if (is_binderfs_device(nodp)) {
5242		binder_dev = nodp->i_private;
5243		info = nodp->i_sb->s_fs_info;
5244		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5245	} else {
5246		binder_dev = container_of(filp->private_data,
5247					  struct binder_device, miscdev);
5248	}
5249	refcount_inc(&binder_dev->ref);
5250	proc->context = &binder_dev->context;
5251	binder_alloc_init(&proc->alloc);
5252
5253	binder_stats_created(BINDER_STAT_PROC);
5254	proc->pid = current->group_leader->pid;
5255	INIT_LIST_HEAD(&proc->delivered_death);
5256	INIT_LIST_HEAD(&proc->waiting_threads);
5257	filp->private_data = proc;
5258
5259	mutex_lock(&binder_procs_lock);
5260	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5261		if (itr->pid == proc->pid) {
5262			existing_pid = true;
5263			break;
5264		}
5265	}
5266	hlist_add_head(&proc->proc_node, &binder_procs);
5267	mutex_unlock(&binder_procs_lock);
5268
5269	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5270		char strbuf[11];
5271
5272		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5273		/*
5274		 * proc debug entries are shared between contexts.
5276		 * Only create for the first PID to avoid debugfs log spamming.
5277		 * The printing code will anyway print all contexts for a given
5278		 * PID, so this is not a problem.
5278		 */
5279		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5280			binder_debugfs_dir_entry_proc,
5281			(void *)(unsigned long)proc->pid,
5282			&proc_fops);
5283	}
5284
5285	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5286		char strbuf[11];
5287		struct dentry *binderfs_entry;
5288
5289		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5290		/*
5291		 * Similar to debugfs, the process-specific log file is shared
5292		 * between contexts. Only create it for the first PID.
5293		 * This is ok since, as with debugfs, the log file will contain
5294		 * information on all contexts of a given PID.
5295		 */
5296		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5297			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5298		if (!IS_ERR(binderfs_entry)) {
5299			proc->binderfs_entry = binderfs_entry;
5300		} else {
5301			int error;
5302
5303			error = PTR_ERR(binderfs_entry);
5304			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5305				strbuf, error);
5306		}
5307	}
5308
5309	return 0;
5310}
5311
5312static int binder_flush(struct file *filp, fl_owner_t id)
5313{
5314	struct binder_proc *proc = filp->private_data;
5315
5316	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5317
5318	return 0;
5319}
5320
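/*
 * binder_deferred_flush() - force looper threads back to userspace
 *
 * Sets looper_need_return on every thread of @proc and wakes the ones
 * currently waiting for work, so that a flush (e.g. on close) does
 * not leave threads parked in the driver.
 */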
5321static void binder_deferred_flush(struct binder_proc *proc)
5322{
5323	struct rb_node *n;
5324	int wake_count = 0;
5325
5326	binder_inner_proc_lock(proc);
5327	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5328		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5329
5330		thread->looper_need_return = true;
5331		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5332			wake_up_interruptible(&thread->wait);
5333			wake_count++;
5334		}
5335	}
5336	binder_inner_proc_unlock(proc);
5337
5338	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5339		     "binder_flush: %d woke %d threads\n", proc->pid,
5340		     wake_count);
5341}
5342
5343static int binder_release(struct inode *nodp, struct file *filp)
5344{
5345	struct binder_proc *proc = filp->private_data;
5346
5347	debugfs_remove(proc->debugfs_entry);
5348
5349	if (proc->binderfs_entry) {
5350		binderfs_remove_file(proc->binderfs_entry);
5351		proc->binderfs_entry = NULL;
5352	}
5353
5354	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5355
5356	return 0;
5357}
5358
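/*
 * binder_node_release() - release one node of an exiting process
 *
 * If the node has no references left beyond the caller's temporary
 * one, it is freed immediately. Otherwise it is moved to the global
 * binder_dead_nodes list and a BINDER_WORK_DEAD_BINDER item is queued
 * to every process that requested a death notification. Returns the
 * accumulated incoming reference count (for the release statistics).
 */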
5359static int binder_node_release(struct binder_node *node, int refs)
5360{
5361	struct binder_ref *ref;
5362	int death = 0;
5363	struct binder_proc *proc = node->proc;
5364
5365	binder_release_work(proc, &node->async_todo);
5366
5367	binder_node_lock(node);
5368	binder_inner_proc_lock(proc);
5369	binder_dequeue_work_ilocked(&node->work);
5370	/*
5371	 * The caller must have taken a temporary ref on the node.
5372	 */
5373	BUG_ON(!node->tmp_refs);
5374	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5375		binder_inner_proc_unlock(proc);
5376		binder_node_unlock(node);
5377		binder_free_node(node);
5378
5379		return refs;
5380	}
5381
5382	node->proc = NULL;
5383	node->local_strong_refs = 0;
5384	node->local_weak_refs = 0;
5385	binder_inner_proc_unlock(proc);
5386
5387	spin_lock(&binder_dead_nodes_lock);
5388	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5389	spin_unlock(&binder_dead_nodes_lock);
5390
5391	hlist_for_each_entry(ref, &node->refs, node_entry) {
5392		refs++;
5393		/*
5394		 * Need the node lock to synchronize
5395		 * with new notification requests and the
5396		 * inner lock to synchronize with queued
5397		 * death notifications.
5398		 */
5399		binder_inner_proc_lock(ref->proc);
5400		if (!ref->death) {
5401			binder_inner_proc_unlock(ref->proc);
5402			continue;
5403		}
5404
5405		death++;
5406
5407		BUG_ON(!list_empty(&ref->death->work.entry));
5408		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5409		binder_enqueue_work_ilocked(&ref->death->work,
5410					    &ref->proc->todo);
5411		binder_wakeup_proc_ilocked(ref->proc);
5412		binder_inner_proc_unlock(ref->proc);
5413	}
5414
5415	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5416		     "node %d now dead, refs %d, death %d\n",
5417		     node->debug_id, refs, death);
5418	binder_node_unlock(node);
5419	binder_put_node(node);
5420
5421	return refs;
5422}
5423
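/*
 * binder_deferred_release() - tear down a binder_proc after release
 *
 * Runs from the deferred workqueue once the file has been released.
 * The proc is unpublished (including the context manager node, if
 * this proc owned it), then threads, nodes, refs and leftover work
 * items are released in that order; the inner lock is dropped around
 * each release helper, which takes the locks it needs itself. A
 * temporary reference keeps the proc alive until the final
 * binder_proc_dec_tmpref().
 */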
5424static void binder_deferred_release(struct binder_proc *proc)
5425{
5426	struct binder_context *context = proc->context;
5427	struct rb_node *n;
5428	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5429
5430	mutex_lock(&binder_procs_lock);
5431	hlist_del(&proc->proc_node);
5432	mutex_unlock(&binder_procs_lock);
5433
5434	mutex_lock(&context->context_mgr_node_lock);
5435	if (context->binder_context_mgr_node &&
5436	    context->binder_context_mgr_node->proc == proc) {
5437		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5438			     "%s: %d context_mgr_node gone\n",
5439			     __func__, proc->pid);
5440		context->binder_context_mgr_node = NULL;
5441	}
5442	mutex_unlock(&context->context_mgr_node_lock);
5443	binder_inner_proc_lock(proc);
5444	/*
5445	 * Make sure proc stays alive after we
5446	 * remove all the threads
5447	 */
5448	proc->tmp_ref++;
5449
5450	proc->is_dead = true;
5451	threads = 0;
5452	active_transactions = 0;
5453	while ((n = rb_first(&proc->threads))) {
5454		struct binder_thread *thread;
5455
5456		thread = rb_entry(n, struct binder_thread, rb_node);
5457		binder_inner_proc_unlock(proc);
5458		threads++;
5459		active_transactions += binder_thread_release(proc, thread);
5460		binder_inner_proc_lock(proc);
5461	}
5462
5463	nodes = 0;
5464	incoming_refs = 0;
5465	while ((n = rb_first(&proc->nodes))) {
5466		struct binder_node *node;
5467
5468		node = rb_entry(n, struct binder_node, rb_node);
5469		nodes++;
5470		/*
5471		 * take a temporary ref on the node before
5472		 * calling binder_node_release() which will either
5473		 * kfree() the node or call binder_put_node()
5474		 */
5475		binder_inc_node_tmpref_ilocked(node);
5476		rb_erase(&node->rb_node, &proc->nodes);
5477		binder_inner_proc_unlock(proc);
5478		incoming_refs = binder_node_release(node, incoming_refs);
5479		binder_inner_proc_lock(proc);
5480	}
5481	binder_inner_proc_unlock(proc);
5482
5483	outgoing_refs = 0;
5484	binder_proc_lock(proc);
5485	while ((n = rb_first(&proc->refs_by_desc))) {
5486		struct binder_ref *ref;
5487
5488		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5489		outgoing_refs++;
5490		binder_cleanup_ref_olocked(ref);
5491		binder_proc_unlock(proc);
5492		binder_free_ref(ref);
5493		binder_proc_lock(proc);
5494	}
5495	binder_proc_unlock(proc);
5496
5497	binder_release_work(proc, &proc->todo);
5498	binder_release_work(proc, &proc->delivered_death);
5499
5500	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5501		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5502		     __func__, proc->pid, threads, nodes, incoming_refs,
5503		     outgoing_refs, active_transactions);
5504
5505	binder_proc_dec_tmpref(proc);
5506}
5507
5508static void binder_deferred_func(struct work_struct *work)
5509{
5510	struct binder_proc *proc;
5511
5512	int defer;
5513
5514	do {
5515		mutex_lock(&binder_deferred_lock);
5516		if (!hlist_empty(&binder_deferred_list)) {
5517			proc = hlist_entry(binder_deferred_list.first,
5518					struct binder_proc, deferred_work_node);
5519			hlist_del_init(&proc->deferred_work_node);
5520			defer = proc->deferred_work;
5521			proc->deferred_work = 0;
5522		} else {
5523			proc = NULL;
5524			defer = 0;
5525		}
5526		mutex_unlock(&binder_deferred_lock);
5527
5528		if (defer & BINDER_DEFERRED_FLUSH)
5529			binder_deferred_flush(proc);
5530
5531		if (defer & BINDER_DEFERRED_RELEASE)
5532			binder_deferred_release(proc); /* frees proc */
5533	} while (proc);
5534}
5535static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5536
5537static void
5538binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5539{
5540	mutex_lock(&binder_deferred_lock);
5541	proc->deferred_work |= defer;
5542	if (hlist_unhashed(&proc->deferred_work_node)) {
5543		hlist_add_head(&proc->deferred_work_node,
5544				&binder_deferred_list);
5545		schedule_work(&binder_deferred_work);
5546	}
5547	mutex_unlock(&binder_deferred_lock);
5548}
5549
5550static void print_binder_transaction_ilocked(struct seq_file *m,
5551					     struct binder_proc *proc,
5552					     const char *prefix,
5553					     struct binder_transaction *t)
5554{
5555	struct binder_proc *to_proc;
5556	struct binder_buffer *buffer = t->buffer;
5557
5558	spin_lock(&t->lock);
5559	to_proc = t->to_proc;
5560	seq_printf(m,
5561		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5562		   prefix, t->debug_id, t,
5563		   t->from ? t->from->proc->pid : 0,
5564		   t->from ? t->from->pid : 0,
5565		   to_proc ? to_proc->pid : 0,
5566		   t->to_thread ? t->to_thread->pid : 0,
5567		   t->code, t->flags, t->priority, t->need_reply);
5568	spin_unlock(&t->lock);
5569
5570	if (proc != to_proc) {
5571		/*
5572		 * Can only safely deref buffer if we are holding the
5573		 * correct proc inner lock for this node
5574		 */
5575		seq_puts(m, "\n");
5576		return;
5577	}
5578
5579	if (buffer == NULL) {
5580		seq_puts(m, " buffer free\n");
5581		return;
5582	}
5583	if (buffer->target_node)
5584		seq_printf(m, " node %d", buffer->target_node->debug_id);
5585	seq_printf(m, " size %zd:%zd data %pK\n",
5586		   buffer->data_size, buffer->offsets_size,
5587		   buffer->user_data);
5588}
5589
5590static void print_binder_work_ilocked(struct seq_file *m,
5591				     struct binder_proc *proc,
5592				     const char *prefix,
5593				     const char *transaction_prefix,
5594				     struct binder_work *w)
5595{
5596	struct binder_node *node;
5597	struct binder_transaction *t;
5598
5599	switch (w->type) {
5600	case BINDER_WORK_TRANSACTION:
5601		t = container_of(w, struct binder_transaction, work);
5602		print_binder_transaction_ilocked(
5603				m, proc, transaction_prefix, t);
5604		break;
5605	case BINDER_WORK_RETURN_ERROR: {
5606		struct binder_error *e = container_of(
5607				w, struct binder_error, work);
5608
5609		seq_printf(m, "%stransaction error: %u\n",
5610			   prefix, e->cmd);
5611	} break;
5612	case BINDER_WORK_TRANSACTION_COMPLETE:
5613		seq_printf(m, "%stransaction complete\n", prefix);
5614		break;
5615	case BINDER_WORK_NODE:
5616		node = container_of(w, struct binder_node, work);
5617		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5618			   prefix, node->debug_id,
5619			   (u64)node->ptr, (u64)node->cookie);
5620		break;
5621	case BINDER_WORK_DEAD_BINDER:
5622		seq_printf(m, "%shas dead binder\n", prefix);
5623		break;
5624	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5625		seq_printf(m, "%shas cleared dead binder\n", prefix);
5626		break;
5627	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5628		seq_printf(m, "%shas cleared death notification\n", prefix);
5629		break;
5630	default:
5631		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5632		break;
5633	}
5634}
5635
5636static void print_binder_thread_ilocked(struct seq_file *m,
5637					struct binder_thread *thread,
5638					int print_always)
5639{
5640	struct binder_transaction *t;
5641	struct binder_work *w;
5642	size_t start_pos = m->count;
5643	size_t header_pos;
5644
5645	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5646			thread->pid, thread->looper,
5647			thread->looper_need_return,
5648			atomic_read(&thread->tmp_ref));
5649	header_pos = m->count;
5650	t = thread->transaction_stack;
5651	while (t) {
5652		if (t->from == thread) {
5653			print_binder_transaction_ilocked(m, thread->proc,
5654					"    outgoing transaction", t);
5655			t = t->from_parent;
5656		} else if (t->to_thread == thread) {
5657			print_binder_transaction_ilocked(m, thread->proc,
5658						 "    incoming transaction", t);
5659			t = t->to_parent;
5660		} else {
5661			print_binder_transaction_ilocked(m, thread->proc,
5662					"    bad transaction", t);
5663			t = NULL;
5664		}
5665	}
5666	list_for_each_entry(w, &thread->todo, entry) {
5667		print_binder_work_ilocked(m, thread->proc, "    ",
5668					  "    pending transaction", w);
5669	}
5670	if (!print_always && m->count == header_pos)
5671		m->count = start_pos;
5672}
5673
5674static void print_binder_node_nilocked(struct seq_file *m,
5675				       struct binder_node *node)
5676{
5677	struct binder_ref *ref;
5678	struct binder_work *w;
5679	int count;
5680
5681	count = 0;
5682	hlist_for_each_entry(ref, &node->refs, node_entry)
5683		count++;
5684
5685	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5686		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5687		   node->has_strong_ref, node->has_weak_ref,
5688		   node->local_strong_refs, node->local_weak_refs,
5689		   node->internal_strong_refs, count, node->tmp_refs);
5690	if (count) {
5691		seq_puts(m, " proc");
5692		hlist_for_each_entry(ref, &node->refs, node_entry)
5693			seq_printf(m, " %d", ref->proc->pid);
5694	}
5695	seq_puts(m, "\n");
5696	if (node->proc) {
5697		list_for_each_entry(w, &node->async_todo, entry)
5698			print_binder_work_ilocked(m, node->proc, "    ",
5699					  "    pending async transaction", w);
5700	}
5701}
5702
5703static void print_binder_ref_olocked(struct seq_file *m,
5704				     struct binder_ref *ref)
5705{
5706	binder_node_lock(ref->node);
5707	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5708		   ref->data.debug_id, ref->data.desc,
5709		   ref->node->proc ? "" : "dead ",
5710		   ref->node->debug_id, ref->data.strong,
5711		   ref->data.weak, ref->death);
5712	binder_node_unlock(ref->node);
5713}
5714
5715static void print_binder_proc(struct seq_file *m,
5716			      struct binder_proc *proc, int print_all)
5717{
5718	struct binder_work *w;
5719	struct rb_node *n;
5720	size_t start_pos = m->count;
5721	size_t header_pos;
5722	struct binder_node *last_node = NULL;
5723
5724	seq_printf(m, "proc %d\n", proc->pid);
5725	seq_printf(m, "context %s\n", proc->context->name);
5726	header_pos = m->count;
5727
5728	binder_inner_proc_lock(proc);
5729	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5730		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5731						rb_node), print_all);
5732
5733	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5734		struct binder_node *node = rb_entry(n, struct binder_node,
5735						    rb_node);
5736		if (!print_all && !node->has_async_transaction)
5737			continue;
5738
5739		/*
5740		 * take a temporary reference on the node so it
5741		 * survives and isn't removed from the tree
5742		 * while we print it.
5743		 */
5744		binder_inc_node_tmpref_ilocked(node);
5745		/* Need to drop inner lock to take node lock */
5746		binder_inner_proc_unlock(proc);
5747		if (last_node)
5748			binder_put_node(last_node);
5749		binder_node_inner_lock(node);
5750		print_binder_node_nilocked(m, node);
5751		binder_node_inner_unlock(node);
5752		last_node = node;
5753		binder_inner_proc_lock(proc);
5754	}
5755	binder_inner_proc_unlock(proc);
5756	if (last_node)
5757		binder_put_node(last_node);
5758
5759	if (print_all) {
5760		binder_proc_lock(proc);
5761		for (n = rb_first(&proc->refs_by_desc);
5762		     n != NULL;
5763		     n = rb_next(n))
5764			print_binder_ref_olocked(m, rb_entry(n,
5765							    struct binder_ref,
5766							    rb_node_desc));
5767		binder_proc_unlock(proc);
5768	}
5769	binder_alloc_print_allocated(m, &proc->alloc);
5770	binder_inner_proc_lock(proc);
5771	list_for_each_entry(w, &proc->todo, entry)
5772		print_binder_work_ilocked(m, proc, "  ",
5773					  "  pending transaction", w);
5774	list_for_each_entry(w, &proc->delivered_death, entry) {
5775		seq_puts(m, "  has delivered dead binder\n");
5776		break;
5777	}
5778	binder_inner_proc_unlock(proc);
5779	if (!print_all && m->count == header_pos)
5780		m->count = start_pos;
5781}
5782
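/*
 * These tables mirror the BR_* and BC_* protocol enums; the
 * BUILD_BUG_ON()s in print_binder_stats() keep them the same size as
 * the corresponding stats arrays.
 */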
5783static const char * const binder_return_strings[] = {
5784	"BR_ERROR",
5785	"BR_OK",
5786	"BR_TRANSACTION",
5787	"BR_REPLY",
5788	"BR_ACQUIRE_RESULT",
5789	"BR_DEAD_REPLY",
5790	"BR_TRANSACTION_COMPLETE",
5791	"BR_INCREFS",
5792	"BR_ACQUIRE",
5793	"BR_RELEASE",
5794	"BR_DECREFS",
5795	"BR_ATTEMPT_ACQUIRE",
5796	"BR_NOOP",
5797	"BR_SPAWN_LOOPER",
5798	"BR_FINISHED",
5799	"BR_DEAD_BINDER",
5800	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5801	"BR_FAILED_REPLY"
5802};
5803
5804static const char * const binder_command_strings[] = {
5805	"BC_TRANSACTION",
5806	"BC_REPLY",
5807	"BC_ACQUIRE_RESULT",
5808	"BC_FREE_BUFFER",
5809	"BC_INCREFS",
5810	"BC_ACQUIRE",
5811	"BC_RELEASE",
5812	"BC_DECREFS",
5813	"BC_INCREFS_DONE",
5814	"BC_ACQUIRE_DONE",
5815	"BC_ATTEMPT_ACQUIRE",
5816	"BC_REGISTER_LOOPER",
5817	"BC_ENTER_LOOPER",
5818	"BC_EXIT_LOOPER",
5819	"BC_REQUEST_DEATH_NOTIFICATION",
5820	"BC_CLEAR_DEATH_NOTIFICATION",
5821	"BC_DEAD_BINDER_DONE",
5822	"BC_TRANSACTION_SG",
5823	"BC_REPLY_SG",
5824};
5825
5826static const char * const binder_objstat_strings[] = {
5827	"proc",
5828	"thread",
5829	"node",
5830	"ref",
5831	"death",
5832	"transaction",
5833	"transaction_complete"
5834};
5835
5836static void print_binder_stats(struct seq_file *m, const char *prefix,
5837			       struct binder_stats *stats)
5838{
5839	int i;
5840
5841	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5842		     ARRAY_SIZE(binder_command_strings));
5843	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5844		int temp = atomic_read(&stats->bc[i]);
5845
5846		if (temp)
5847			seq_printf(m, "%s%s: %d\n", prefix,
5848				   binder_command_strings[i], temp);
5849	}
5850
5851	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5852		     ARRAY_SIZE(binder_return_strings));
5853	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5854		int temp = atomic_read(&stats->br[i]);
5855
5856		if (temp)
5857			seq_printf(m, "%s%s: %d\n", prefix,
5858				   binder_return_strings[i], temp);
5859	}
5860
5861	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5862		     ARRAY_SIZE(binder_objstat_strings));
5863	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5864		     ARRAY_SIZE(stats->obj_deleted));
5865	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5866		int created = atomic_read(&stats->obj_created[i]);
5867		int deleted = atomic_read(&stats->obj_deleted[i]);
5868
5869		if (created || deleted)
5870			seq_printf(m, "%s%s: active %d total %d\n",
5871				prefix,
5872				binder_objstat_strings[i],
5873				created - deleted,
5874				created);
5875	}
5876}
5877
5878static void print_binder_proc_stats(struct seq_file *m,
5879				    struct binder_proc *proc)
5880{
5881	struct binder_work *w;
5882	struct binder_thread *thread;
5883	struct rb_node *n;
5884	int count, strong, weak, ready_threads;
5885	size_t free_async_space =
5886		binder_alloc_get_free_async_space(&proc->alloc);
5887
5888	seq_printf(m, "proc %d\n", proc->pid);
5889	seq_printf(m, "context %s\n", proc->context->name);
5890	count = 0;
5891	ready_threads = 0;
5892	binder_inner_proc_lock(proc);
5893	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5894		count++;
5895
5896	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5897		ready_threads++;
5898
5899	seq_printf(m, "  threads: %d\n", count);
5900	seq_printf(m, "  requested threads: %d+%d/%d\n"
5901			"  ready threads %d\n"
5902			"  free async space %zd\n", proc->requested_threads,
5903			proc->requested_threads_started, proc->max_threads,
5904			ready_threads,
5905			free_async_space);
5906	count = 0;
5907	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5908		count++;
5909	binder_inner_proc_unlock(proc);
5910	seq_printf(m, "  nodes: %d\n", count);
5911	count = 0;
5912	strong = 0;
5913	weak = 0;
5914	binder_proc_lock(proc);
5915	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5916		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5917						  rb_node_desc);
5918		count++;
5919		strong += ref->data.strong;
5920		weak += ref->data.weak;
5921	}
5922	binder_proc_unlock(proc);
5923	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5924
5925	count = binder_alloc_get_allocated_count(&proc->alloc);
5926	seq_printf(m, "  buffers: %d\n", count);
5927
5928	binder_alloc_print_pages(m, &proc->alloc);
5929
5930	count = 0;
5931	binder_inner_proc_lock(proc);
5932	list_for_each_entry(w, &proc->todo, entry) {
5933		if (w->type == BINDER_WORK_TRANSACTION)
5934			count++;
5935	}
5936	binder_inner_proc_unlock(proc);
5937	seq_printf(m, "  pending transactions: %d\n", count);
5938
5939	print_binder_stats(m, "  ", &proc->stats);
5940}
5941
5943int binder_state_show(struct seq_file *m, void *unused)
5944{
5945	struct binder_proc *proc;
5946	struct binder_node *node;
5947	struct binder_node *last_node = NULL;
5948
5949	seq_puts(m, "binder state:\n");
5950
5951	spin_lock(&binder_dead_nodes_lock);
5952	if (!hlist_empty(&binder_dead_nodes))
5953		seq_puts(m, "dead nodes:\n");
5954	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5955		/*
5956		 * take a temporary reference on the node so it
5957		 * survives and isn't removed from the list
5958		 * while we print it.
5959		 */
5960		node->tmp_refs++;
5961		spin_unlock(&binder_dead_nodes_lock);
5962		if (last_node)
5963			binder_put_node(last_node);
5964		binder_node_lock(node);
5965		print_binder_node_nilocked(m, node);
5966		binder_node_unlock(node);
5967		last_node = node;
5968		spin_lock(&binder_dead_nodes_lock);
5969	}
5970	spin_unlock(&binder_dead_nodes_lock);
5971	if (last_node)
5972		binder_put_node(last_node);
5973
5974	mutex_lock(&binder_procs_lock);
5975	hlist_for_each_entry(proc, &binder_procs, proc_node)
5976		print_binder_proc(m, proc, 1);
5977	mutex_unlock(&binder_procs_lock);
5978
5979	return 0;
5980}
5981
5982int binder_stats_show(struct seq_file *m, void *unused)
5983{
5984	struct binder_proc *proc;
5985
5986	seq_puts(m, "binder stats:\n");
5987
5988	print_binder_stats(m, "", &binder_stats);
5989
5990	mutex_lock(&binder_procs_lock);
5991	hlist_for_each_entry(proc, &binder_procs, proc_node)
5992		print_binder_proc_stats(m, proc);
5993	mutex_unlock(&binder_procs_lock);
5994
5995	return 0;
5996}
5997
5998int binder_transactions_show(struct seq_file *m, void *unused)
5999{
6000	struct binder_proc *proc;
6001
6002	seq_puts(m, "binder transactions:\n");
6003	mutex_lock(&binder_procs_lock);
6004	hlist_for_each_entry(proc, &binder_procs, proc_node)
6005		print_binder_proc(m, proc, 0);
6006	mutex_unlock(&binder_procs_lock);
6007
6008	return 0;
6009}
6010
6011static int proc_show(struct seq_file *m, void *unused)
6012{
6013	struct binder_proc *itr;
6014	int pid = (unsigned long)m->private;
6015
6016	mutex_lock(&binder_procs_lock);
6017	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6018		if (itr->pid == pid) {
6019			seq_puts(m, "binder proc state:\n");
6020			print_binder_proc(m, itr, 1);
6021		}
6022	}
6023	mutex_unlock(&binder_procs_lock);
6024
6025	return 0;
6026}
6027
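/*
 * Log entries are written without locking; the writer sets
 * debug_id_done to 0 before filling an entry and to the final debug
 * id afterwards. Reading it (with the paired barriers) before and
 * after printing lets us flag entries that changed underneath us as
 * "(incomplete)".
 */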
6028static void print_binder_transaction_log_entry(struct seq_file *m,
6029					struct binder_transaction_log_entry *e)
6030{
6031	int debug_id = READ_ONCE(e->debug_id_done);
6032	/*
6033	 * read barrier to guarantee debug_id_done read before
6034	 * we print the log values
6035	 */
6036	smp_rmb();
6037	seq_printf(m,
6038		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6039		   e->debug_id, (e->call_type == 2) ? "reply" :
6040		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6041		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6042		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6043		   e->return_error, e->return_error_param,
6044		   e->return_error_line);
6045	/*
6046	 * read barrier to guarantee debug_id_done is re-read only after
6047	 * the fields of the entry have been printed
6048	 */
6049	smp_rmb();
6050	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6051			"\n" : " (incomplete)\n");
6052}
6053
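/*
 * binder_transaction_log_show() - dump the transaction log ring buffer
 *
 * log->cur counts entries ever added. Until the buffer wraps
 * (log->full), printing starts at slot 0; afterwards it starts at the
 * oldest slot so entries come out in insertion order.
 */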
6054int binder_transaction_log_show(struct seq_file *m, void *unused)
6055{
6056	struct binder_transaction_log *log = m->private;
6057	unsigned int log_cur = atomic_read(&log->cur);
6058	unsigned int count;
6059	unsigned int cur;
6060	int i;
6061
6062	count = log_cur + 1;
6063	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6064		0 : count % ARRAY_SIZE(log->entry);
6065	if (count > ARRAY_SIZE(log->entry) || log->full)
6066		count = ARRAY_SIZE(log->entry);
6067	for (i = 0; i < count; i++) {
6068		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6069
6070		print_binder_transaction_log_entry(m, &log->entry[index]);
6071	}
6072	return 0;
6073}
6074
6075const struct file_operations binder_fops = {
6076	.owner = THIS_MODULE,
6077	.poll = binder_poll,
6078	.unlocked_ioctl = binder_ioctl,
6079	.compat_ioctl = compat_ptr_ioctl,
6080	.mmap = binder_mmap,
6081	.open = binder_open,
6082	.flush = binder_flush,
6083	.release = binder_release,
6084};
6085
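/*
 * init_binder_device() - register one binder misc device
 *
 * Allocates the binder_device for @name, initializes its context,
 * registers it as a dynamic misc character device and links it into
 * the global binder_devices list.
 */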
6086static int __init init_binder_device(const char *name)
6087{
6088	int ret;
6089	struct binder_device *binder_device;
6090
6091	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6092	if (!binder_device)
6093		return -ENOMEM;
6094
6095	binder_device->miscdev.fops = &binder_fops;
6096	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6097	binder_device->miscdev.name = name;
6098
6099	refcount_set(&binder_device->ref, 1);
6100	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6101	binder_device->context.name = name;
6102	mutex_init(&binder_device->context.context_mgr_node_lock);
6103
6104	ret = misc_register(&binder_device->miscdev);
6105	if (ret < 0) {
6106		kfree(binder_device);
6107		return ret;
6108	}
6109
6110	hlist_add_head(&binder_device->hlist, &binder_devices);
6111
6112	return ret;
6113}
6114
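/*
 * binder_init() - driver initialization
 *
 * Registers the binder_alloc shrinker, creates the debugfs hierarchy,
 * instantiates the devices listed in the "devices" module parameter
 * (only when binderfs is disabled), and finally brings up binderfs.
 */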
6115static int __init binder_init(void)
6116{
6117	int ret;
6118	char *device_name, *device_tmp;
6119	struct binder_device *device;
6120	struct hlist_node *tmp;
6121	char *device_names = NULL;
6122
6123	ret = binder_alloc_shrinker_init();
6124	if (ret)
6125		return ret;
6126
6127	atomic_set(&binder_transaction_log.cur, ~0U);
6128	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6129
6130	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6131	if (binder_debugfs_dir_entry_root)
6132		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6133						 binder_debugfs_dir_entry_root);
6134
6135	if (binder_debugfs_dir_entry_root) {
6136		debugfs_create_file("state",
6137				    0444,
6138				    binder_debugfs_dir_entry_root,
6139				    NULL,
6140				    &binder_state_fops);
6141		debugfs_create_file("stats",
6142				    0444,
6143				    binder_debugfs_dir_entry_root,
6144				    NULL,
6145				    &binder_stats_fops);
6146		debugfs_create_file("transactions",
6147				    0444,
6148				    binder_debugfs_dir_entry_root,
6149				    NULL,
6150				    &binder_transactions_fops);
6151		debugfs_create_file("transaction_log",
6152				    0444,
6153				    binder_debugfs_dir_entry_root,
6154				    &binder_transaction_log,
6155				    &binder_transaction_log_fops);
6156		debugfs_create_file("failed_transaction_log",
6157				    0444,
6158				    binder_debugfs_dir_entry_root,
6159				    &binder_transaction_log_failed,
6160				    &binder_transaction_log_fops);
6161	}
6162
6163	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6164	    strcmp(binder_devices_param, "") != 0) {
6165		/*
6166		 * Copy the module parameter string, because we don't want to
6167		 * tokenize it in-place.
6168		 */
6169		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6170		if (!device_names) {
6171			ret = -ENOMEM;
6172			goto err_alloc_device_names_failed;
6173		}
6174
6175		device_tmp = device_names;
6176		while ((device_name = strsep(&device_tmp, ","))) {
6177			ret = init_binder_device(device_name);
6178			if (ret)
6179				goto err_init_binder_device_failed;
6180		}
6181	}
6182
6183	ret = init_binderfs();
6184	if (ret)
6185		goto err_init_binder_device_failed;
6186
6187	return ret;
6188
6189err_init_binder_device_failed:
6190	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6191		misc_deregister(&device->miscdev);
6192		hlist_del(&device->hlist);
6193		kfree(device);
6194	}
6195
6196	kfree(device_names);
6197
6198err_alloc_device_names_failed:
6199	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6200
6201	return ret;
6202}
6203
6204device_initcall(binder_init);
6205
6206#define CREATE_TRACE_POINTS
6207#include "binder_trace.h"
6208
6209MODULE_LICENSE("GPL v2");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* binder.c
   3 *
   4 * Android IPC Subsystem
   5 *
   6 * Copyright (C) 2007-2008 Google, Inc.
   7 */
   8
   9/*
  10 * Locking overview
  11 *
  12 * There are 3 main spinlocks which must be acquired in the
  13 * order shown:
  14 *
  15 * 1) proc->outer_lock : protects binder_ref
  16 *    binder_proc_lock() and binder_proc_unlock() are
  17 *    used to acq/rel.
  18 * 2) node->lock : protects most fields of binder_node.
  19 *    binder_node_lock() and binder_node_unlock() are
  20 *    used to acq/rel
  21 * 3) proc->inner_lock : protects the thread and node lists
  22 *    (proc->threads, proc->waiting_threads, proc->nodes)
  23 *    and all todo lists associated with the binder_proc
  24 *    (proc->todo, thread->todo, proc->delivered_death and
  25 *    node->async_todo), as well as thread->transaction_stack
  26 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
  27 *    are used to acq/rel
  28 *
  29 * Any lock under procA must never be nested under any lock at the same
  30 * level or below on procB.
  31 *
  32 * Functions that require a lock held on entry indicate which lock
  33 * in the suffix of the function name:
  34 *
  35 * foo_olocked() : requires node->outer_lock
  36 * foo_nlocked() : requires node->lock
  37 * foo_ilocked() : requires proc->inner_lock
  38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
  39 * foo_nilocked(): requires node->lock and proc->inner_lock
  40 * ...
  41 */
  42
  43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  44
  45#include <linux/fdtable.h>
  46#include <linux/file.h>
  47#include <linux/freezer.h>
  48#include <linux/fs.h>
  49#include <linux/list.h>
  50#include <linux/miscdevice.h>
  51#include <linux/module.h>
  52#include <linux/mutex.h>
  53#include <linux/nsproxy.h>
  54#include <linux/poll.h>
  55#include <linux/debugfs.h>
  56#include <linux/rbtree.h>
  57#include <linux/sched/signal.h>
  58#include <linux/sched/mm.h>
  59#include <linux/seq_file.h>
  60#include <linux/string.h>
  61#include <linux/uaccess.h>
  62#include <linux/pid_namespace.h>
  63#include <linux/security.h>
  64#include <linux/spinlock.h>
  65#include <linux/ratelimit.h>
  66#include <linux/syscalls.h>
  67#include <linux/task_work.h>
  68#include <linux/sizes.h>
  69
  70#include <uapi/linux/android/binder.h>
 
  71
  72#include <asm/cacheflush.h>
  73
 
  74#include "binder_internal.h"
  75#include "binder_trace.h"
  76
  77static HLIST_HEAD(binder_deferred_list);
  78static DEFINE_MUTEX(binder_deferred_lock);
  79
  80static HLIST_HEAD(binder_devices);
  81static HLIST_HEAD(binder_procs);
  82static DEFINE_MUTEX(binder_procs_lock);
  83
  84static HLIST_HEAD(binder_dead_nodes);
  85static DEFINE_SPINLOCK(binder_dead_nodes_lock);
  86
  87static struct dentry *binder_debugfs_dir_entry_root;
  88static struct dentry *binder_debugfs_dir_entry_proc;
  89static atomic_t binder_last_id;
  90
  91static int proc_show(struct seq_file *m, void *unused);
  92DEFINE_SHOW_ATTRIBUTE(proc);
  93
  94#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
  95
  96enum {
  97	BINDER_DEBUG_USER_ERROR             = 1U << 0,
  98	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
  99	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
 100	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
 101	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
 102	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
 103	BINDER_DEBUG_READ_WRITE             = 1U << 6,
 104	BINDER_DEBUG_USER_REFS              = 1U << 7,
 105	BINDER_DEBUG_THREADS                = 1U << 8,
 106	BINDER_DEBUG_TRANSACTION            = 1U << 9,
 107	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
 108	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
 109	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
 110	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
 111	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
 112};
 113static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 114	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
 115module_param_named(debug_mask, binder_debug_mask, uint, 0644);
 116
 117char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
 118module_param_named(devices, binder_devices_param, charp, 0444);
 119
 120static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
 121static int binder_stop_on_user_error;
 122
 123static int binder_set_stop_on_user_error(const char *val,
 124					 const struct kernel_param *kp)
 125{
 126	int ret;
 127
 128	ret = param_set_int(val, kp);
 129	if (binder_stop_on_user_error < 2)
 130		wake_up(&binder_user_error_wait);
 131	return ret;
 132}
 133module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
 134	param_get_int, &binder_stop_on_user_error, 0644);
 135
 136#define binder_debug(mask, x...) \
 137	do { \
 138		if (binder_debug_mask & mask) \
 139			pr_info_ratelimited(x); \
 140	} while (0)
 141
 142#define binder_user_error(x...) \
 143	do { \
 144		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
 145			pr_info_ratelimited(x); \
 146		if (binder_stop_on_user_error) \
 147			binder_stop_on_user_error = 2; \
 148	} while (0)
 149
 150#define to_flat_binder_object(hdr) \
 151	container_of(hdr, struct flat_binder_object, hdr)
 152
 153#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
 154
 155#define to_binder_buffer_object(hdr) \
 156	container_of(hdr, struct binder_buffer_object, hdr)
 157
 158#define to_binder_fd_array_object(hdr) \
 159	container_of(hdr, struct binder_fd_array_object, hdr)
 160
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 161static struct binder_stats binder_stats;
 162
 163static inline void binder_stats_deleted(enum binder_stat_types type)
 164{
 165	atomic_inc(&binder_stats.obj_deleted[type]);
 166}
 167
 168static inline void binder_stats_created(enum binder_stat_types type)
 169{
 170	atomic_inc(&binder_stats.obj_created[type]);
 171}
 172
 173struct binder_transaction_log binder_transaction_log;
 174struct binder_transaction_log binder_transaction_log_failed;
 175
 176static struct binder_transaction_log_entry *binder_transaction_log_add(
 177	struct binder_transaction_log *log)
 178{
 179	struct binder_transaction_log_entry *e;
 180	unsigned int cur = atomic_inc_return(&log->cur);
 181
 182	if (cur >= ARRAY_SIZE(log->entry))
 183		log->full = true;
 184	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
 185	WRITE_ONCE(e->debug_id_done, 0);
 186	/*
 187	 * write-barrier to synchronize access to e->debug_id_done.
 188	 * We make sure the initialized 0 value is seen before
 189	 * memset() other fields are zeroed by memset.
 190	 */
 191	smp_wmb();
 192	memset(e, 0, sizeof(*e));
 193	return e;
 194}
 195
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 196enum binder_deferred_state {
 197	BINDER_DEFERRED_FLUSH        = 0x01,
 198	BINDER_DEFERRED_RELEASE      = 0x02,
 199};
 200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 201enum {
 202	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
 203	BINDER_LOOPER_STATE_ENTERED     = 0x02,
 204	BINDER_LOOPER_STATE_EXITED      = 0x04,
 205	BINDER_LOOPER_STATE_INVALID     = 0x08,
 206	BINDER_LOOPER_STATE_WAITING     = 0x10,
 207	BINDER_LOOPER_STATE_POLL        = 0x20,
 208};
 209
 210/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 211 * binder_proc_lock() - Acquire outer lock for given binder_proc
 212 * @proc:         struct binder_proc to acquire
 213 *
 214 * Acquires proc->outer_lock. Used to protect binder_ref
 215 * structures associated with the given proc.
 216 */
 217#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
 218static void
 219_binder_proc_lock(struct binder_proc *proc, int line)
 220	__acquires(&proc->outer_lock)
 221{
 222	binder_debug(BINDER_DEBUG_SPINLOCKS,
 223		     "%s: line=%d\n", __func__, line);
 224	spin_lock(&proc->outer_lock);
 225}
 226
 227/**
 228 * binder_proc_unlock() - Release spinlock for given binder_proc
 229 * @proc:         struct binder_proc to acquire
 230 *
 231 * Release lock acquired via binder_proc_lock()
 232 */
 233#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
 234static void
 235_binder_proc_unlock(struct binder_proc *proc, int line)
 236	__releases(&proc->outer_lock)
 237{
 238	binder_debug(BINDER_DEBUG_SPINLOCKS,
 239		     "%s: line=%d\n", __func__, line);
 240	spin_unlock(&proc->outer_lock);
 241}
 242
 243/**
 244 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 245 * @proc:         struct binder_proc to acquire
 246 *
 247 * Acquires proc->inner_lock. Used to protect todo lists
 248 */
 249#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
 250static void
 251_binder_inner_proc_lock(struct binder_proc *proc, int line)
 252	__acquires(&proc->inner_lock)
 253{
 254	binder_debug(BINDER_DEBUG_SPINLOCKS,
 255		     "%s: line=%d\n", __func__, line);
 256	spin_lock(&proc->inner_lock);
 257}
 258
 259/**
 260 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 261 * @proc:         struct binder_proc to acquire
 262 *
 263 * Release lock acquired via binder_inner_proc_lock()
 264 */
 265#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
 266static void
 267_binder_inner_proc_unlock(struct binder_proc *proc, int line)
 268	__releases(&proc->inner_lock)
 269{
 270	binder_debug(BINDER_DEBUG_SPINLOCKS,
 271		     "%s: line=%d\n", __func__, line);
 272	spin_unlock(&proc->inner_lock);
 273}
 274
 275/**
 276 * binder_node_lock() - Acquire spinlock for given binder_node
 277 * @node:         struct binder_node to acquire
 278 *
 279 * Acquires node->lock. Used to protect binder_node fields
 280 */
 281#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
 282static void
 283_binder_node_lock(struct binder_node *node, int line)
 284	__acquires(&node->lock)
 285{
 286	binder_debug(BINDER_DEBUG_SPINLOCKS,
 287		     "%s: line=%d\n", __func__, line);
 288	spin_lock(&node->lock);
 289}
 290
 291/**
 292 * binder_node_unlock() - Release spinlock for given binder_proc
 293 * @node:         struct binder_node to acquire
 294 *
 295 * Release lock acquired via binder_node_lock()
 296 */
 297#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
 298static void
 299_binder_node_unlock(struct binder_node *node, int line)
 300	__releases(&node->lock)
 301{
 302	binder_debug(BINDER_DEBUG_SPINLOCKS,
 303		     "%s: line=%d\n", __func__, line);
 304	spin_unlock(&node->lock);
 305}
 306
 307/**
 308 * binder_node_inner_lock() - Acquire node and inner locks
 309 * @node:         struct binder_node to acquire
 310 *
 311 * Acquires node->lock. If node->proc also acquires
 312 * proc->inner_lock. Used to protect binder_node fields
 313 */
 314#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
 315static void
 316_binder_node_inner_lock(struct binder_node *node, int line)
 317	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
 318{
 319	binder_debug(BINDER_DEBUG_SPINLOCKS,
 320		     "%s: line=%d\n", __func__, line);
 321	spin_lock(&node->lock);
 322	if (node->proc)
 323		binder_inner_proc_lock(node->proc);
 324	else
 325		/* annotation for sparse */
 326		__acquire(&node->proc->inner_lock);
 327}
 328
 329/**
 330 * binder_node_unlock() - Release node and inner locks
 331 * @node:         struct binder_node to acquire
 332 *
 333 * Release lock acquired via binder_node_lock()
 334 */
 335#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
 336static void
 337_binder_node_inner_unlock(struct binder_node *node, int line)
 338	__releases(&node->lock) __releases(&node->proc->inner_lock)
 339{
 340	struct binder_proc *proc = node->proc;
 341
 342	binder_debug(BINDER_DEBUG_SPINLOCKS,
 343		     "%s: line=%d\n", __func__, line);
 344	if (proc)
 345		binder_inner_proc_unlock(proc);
 346	else
 347		/* annotation for sparse */
 348		__release(&node->proc->inner_lock);
 349	spin_unlock(&node->lock);
 350}
 351
 352static bool binder_worklist_empty_ilocked(struct list_head *list)
 353{
 354	return list_empty(list);
 355}
 356
 357/**
 358 * binder_worklist_empty() - Check if no items on the work list
 359 * @proc:       binder_proc associated with list
 360 * @list:	list to check
 361 *
 362 * Return: true if there are no items on list, else false
 363 */
 364static bool binder_worklist_empty(struct binder_proc *proc,
 365				  struct list_head *list)
 366{
 367	bool ret;
 368
 369	binder_inner_proc_lock(proc);
 370	ret = binder_worklist_empty_ilocked(list);
 371	binder_inner_proc_unlock(proc);
 372	return ret;
 373}
 374
 375/**
 376 * binder_enqueue_work_ilocked() - Add an item to the work list
 377 * @work:         struct binder_work to add to list
 378 * @target_list:  list to add work to
 379 *
 380 * Adds the work to the specified list. Asserts that work
 381 * is not already on a list.
 382 *
 383 * Requires the proc->inner_lock to be held.
 384 */
 385static void
 386binder_enqueue_work_ilocked(struct binder_work *work,
 387			   struct list_head *target_list)
 388{
 389	BUG_ON(target_list == NULL);
 390	BUG_ON(work->entry.next && !list_empty(&work->entry));
 391	list_add_tail(&work->entry, target_list);
 392}
 393
 394/**
 395 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 396 * @thread:       thread to queue work to
 397 * @work:         struct binder_work to add to list
 398 *
 399 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 400 * flag, which means that (if it wasn't already set) the thread will go to
 401 * sleep without handling this work when it calls read.
 402 *
 403 * Requires the proc->inner_lock to be held.
 404 */
 405static void
 406binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
 407					    struct binder_work *work)
 408{
 409	WARN_ON(!list_empty(&thread->waiting_thread_node));
 410	binder_enqueue_work_ilocked(work, &thread->todo);
 411}
 412
 413/**
 414 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 415 * @thread:       thread to queue work to
 416 * @work:         struct binder_work to add to list
 417 *
 418 * Adds the work to the todo list of the thread, and enables processing
 419 * of the todo queue.
 420 *
 421 * Requires the proc->inner_lock to be held.
 422 */
 423static void
 424binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
 425				   struct binder_work *work)
 426{
 427	WARN_ON(!list_empty(&thread->waiting_thread_node));
 428	binder_enqueue_work_ilocked(work, &thread->todo);
 429	thread->process_todo = true;
 430}
 431
 432/**
 433 * binder_enqueue_thread_work() - Add an item to the thread work list
 434 * @thread:       thread to queue work to
 435 * @work:         struct binder_work to add to list
 436 *
 437 * Adds the work to the todo list of the thread, and enables processing
 438 * of the todo queue.
 439 */
 440static void
 441binder_enqueue_thread_work(struct binder_thread *thread,
 442			   struct binder_work *work)
 443{
 444	binder_inner_proc_lock(thread->proc);
 445	binder_enqueue_thread_work_ilocked(thread, work);
 446	binder_inner_proc_unlock(thread->proc);
 447}
 448
 449static void
 450binder_dequeue_work_ilocked(struct binder_work *work)
 451{
 452	list_del_init(&work->entry);
 453}
 454
 455/**
 456 * binder_dequeue_work() - Removes an item from the work list
 457 * @proc:         binder_proc associated with list
 458 * @work:         struct binder_work to remove from list
 459 *
 460 * Removes the specified work item from whatever list it is on.
 461 * Can safely be called if work is not on any list.
 462 */
 463static void
 464binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
 465{
 466	binder_inner_proc_lock(proc);
 467	binder_dequeue_work_ilocked(work);
 468	binder_inner_proc_unlock(proc);
 469}
 470
 471static struct binder_work *binder_dequeue_work_head_ilocked(
 472					struct list_head *list)
 473{
 474	struct binder_work *w;
 475
 476	w = list_first_entry_or_null(list, struct binder_work, entry);
 477	if (w)
 478		list_del_init(&w->entry);
 479	return w;
 480}
 481
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 482static void
 483binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
 484static void binder_free_thread(struct binder_thread *thread);
 485static void binder_free_proc(struct binder_proc *proc);
 486static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 487
 488static bool binder_has_work_ilocked(struct binder_thread *thread,
 489				    bool do_proc_work)
 490{
 491	return thread->process_todo ||
 492		thread->looper_need_return ||
 493		(do_proc_work &&
 494		 !binder_worklist_empty_ilocked(&thread->proc->todo));
 495}
 496
 497static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
 498{
 499	bool has_work;
 500
 501	binder_inner_proc_lock(thread->proc);
 502	has_work = binder_has_work_ilocked(thread, do_proc_work);
 503	binder_inner_proc_unlock(thread->proc);
 504
 505	return has_work;
 506}
 507
 508static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
 509{
 510	return !thread->transaction_stack &&
 511		binder_worklist_empty_ilocked(&thread->todo) &&
 512		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
 513				   BINDER_LOOPER_STATE_REGISTERED));
 514}
 515
 516static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
 517					       bool sync)
 518{
 519	struct rb_node *n;
 520	struct binder_thread *thread;
 521
 522	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
 523		thread = rb_entry(n, struct binder_thread, rb_node);
 524		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
 525		    binder_available_for_proc_work_ilocked(thread)) {
 526			if (sync)
 527				wake_up_interruptible_sync(&thread->wait);
 528			else
 529				wake_up_interruptible(&thread->wait);
 530		}
 531	}
 532}
 533
 534/**
 535 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 536 * @proc:	process to select a thread from
 537 *
 538 * Note that calling this function moves the thread off the waiting_threads
 539 * list, so it can only be woken up by the caller of this function, or a
 540 * signal. Therefore, callers *should* always wake up the thread this function
 541 * returns.
 542 *
 543 * Return:	If there's a thread currently waiting for process work,
 544 *		returns that thread. Otherwise returns NULL.
 545 */
 546static struct binder_thread *
 547binder_select_thread_ilocked(struct binder_proc *proc)
 548{
 549	struct binder_thread *thread;
 550
 551	assert_spin_locked(&proc->inner_lock);
 552	thread = list_first_entry_or_null(&proc->waiting_threads,
 553					  struct binder_thread,
 554					  waiting_thread_node);
 555
 556	if (thread)
 557		list_del_init(&thread->waiting_thread_node);
 558
 559	return thread;
 560}
 561
 562/**
 563 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 564 * @proc:	process to wake up a thread in
 565 * @thread:	specific thread to wake-up (may be NULL)
 566 * @sync:	whether to do a synchronous wake-up
 567 *
 568 * This function wakes up a thread in the @proc process.
 569 * The caller may provide a specific thread to wake-up in
 570 * the @thread parameter. If @thread is NULL, this function
 571 * will wake up threads that have called poll().
 572 *
 573 * Note that for this function to work as expected, callers
 574 * should first call binder_select_thread() to find a thread
 575 * to handle the work (if they don't have a thread already),
 576 * and pass the result into the @thread parameter.
 577 */
 578static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
 579					 struct binder_thread *thread,
 580					 bool sync)
 581{
 582	assert_spin_locked(&proc->inner_lock);
 583
 584	if (thread) {
 585		if (sync)
 586			wake_up_interruptible_sync(&thread->wait);
 587		else
 588			wake_up_interruptible(&thread->wait);
 589		return;
 590	}
 591
 592	/* Didn't find a thread waiting for proc work; this can happen
 593	 * in two scenarios:
 594	 * 1. All threads are busy handling transactions
 595	 *    In that case, one of those threads should call back into
 596	 *    the kernel driver soon and pick up this work.
 597	 * 2. Threads are using the (e)poll interface, in which case
 598	 *    they may be blocked on the waitqueue without having been
 599	 *    added to waiting_threads. For this case, we just iterate
 600	 *    over all threads not handling transaction work, and
 601	 *    wake them all up. We wake all because we don't know whether
 602	 *    a thread that called into (e)poll is handling non-binder
 603	 *    work currently.
 604	 */
 605	binder_wakeup_poll_threads_ilocked(proc, sync);
 606}
 607
 608static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
 609{
 610	struct binder_thread *thread = binder_select_thread_ilocked(proc);
 611
 612	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
 613}
 614
 615static void binder_set_nice(long nice)
 616{
 617	long min_nice;
 618
 619	if (can_nice(current, nice)) {
 620		set_user_nice(current, nice);
 621		return;
 622	}
 623	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
 624	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
 625		     "%d: nice value %ld not allowed use %ld instead\n",
 626		      current->pid, nice, min_nice);
 627	set_user_nice(current, min_nice);
 628	if (min_nice <= MAX_NICE)
 629		return;
 630	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
 631}
 632
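/*
 * Editorial worked example (not in the original source): assuming
 * rlimit_to_nice() from <linux/sched/prio.h>, which computes
 * MAX_NICE - rlimit + 1, an RLIMIT_NICE soft limit of 15 yields
 * min_nice = 19 - 15 + 1 = 5, so a request above for nice -10 is
 * clamped to 5. A limit of 0 yields 20, which exceeds MAX_NICE (19)
 * and triggers the "RLIMIT_NICE not set" warning.
 */
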
 633static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
 634						   binder_uintptr_t ptr)
 635{
 636	struct rb_node *n = proc->nodes.rb_node;
 637	struct binder_node *node;
 638
 639	assert_spin_locked(&proc->inner_lock);
 640
 641	while (n) {
 642		node = rb_entry(n, struct binder_node, rb_node);
 643
 644		if (ptr < node->ptr)
 645			n = n->rb_left;
 646		else if (ptr > node->ptr)
 647			n = n->rb_right;
 648		else {
 649			/*
 650			 * take an implicit weak reference
 651			 * to ensure node stays alive until
 652			 * call to binder_put_node()
 653			 */
 654			binder_inc_node_tmpref_ilocked(node);
 655			return node;
 656		}
 657	}
 658	return NULL;
 659}
 660
 661static struct binder_node *binder_get_node(struct binder_proc *proc,
 662					   binder_uintptr_t ptr)
 663{
 664	struct binder_node *node;
 665
 666	binder_inner_proc_lock(proc);
 667	node = binder_get_node_ilocked(proc, ptr);
 668	binder_inner_proc_unlock(proc);
 669	return node;
 670}
 671
 672static struct binder_node *binder_init_node_ilocked(
 673						struct binder_proc *proc,
 674						struct binder_node *new_node,
 675						struct flat_binder_object *fp)
 676{
 677	struct rb_node **p = &proc->nodes.rb_node;
 678	struct rb_node *parent = NULL;
 679	struct binder_node *node;
 680	binder_uintptr_t ptr = fp ? fp->binder : 0;
 681	binder_uintptr_t cookie = fp ? fp->cookie : 0;
 682	__u32 flags = fp ? fp->flags : 0;
 683
 684	assert_spin_locked(&proc->inner_lock);
 685
 686	while (*p) {
 687
 688		parent = *p;
 689		node = rb_entry(parent, struct binder_node, rb_node);
 690
 691		if (ptr < node->ptr)
 692			p = &(*p)->rb_left;
 693		else if (ptr > node->ptr)
 694			p = &(*p)->rb_right;
 695		else {
 696			/*
 697			 * A matching node is already in
 698			 * the rb tree. Abandon the init
 699			 * and return it.
 700			 */
 701			binder_inc_node_tmpref_ilocked(node);
 702			return node;
 703		}
 704	}
 705	node = new_node;
 706	binder_stats_created(BINDER_STAT_NODE);
 707	node->tmp_refs++;
 708	rb_link_node(&node->rb_node, parent, p);
 709	rb_insert_color(&node->rb_node, &proc->nodes);
 710	node->debug_id = atomic_inc_return(&binder_last_id);
 711	node->proc = proc;
 712	node->ptr = ptr;
 713	node->cookie = cookie;
 714	node->work.type = BINDER_WORK_NODE;
 715	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
 716	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
 717	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
 718	spin_lock_init(&node->lock);
 719	INIT_LIST_HEAD(&node->work.entry);
 720	INIT_LIST_HEAD(&node->async_todo);
 721	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 722		     "%d:%d node %d u%016llx c%016llx created\n",
 723		     proc->pid, current->pid, node->debug_id,
 724		     (u64)node->ptr, (u64)node->cookie);
 725
 726	return node;
 727}
 728
 729static struct binder_node *binder_new_node(struct binder_proc *proc,
 730					   struct flat_binder_object *fp)
 731{
 732	struct binder_node *node;
 733	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
 734
 735	if (!new_node)
 736		return NULL;
 737	binder_inner_proc_lock(proc);
 738	node = binder_init_node_ilocked(proc, new_node, fp);
 739	binder_inner_proc_unlock(proc);
 740	if (node != new_node)
 741		/*
 742		 * The node was already added by another thread
 743		 */
 744		kfree(new_node);
 745
 746	return node;
 747}
 748
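/*
 * Editorial sketch of the allocation pattern above: kzalloc() with
 * GFP_KERNEL may sleep, so it must happen before proc->inner_lock
 * (a spinlock) is taken, and a racing insert is detected afterwards:
 *
 *	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);  // may sleep
 *	binder_inner_proc_lock(proc);                       // atomic from here
 *	node = binder_init_node_ilocked(proc, new_node, fp);
 *	binder_inner_proc_unlock(proc);
 *	if (node != new_node)
 *		kfree(new_node);                            // lost the race
 */
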
 749static void binder_free_node(struct binder_node *node)
 750{
 751	kfree(node);
 752	binder_stats_deleted(BINDER_STAT_NODE);
 753}
 754
 755static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 756				    int internal,
 757				    struct list_head *target_list)
 758{
 759	struct binder_proc *proc = node->proc;
 760
 761	assert_spin_locked(&node->lock);
 762	if (proc)
 763		assert_spin_locked(&proc->inner_lock);
 764	if (strong) {
 765		if (internal) {
 766			if (target_list == NULL &&
 767			    node->internal_strong_refs == 0 &&
 768			    !(node->proc &&
 769			      node == node->proc->context->binder_context_mgr_node &&
 770			      node->has_strong_ref)) {
 771				pr_err("invalid inc strong node for %d\n",
 772					node->debug_id);
 773				return -EINVAL;
 774			}
 775			node->internal_strong_refs++;
 776		} else
 777			node->local_strong_refs++;
 778		if (!node->has_strong_ref && target_list) {
 779			struct binder_thread *thread = container_of(target_list,
 780						    struct binder_thread, todo);
 781			binder_dequeue_work_ilocked(&node->work);
 782			BUG_ON(&thread->todo != target_list);
 783			binder_enqueue_deferred_thread_work_ilocked(thread,
 784								   &node->work);
 785		}
 786	} else {
 787		if (!internal)
 788			node->local_weak_refs++;
 789		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
 790			if (target_list == NULL) {
 791				pr_err("invalid inc weak node for %d\n",
 792					node->debug_id);
 793				return -EINVAL;
 794			}
 795			/*
 796			 * Enqueue the node work, as in the strong-ref case above
 797			 */
 798			binder_enqueue_work_ilocked(&node->work, target_list);
 799		}
 800	}
 801	return 0;
 802}
 803
 804static int binder_inc_node(struct binder_node *node, int strong, int internal,
 805			   struct list_head *target_list)
 806{
 807	int ret;
 808
 809	binder_node_inner_lock(node);
 810	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
 811	binder_node_inner_unlock(node);
 812
 813	return ret;
 814}
 815
 816static bool binder_dec_node_nilocked(struct binder_node *node,
 817				     int strong, int internal)
 818{
 819	struct binder_proc *proc = node->proc;
 820
 821	assert_spin_locked(&node->lock);
 822	if (proc)
 823		assert_spin_locked(&proc->inner_lock);
 824	if (strong) {
 825		if (internal)
 826			node->internal_strong_refs--;
 827		else
 828			node->local_strong_refs--;
 829		if (node->local_strong_refs || node->internal_strong_refs)
 830			return false;
 831	} else {
 832		if (!internal)
 833			node->local_weak_refs--;
 834		if (node->local_weak_refs || node->tmp_refs ||
 835				!hlist_empty(&node->refs))
 836			return false;
 837	}
 838
 839	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
 840		if (list_empty(&node->work.entry)) {
 841			binder_enqueue_work_ilocked(&node->work, &proc->todo);
 842			binder_wakeup_proc_ilocked(proc);
 843		}
 844	} else {
 845		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
 846		    !node->local_weak_refs && !node->tmp_refs) {
 847			if (proc) {
 848				binder_dequeue_work_ilocked(&node->work);
 849				rb_erase(&node->rb_node, &proc->nodes);
 850				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 851					     "refless node %d deleted\n",
 852					     node->debug_id);
 853			} else {
 854				BUG_ON(!list_empty(&node->work.entry));
 855				spin_lock(&binder_dead_nodes_lock);
 856				/*
 857				 * tmp_refs could have changed so
 858				 * check it again
 859				 */
 860				if (node->tmp_refs) {
 861					spin_unlock(&binder_dead_nodes_lock);
 862					return false;
 863				}
 864				hlist_del(&node->dead_node);
 865				spin_unlock(&binder_dead_nodes_lock);
 866				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 867					     "dead node %d deleted\n",
 868					     node->debug_id);
 869			}
 870			return true;
 871		}
 872	}
 873	return false;
 874}
 875
 876static void binder_dec_node(struct binder_node *node, int strong, int internal)
 877{
 878	bool free_node;
 879
 880	binder_node_inner_lock(node);
 881	free_node = binder_dec_node_nilocked(node, strong, internal);
 882	binder_node_inner_unlock(node);
 883	if (free_node)
 884		binder_free_node(node);
 885}
 886
 887static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
 888{
 889	/*
 890	 * No call to binder_inc_node() is needed since we
 891	 * don't need to inform userspace of any changes to
 892	 * tmp_refs
 893	 */
 894	node->tmp_refs++;
 895}
 896
 897/**
 898 * binder_inc_node_tmpref() - take a temporary reference on node
 899 * @node:	node to reference
 900 *
 901 * Take reference on node to prevent the node from being freed
 902 * while referenced only by a local variable. The inner lock is
 903 * needed to serialize with the node work on the queue (which
 904 * isn't needed after the node is dead). If the node is dead
 905 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 906 * node->tmp_refs against dead-node-only cases where the node
 907 * lock cannot be acquired (e.g. traversing the dead node list to
 908 * print nodes)
 909 */
 910static void binder_inc_node_tmpref(struct binder_node *node)
 911{
 912	binder_node_lock(node);
 913	if (node->proc)
 914		binder_inner_proc_lock(node->proc);
 915	else
 916		spin_lock(&binder_dead_nodes_lock);
 917	binder_inc_node_tmpref_ilocked(node);
 918	if (node->proc)
 919		binder_inner_proc_unlock(node->proc);
 920	else
 921		spin_unlock(&binder_dead_nodes_lock);
 922	binder_node_unlock(node);
 923}
 924
 925/**
 926 * binder_dec_node_tmpref() - remove a temporary reference on node
 927 * @node:	node to reference
 928 *
 929 * Release temporary reference on node taken via binder_inc_node_tmpref()
 930 */
 931static void binder_dec_node_tmpref(struct binder_node *node)
 932{
 933	bool free_node;
 934
 935	binder_node_inner_lock(node);
 936	if (!node->proc)
 937		spin_lock(&binder_dead_nodes_lock);
 938	else
 939		__acquire(&binder_dead_nodes_lock);
 940	node->tmp_refs--;
 941	BUG_ON(node->tmp_refs < 0);
 942	if (!node->proc)
 943		spin_unlock(&binder_dead_nodes_lock);
 944	else
 945		__release(&binder_dead_nodes_lock);
 946	/*
 947	 * Call binder_dec_node() to check if all refcounts are 0
 948	 * and cleanup is needed. Calling with strong=0 and internal=1
 949	 * causes no actual reference to be released in binder_dec_node().
 950	 * If that changes, a change is needed here too.
 951	 */
 952	free_node = binder_dec_node_nilocked(node, 0, 1);
 953	binder_node_inner_unlock(node);
 954	if (free_node)
 955		binder_free_node(node);
 956}
 957
 958static void binder_put_node(struct binder_node *node)
 959{
 960	binder_dec_node_tmpref(node);
 961}
 962
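/*
 * Usage sketch (editorial): every successful binder_get_node() or
 * binder_get_node_from_ref() takes an implicit tmp ref, so it must
 * be paired with binder_put_node() once the caller is done:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... node cannot be freed while the tmp ref is held ...
 *		binder_put_node(node);
 *	}
 */
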
 963static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
 964						 u32 desc, bool need_strong_ref)
 965{
 966	struct rb_node *n = proc->refs_by_desc.rb_node;
 967	struct binder_ref *ref;
 968
 969	while (n) {
 970		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 971
 972		if (desc < ref->data.desc) {
 973			n = n->rb_left;
 974		} else if (desc > ref->data.desc) {
 975			n = n->rb_right;
 976		} else if (need_strong_ref && !ref->data.strong) {
 977			binder_user_error("tried to use weak ref as strong ref\n");
 978			return NULL;
 979		} else {
 980			return ref;
 981		}
 982	}
 983	return NULL;
 984}
 985
 986/**
 987 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 988 * @proc:	binder_proc that owns the ref
 989 * @node:	binder_node of target
 990 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 991 *
 992 * Look up the ref for the given node and return it if it exists
 993 *
 994 * If it doesn't exist and the caller provides a newly allocated
 995 * ref, initialize the fields of the newly allocated ref and insert
 996 * into the given proc rb_trees and node refs list.
 997 *
 998 * Return:	the ref for node. It is possible that another thread
 999 *		allocated/initialized the ref first in which case the
1000 *		returned ref would be different than the passed-in
1001 *		new_ref. new_ref must be kfree'd by the caller in
1002 *		this case.
1003 */
1004static struct binder_ref *binder_get_ref_for_node_olocked(
1005					struct binder_proc *proc,
1006					struct binder_node *node,
1007					struct binder_ref *new_ref)
1008{
1009	struct binder_context *context = proc->context;
1010	struct rb_node **p = &proc->refs_by_node.rb_node;
1011	struct rb_node *parent = NULL;
1012	struct binder_ref *ref;
1013	struct rb_node *n;
1014
1015	while (*p) {
1016		parent = *p;
1017		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1018
1019		if (node < ref->node)
1020			p = &(*p)->rb_left;
1021		else if (node > ref->node)
1022			p = &(*p)->rb_right;
1023		else
1024			return ref;
1025	}
1026	if (!new_ref)
1027		return NULL;
1028
1029	binder_stats_created(BINDER_STAT_REF);
1030	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1031	new_ref->proc = proc;
1032	new_ref->node = node;
1033	rb_link_node(&new_ref->rb_node_node, parent, p);
1034	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1035
1036	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1037	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1038		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1039		if (ref->data.desc > new_ref->data.desc)
1040			break;
1041		new_ref->data.desc = ref->data.desc + 1;
1042	}
1043
1044	p = &proc->refs_by_desc.rb_node;
1045	while (*p) {
1046		parent = *p;
1047		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1048
1049		if (new_ref->data.desc < ref->data.desc)
1050			p = &(*p)->rb_left;
1051		else if (new_ref->data.desc > ref->data.desc)
1052			p = &(*p)->rb_right;
1053		else
1054			BUG();
1055	}
1056	rb_link_node(&new_ref->rb_node_desc, parent, p);
1057	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1058
1059	binder_node_lock(node);
1060	hlist_add_head(&new_ref->node_entry, &node->refs);
1061
1062	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1063		     "%d new ref %d desc %d for node %d\n",
1064		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1065		      node->debug_id);
1066	binder_node_unlock(node);
1067	return new_ref;
1068}
1069
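/*
 * Editorial worked example of the desc assignment above: refs_by_desc
 * is ordered by desc, so the scan assigns the smallest unused value.
 * With existing descs {1, 2, 4} (and @node not the context manager),
 * desc starts at 1, becomes 2 after ref 1 and 3 after ref 2, and the
 * loop breaks at ref 4, so the new ref gets desc 3. Desc 0 is
 * reserved for refs to the context manager node.
 */
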
1070static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1071{
1072	bool delete_node = false;
1073
1074	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1075		     "%d delete ref %d desc %d for node %d\n",
1076		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1077		      ref->node->debug_id);
1078
1079	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1080	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1081
1082	binder_node_inner_lock(ref->node);
1083	if (ref->data.strong)
1084		binder_dec_node_nilocked(ref->node, 1, 1);
1085
1086	hlist_del(&ref->node_entry);
1087	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1088	binder_node_inner_unlock(ref->node);
1089	/*
1090	 * Clear ref->node unless we want the caller to free the node
1091	 */
1092	if (!delete_node) {
1093		/*
1094		 * The caller uses ref->node to determine
1095		 * whether the node needs to be freed. Clear
1096		 * it since the node is still alive.
1097		 */
1098		ref->node = NULL;
1099	}
1100
1101	if (ref->death) {
1102		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1103			     "%d delete ref %d desc %d has death notification\n",
1104			      ref->proc->pid, ref->data.debug_id,
1105			      ref->data.desc);
1106		binder_dequeue_work(ref->proc, &ref->death->work);
1107		binder_stats_deleted(BINDER_STAT_DEATH);
1108	}
1109	binder_stats_deleted(BINDER_STAT_REF);
1110}
1111
1112/**
1113 * binder_inc_ref_olocked() - increment the ref for given handle
1114 * @ref:         ref to be incremented
1115 * @strong:      if true, strong increment, else weak
1116 * @target_list: list to queue node work on
1117 *
1118 * Increment the ref. @ref->proc->outer_lock must be held on entry
1119 *
1120 * Return: 0, if successful, else errno
1121 */
1122static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1123				  struct list_head *target_list)
1124{
1125	int ret;
1126
1127	if (strong) {
1128		if (ref->data.strong == 0) {
1129			ret = binder_inc_node(ref->node, 1, 1, target_list);
1130			if (ret)
1131				return ret;
1132		}
1133		ref->data.strong++;
1134	} else {
1135		if (ref->data.weak == 0) {
1136			ret = binder_inc_node(ref->node, 0, 1, target_list);
1137			if (ret)
1138				return ret;
1139		}
1140		ref->data.weak++;
1141	}
1142	return 0;
1143}
1144
1145/**
 1146 * binder_dec_ref_olocked() - dec the ref for given handle
1147 * @ref:	ref to be decremented
1148 * @strong:	if true, strong decrement, else weak
1149 *
 1150 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1151 *
1152 * Return: true if ref is cleaned up and ready to be freed
1153 */
1154static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1155{
1156	if (strong) {
1157		if (ref->data.strong == 0) {
1158			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1159					  ref->proc->pid, ref->data.debug_id,
1160					  ref->data.desc, ref->data.strong,
1161					  ref->data.weak);
1162			return false;
1163		}
1164		ref->data.strong--;
1165		if (ref->data.strong == 0)
1166			binder_dec_node(ref->node, strong, 1);
1167	} else {
1168		if (ref->data.weak == 0) {
1169			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1170					  ref->proc->pid, ref->data.debug_id,
1171					  ref->data.desc, ref->data.strong,
1172					  ref->data.weak);
1173			return false;
1174		}
1175		ref->data.weak--;
1176	}
1177	if (ref->data.strong == 0 && ref->data.weak == 0) {
1178		binder_cleanup_ref_olocked(ref);
1179		return true;
1180	}
1181	return false;
1182}
1183
1184/**
1185 * binder_get_node_from_ref() - get the node from the given proc/desc
1186 * @proc:	proc containing the ref
1187 * @desc:	the handle associated with the ref
1188 * @need_strong_ref: if true, only return node if ref is strong
1189 * @rdata:	the id/refcount data for the ref
1190 *
1191 * Given a proc and ref handle, return the associated binder_node
1192 *
 1193 * Return: a binder_node, or NULL if no ref was found or the ref was weak while @need_strong_ref was set
1194 */
1195static struct binder_node *binder_get_node_from_ref(
1196		struct binder_proc *proc,
1197		u32 desc, bool need_strong_ref,
1198		struct binder_ref_data *rdata)
1199{
1200	struct binder_node *node;
1201	struct binder_ref *ref;
1202
1203	binder_proc_lock(proc);
1204	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1205	if (!ref)
1206		goto err_no_ref;
1207	node = ref->node;
1208	/*
1209	 * Take an implicit reference on the node to ensure
1210	 * it stays alive until the call to binder_put_node()
1211	 */
1212	binder_inc_node_tmpref(node);
1213	if (rdata)
1214		*rdata = ref->data;
1215	binder_proc_unlock(proc);
1216
1217	return node;
1218
1219err_no_ref:
1220	binder_proc_unlock(proc);
1221	return NULL;
1222}
1223
1224/**
1225 * binder_free_ref() - free the binder_ref
1226 * @ref:	ref to free
1227 *
1228 * Free the binder_ref. Free the binder_node indicated by ref->node
1229 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1230 */
1231static void binder_free_ref(struct binder_ref *ref)
1232{
1233	if (ref->node)
1234		binder_free_node(ref->node);
1235	kfree(ref->death);
1236	kfree(ref);
1237}
1238
1239/**
1240 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1241 * @proc:	proc containing the ref
1242 * @desc:	the handle associated with the ref
1243 * @increment:	true=inc reference, false=dec reference
1244 * @strong:	true=strong reference, false=weak reference
1245 * @rdata:	the id/refcount data for the ref
1246 *
1247 * Given a proc and ref handle, increment or decrement the ref
1248 * according to "increment" arg.
1249 *
1250 * Return: 0 if successful, else errno
1251 */
1252static int binder_update_ref_for_handle(struct binder_proc *proc,
1253		uint32_t desc, bool increment, bool strong,
1254		struct binder_ref_data *rdata)
1255{
1256	int ret = 0;
1257	struct binder_ref *ref;
1258	bool delete_ref = false;
1259
1260	binder_proc_lock(proc);
1261	ref = binder_get_ref_olocked(proc, desc, strong);
1262	if (!ref) {
1263		ret = -EINVAL;
1264		goto err_no_ref;
1265	}
1266	if (increment)
1267		ret = binder_inc_ref_olocked(ref, strong, NULL);
1268	else
1269		delete_ref = binder_dec_ref_olocked(ref, strong);
1270
1271	if (rdata)
1272		*rdata = ref->data;
1273	binder_proc_unlock(proc);
1274
1275	if (delete_ref)
1276		binder_free_ref(ref);
1277	return ret;
1278
1279err_no_ref:
1280	binder_proc_unlock(proc);
1281	return ret;
1282}
1283
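/*
 * Editorial note on the shape of binder_update_ref_for_handle():
 * binder_dec_ref_olocked() only unlinks the ref while the outer
 * lock is held; the actual kfree() is deferred to binder_free_ref()
 * after the lock is dropped:
 *
 *	binder_proc_lock(proc);
 *	delete_ref = binder_dec_ref_olocked(ref, strong);
 *	binder_proc_unlock(proc);
 *	if (delete_ref)
 *		binder_free_ref(ref);
 */
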
1284/**
1285 * binder_dec_ref_for_handle() - dec the ref for given handle
1286 * @proc:	proc containing the ref
1287 * @desc:	the handle associated with the ref
1288 * @strong:	true=strong reference, false=weak reference
1289 * @rdata:	the id/refcount data for the ref
1290 *
1291 * Just calls binder_update_ref_for_handle() to decrement the ref.
1292 *
1293 * Return: 0 if successful, else errno
1294 */
1295static int binder_dec_ref_for_handle(struct binder_proc *proc,
1296		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1297{
1298	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1299}
1300
1301
1302/**
1303 * binder_inc_ref_for_node() - increment the ref for given proc/node
1304 * @proc:	 proc containing the ref
1305 * @node:	 target node
1306 * @strong:	 true=strong reference, false=weak reference
1307 * @target_list: worklist to use if node is incremented
1308 * @rdata:	 the id/refcount data for the ref
1309 *
1310 * Given a proc and node, increment the ref. Create the ref if it
1311 * doesn't already exist
1312 *
1313 * Return: 0 if successful, else errno
1314 */
1315static int binder_inc_ref_for_node(struct binder_proc *proc,
1316			struct binder_node *node,
1317			bool strong,
1318			struct list_head *target_list,
1319			struct binder_ref_data *rdata)
1320{
1321	struct binder_ref *ref;
1322	struct binder_ref *new_ref = NULL;
1323	int ret = 0;
1324
1325	binder_proc_lock(proc);
1326	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1327	if (!ref) {
1328		binder_proc_unlock(proc);
1329		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1330		if (!new_ref)
1331			return -ENOMEM;
1332		binder_proc_lock(proc);
1333		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1334	}
1335	ret = binder_inc_ref_olocked(ref, strong, target_list);
1336	*rdata = ref->data;
1337	binder_proc_unlock(proc);
1338	if (new_ref && ref != new_ref)
1339		/*
1340		 * Another thread created the ref first so
1341		 * free the one we allocated
1342		 */
1343		kfree(new_ref);
1344	return ret;
1345}
1346
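/*
 * Editorial note: as in binder_new_node(), the lock is dropped before
 * the sleeping kzalloc() and the lookup is then repeated under the
 * re-taken lock. A racing thread may have inserted a ref in that
 * window, which is why the result is compared against new_ref and the
 * unused allocation freed.
 */
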
1347static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1348					   struct binder_transaction *t)
1349{
1350	BUG_ON(!target_thread);
1351	assert_spin_locked(&target_thread->proc->inner_lock);
1352	BUG_ON(target_thread->transaction_stack != t);
1353	BUG_ON(target_thread->transaction_stack->from != target_thread);
1354	target_thread->transaction_stack =
1355		target_thread->transaction_stack->from_parent;
1356	t->from = NULL;
1357}
1358
1359/**
1360 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1361 * @thread:	thread to decrement
1362 *
1363 * A thread needs to be kept alive while being used to create or
1364 * handle a transaction. binder_get_txn_from() is used to safely
1365 * extract t->from from a binder_transaction and keep the thread
1366 * indicated by t->from from being freed. When done with that
1367 * binder_thread, this function is called to decrement the
1368 * tmp_ref and free if appropriate (thread has been released
1369 * and no transaction being processed by the driver)
1370 */
1371static void binder_thread_dec_tmpref(struct binder_thread *thread)
1372{
1373	/*
 1374	 * The counter is atomic, so it can be updated locklessly; the
 1375	 * inner lock makes the is_dead/zero check and the free race-free
1376	 */
1377	binder_inner_proc_lock(thread->proc);
1378	atomic_dec(&thread->tmp_ref);
1379	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1380		binder_inner_proc_unlock(thread->proc);
1381		binder_free_thread(thread);
1382		return;
1383	}
1384	binder_inner_proc_unlock(thread->proc);
1385}
1386
1387/**
1388 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1389 * @proc:	proc to decrement
1390 *
1391 * A binder_proc needs to be kept alive while being used to create or
1392 * handle a transaction. proc->tmp_ref is incremented when
1393 * creating a new transaction or the binder_proc is currently in-use
1394 * by threads that are being released. When done with the binder_proc,
1395 * this function is called to decrement the counter and free the
1396 * proc if appropriate (proc has been released, all threads have
 1397 * been released and not currently in-use to process a transaction).
1398 */
1399static void binder_proc_dec_tmpref(struct binder_proc *proc)
1400{
1401	binder_inner_proc_lock(proc);
1402	proc->tmp_ref--;
1403	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1404			!proc->tmp_ref) {
1405		binder_inner_proc_unlock(proc);
1406		binder_free_proc(proc);
1407		return;
1408	}
1409	binder_inner_proc_unlock(proc);
1410}
1411
1412/**
1413 * binder_get_txn_from() - safely extract the "from" thread in transaction
1414 * @t:	binder transaction for t->from
1415 *
1416 * Atomically return the "from" thread and increment the tmp_ref
1417 * count for the thread to ensure it stays alive until
1418 * binder_thread_dec_tmpref() is called.
1419 *
1420 * Return: the value of t->from
1421 */
1422static struct binder_thread *binder_get_txn_from(
1423		struct binder_transaction *t)
1424{
1425	struct binder_thread *from;
1426
1427	spin_lock(&t->lock);
1428	from = t->from;
1429	if (from)
1430		atomic_inc(&from->tmp_ref);
1431	spin_unlock(&t->lock);
1432	return from;
1433}
1434
1435/**
1436 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1437 * @t:	binder transaction for t->from
1438 *
1439 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1440 * to guarantee that the thread cannot be released while operating on it.
1441 * The caller must call binder_inner_proc_unlock() to release the inner lock
 1442 * as well as call binder_thread_dec_tmpref() to release the reference.
1443 *
1444 * Return: the value of t->from
1445 */
1446static struct binder_thread *binder_get_txn_from_and_acq_inner(
1447		struct binder_transaction *t)
1448	__acquires(&t->from->proc->inner_lock)
1449{
1450	struct binder_thread *from;
1451
1452	from = binder_get_txn_from(t);
1453	if (!from) {
1454		__acquire(&from->proc->inner_lock);
1455		return NULL;
1456	}
1457	binder_inner_proc_lock(from->proc);
1458	if (t->from) {
1459		BUG_ON(from != t->from);
1460		return from;
1461	}
1462	binder_inner_proc_unlock(from->proc);
1463	__acquire(&from->proc->inner_lock);
1464	binder_thread_dec_tmpref(from);
1465	return NULL;
1466}
1467
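/*
 * Usage sketch (editorial) for binder_get_txn_from_and_acq_inner();
 * binder_send_failed_reply() below follows exactly this shape:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		... t->from is stable while the inner lock is held ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */
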
1468/**
1469 * binder_free_txn_fixups() - free unprocessed fd fixups
 1470 * @t:	binder transaction whose fd fixups should be freed
1471 *
1472 * If the transaction is being torn down prior to being
1473 * processed by the target process, free all of the
1474 * fd fixups and fput the file structs. It is safe to
1475 * call this function after the fixups have been
1476 * processed -- in that case, the list will be empty.
1477 */
1478static void binder_free_txn_fixups(struct binder_transaction *t)
1479{
1480	struct binder_txn_fd_fixup *fixup, *tmp;
1481
1482	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1483		fput(fixup->file);
1484		list_del(&fixup->fixup_entry);
1485		kfree(fixup);
1486	}
1487}
1488
1489static void binder_txn_latency_free(struct binder_transaction *t)
1490{
1491	int from_proc, from_thread, to_proc, to_thread;
1492
1493	spin_lock(&t->lock);
1494	from_proc = t->from ? t->from->proc->pid : 0;
1495	from_thread = t->from ? t->from->pid : 0;
1496	to_proc = t->to_proc ? t->to_proc->pid : 0;
1497	to_thread = t->to_thread ? t->to_thread->pid : 0;
1498	spin_unlock(&t->lock);
1499
1500	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1501}
1502
1503static void binder_free_transaction(struct binder_transaction *t)
1504{
1505	struct binder_proc *target_proc = t->to_proc;
1506
1507	if (target_proc) {
1508		binder_inner_proc_lock(target_proc);
1509		target_proc->outstanding_txns--;
1510		if (target_proc->outstanding_txns < 0)
1511			pr_warn("%s: Unexpected outstanding_txns %d\n",
1512				__func__, target_proc->outstanding_txns);
1513		if (!target_proc->outstanding_txns && target_proc->is_frozen)
1514			wake_up_interruptible_all(&target_proc->freeze_wait);
1515		if (t->buffer)
1516			t->buffer->transaction = NULL;
1517		binder_inner_proc_unlock(target_proc);
1518	}
1519	if (trace_binder_txn_latency_free_enabled())
1520		binder_txn_latency_free(t);
1521	/*
1522	 * If the transaction has no target_proc, then
1523	 * t->buffer->transaction has already been cleared.
1524	 */
1525	binder_free_txn_fixups(t);
1526	kfree(t);
1527	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1528}
1529
1530static void binder_send_failed_reply(struct binder_transaction *t,
1531				     uint32_t error_code)
1532{
1533	struct binder_thread *target_thread;
1534	struct binder_transaction *next;
1535
1536	BUG_ON(t->flags & TF_ONE_WAY);
1537	while (1) {
1538		target_thread = binder_get_txn_from_and_acq_inner(t);
1539		if (target_thread) {
1540			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1541				     "send failed reply for transaction %d to %d:%d\n",
1542				      t->debug_id,
1543				      target_thread->proc->pid,
1544				      target_thread->pid);
1545
1546			binder_pop_transaction_ilocked(target_thread, t);
1547			if (target_thread->reply_error.cmd == BR_OK) {
1548				target_thread->reply_error.cmd = error_code;
1549				binder_enqueue_thread_work_ilocked(
1550					target_thread,
1551					&target_thread->reply_error.work);
1552				wake_up_interruptible(&target_thread->wait);
1553			} else {
1554				/*
1555				 * Cannot get here for normal operation, but
1556				 * we can if multiple synchronous transactions
1557				 * are sent without blocking for responses.
1558				 * Just ignore the 2nd error in this case.
1559				 */
1560				pr_warn("Unexpected reply error: %u\n",
1561					target_thread->reply_error.cmd);
1562			}
1563			binder_inner_proc_unlock(target_thread->proc);
1564			binder_thread_dec_tmpref(target_thread);
1565			binder_free_transaction(t);
1566			return;
1567		}
1568		__release(&target_thread->proc->inner_lock);
1569		next = t->from_parent;
1570
1571		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1572			     "send failed reply for transaction %d, target dead\n",
1573			     t->debug_id);
1574
1575		binder_free_transaction(t);
1576		if (next == NULL) {
1577			binder_debug(BINDER_DEBUG_DEAD_BINDER,
1578				     "reply failed, no target thread at root\n");
1579			return;
1580		}
1581		t = next;
1582		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1583			     "reply failed, no target thread -- retry %d\n",
1584			      t->debug_id);
1585	}
1586}
1587
1588/**
1589 * binder_cleanup_transaction() - cleans up undelivered transaction
1590 * @t:		transaction that needs to be cleaned up
1591 * @reason:	reason the transaction wasn't delivered
1592 * @error_code:	error to return to caller (if synchronous call)
1593 */
1594static void binder_cleanup_transaction(struct binder_transaction *t,
1595				       const char *reason,
1596				       uint32_t error_code)
1597{
1598	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1599		binder_send_failed_reply(t, error_code);
1600	} else {
1601		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1602			"undelivered transaction %d, %s\n",
1603			t->debug_id, reason);
1604		binder_free_transaction(t);
1605	}
1606}
1607
1608/**
1609 * binder_get_object() - gets object and checks for valid metadata
1610 * @proc:	binder_proc owning the buffer
1611 * @buffer:	binder_buffer that we're parsing.
1612 * @offset:	offset in the @buffer at which to validate an object.
1613 * @object:	struct binder_object to read into
1614 *
1615 * Return:	If there's a valid metadata object at @offset in @buffer, the
1616 *		size of that object. Otherwise, it returns zero. The object
1617 *		is read into the struct binder_object pointed to by @object.
1618 */
1619static size_t binder_get_object(struct binder_proc *proc,
1620				struct binder_buffer *buffer,
1621				unsigned long offset,
1622				struct binder_object *object)
1623{
1624	size_t read_size;
1625	struct binder_object_header *hdr;
1626	size_t object_size = 0;
1627
1628	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1629	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1630	    binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1631					  offset, read_size))
1632		return 0;
1633
1634	/* Ok, now see if we read a complete object. */
1635	hdr = &object->hdr;
1636	switch (hdr->type) {
1637	case BINDER_TYPE_BINDER:
1638	case BINDER_TYPE_WEAK_BINDER:
1639	case BINDER_TYPE_HANDLE:
1640	case BINDER_TYPE_WEAK_HANDLE:
1641		object_size = sizeof(struct flat_binder_object);
1642		break;
1643	case BINDER_TYPE_FD:
1644		object_size = sizeof(struct binder_fd_object);
1645		break;
1646	case BINDER_TYPE_PTR:
1647		object_size = sizeof(struct binder_buffer_object);
1648		break;
1649	case BINDER_TYPE_FDA:
1650		object_size = sizeof(struct binder_fd_array_object);
1651		break;
1652	default:
1653		return 0;
1654	}
1655	if (offset <= buffer->data_size - object_size &&
1656	    buffer->data_size >= object_size)
1657		return object_size;
1658	else
1659		return 0;
1660}
1661
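/*
 * Usage sketch (editorial): callers read an object out of a buffer
 * and dispatch on the header type, e.g.:
 *
 *	struct binder_object object;
 *	size_t size = binder_get_object(proc, buffer, offset, &object);
 *
 *	if (size && object.hdr.type == BINDER_TYPE_BINDER) {
 *		struct flat_binder_object *fp =
 *				to_flat_binder_object(&object.hdr);
 *		... fp->binder and fp->cookie are now safe to read ...
 *	}
 */
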
1662/**
1663 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1664 * @proc:	binder_proc owning the buffer
1665 * @b:		binder_buffer containing the object
1666 * @object:	struct binder_object to read into
1667 * @index:	index in offset array at which the binder_buffer_object is
1668 *		located
1669 * @start_offset: points to the start of the offset array
1670 * @object_offsetp: offset of @object read from @b
1671 * @num_valid:	the number of valid offsets in the offset array
1672 *
1673 * Return:	If @index is within the valid range of the offset array
 1674 *		described by @start_offset and @num_valid, and if there's a valid
1675 *		binder_buffer_object at the offset found in index @index
1676 *		of the offset array, that object is returned. Otherwise,
1677 *		%NULL is returned.
1678 *		Note that the offset found in index @index itself is not
1679 *		verified; this function assumes that @num_valid elements
1680 *		from @start were previously verified to have valid offsets.
1681 *		If @object_offsetp is non-NULL, then the offset within
1682 *		@b is written to it.
1683 */
1684static struct binder_buffer_object *binder_validate_ptr(
1685						struct binder_proc *proc,
1686						struct binder_buffer *b,
1687						struct binder_object *object,
1688						binder_size_t index,
1689						binder_size_t start_offset,
1690						binder_size_t *object_offsetp,
1691						binder_size_t num_valid)
1692{
1693	size_t object_size;
1694	binder_size_t object_offset;
1695	unsigned long buffer_offset;
1696
1697	if (index >= num_valid)
1698		return NULL;
1699
1700	buffer_offset = start_offset + sizeof(binder_size_t) * index;
1701	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1702					  b, buffer_offset,
1703					  sizeof(object_offset)))
1704		return NULL;
1705	object_size = binder_get_object(proc, b, object_offset, object);
1706	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1707		return NULL;
1708	if (object_offsetp)
1709		*object_offsetp = object_offset;
1710
1711	return &object->bbo;
1712}
1713
1714/**
1715 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1716 * @proc:		binder_proc owning the buffer
1717 * @b:			transaction buffer
1718 * @objects_start_offset: offset to start of objects buffer
1719 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
1720 * @fixup_offset:	start offset in @buffer to fix up
1721 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
1722 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
1723 *
 1724 * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
 1725 *			allowed.
1726 *
1727 * For safety reasons, we only allow fixups inside a buffer to happen
1728 * at increasing offsets; additionally, we only allow fixup on the last
1729 * buffer object that was verified, or one of its parents.
1730 *
1731 * Example of what is allowed:
1732 *
1733 * A
1734 *   B (parent = A, offset = 0)
1735 *   C (parent = A, offset = 16)
1736 *     D (parent = C, offset = 0)
1737 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1738 *
1739 * Examples of what is not allowed:
1740 *
1741 * Decreasing offsets within the same parent:
1742 * A
1743 *   C (parent = A, offset = 16)
1744 *   B (parent = A, offset = 0) // decreasing offset within A
1745 *
1746 * Referring to a parent that wasn't the last object or any of its parents:
1747 * A
1748 *   B (parent = A, offset = 0)
1749 *   C (parent = A, offset = 0)
1750 *   C (parent = A, offset = 16)
1751 *     D (parent = B, offset = 0) // B is not A or any of A's parents
1752 */
1753static bool binder_validate_fixup(struct binder_proc *proc,
1754				  struct binder_buffer *b,
1755				  binder_size_t objects_start_offset,
1756				  binder_size_t buffer_obj_offset,
1757				  binder_size_t fixup_offset,
1758				  binder_size_t last_obj_offset,
1759				  binder_size_t last_min_offset)
1760{
1761	if (!last_obj_offset) {
 1762		/* Nothing to fix up */
1763		return false;
1764	}
1765
1766	while (last_obj_offset != buffer_obj_offset) {
1767		unsigned long buffer_offset;
1768		struct binder_object last_object;
1769		struct binder_buffer_object *last_bbo;
1770		size_t object_size = binder_get_object(proc, b, last_obj_offset,
1771						       &last_object);
1772		if (object_size != sizeof(*last_bbo))
1773			return false;
1774
1775		last_bbo = &last_object.bbo;
1776		/*
1777		 * Safe to retrieve the parent of last_obj, since it
1778		 * was already previously verified by the driver.
1779		 */
1780		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1781			return false;
1782		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1783		buffer_offset = objects_start_offset +
1784			sizeof(binder_size_t) * last_bbo->parent;
1785		if (binder_alloc_copy_from_buffer(&proc->alloc,
1786						  &last_obj_offset,
1787						  b, buffer_offset,
1788						  sizeof(last_obj_offset)))
1789			return false;
1790	}
1791	return (fixup_offset >= last_min_offset);
1792}
1793
1794/**
1795 * struct binder_task_work_cb - for deferred close
1796 *
1797 * @twork:                callback_head for task work
1798 * @fd:                   fd to close
1799 *
1800 * Structure to pass task work to be handled after
1801 * returning from binder_ioctl() via task_work_add().
1802 */
1803struct binder_task_work_cb {
1804	struct callback_head twork;
1805	struct file *file;
1806};
1807
1808/**
 1809 * binder_do_fd_close() - task_work callback to close a file
1810 * @twork:	callback head for task work
1811 *
1812 * It is not safe to call ksys_close() during the binder_ioctl()
1813 * function if there is a chance that binder's own file descriptor
1814 * might be closed. This is to meet the requirements for using
1815 * fdget() (see comments for __fget_light()). Therefore use
1816 * task_work_add() to schedule the close operation once we have
1817 * returned from binder_ioctl(). This function is a callback
 1818 * for that mechanism and performs the deferred fput() on the
 1819 * file that backed the closed descriptor.
1820 */
1821static void binder_do_fd_close(struct callback_head *twork)
1822{
1823	struct binder_task_work_cb *twcb = container_of(twork,
1824			struct binder_task_work_cb, twork);
1825
1826	fput(twcb->file);
1827	kfree(twcb);
1828}
1829
1830/**
1831 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1832 * @fd:		file-descriptor to close
1833 *
1834 * See comments in binder_do_fd_close(). This function is used to schedule
1835 * a file-descriptor to be closed after returning from binder_ioctl().
1836 */
1837static void binder_deferred_fd_close(int fd)
1838{
1839	struct binder_task_work_cb *twcb;
1840
1841	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1842	if (!twcb)
1843		return;
1844	init_task_work(&twcb->twork, binder_do_fd_close);
1845	close_fd_get_file(fd, &twcb->file);
1846	if (twcb->file) {
1847		filp_close(twcb->file, current->files);
1848		task_work_add(current, &twcb->twork, TWA_RESUME);
1849	} else {
1850		kfree(twcb);
1851	}
1852}
1853
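/*
 * Usage sketch (editorial): the BINDER_TYPE_FDA release path further
 * below pairs the deferred close with a looper hint so the thread
 * returns to user space promptly and runs the queued task work:
 *
 *	binder_deferred_fd_close(fd);
 *	if (thread)
 *		thread->looper_need_return = true;
 */
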
1854static void binder_transaction_buffer_release(struct binder_proc *proc,
1855					      struct binder_thread *thread,
1856					      struct binder_buffer *buffer,
1857					      binder_size_t failed_at,
1858					      bool is_failure)
1859{
1860	int debug_id = buffer->debug_id;
1861	binder_size_t off_start_offset, buffer_offset, off_end_offset;
1862
1863	binder_debug(BINDER_DEBUG_TRANSACTION,
1864		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1865		     proc->pid, buffer->debug_id,
1866		     buffer->data_size, buffer->offsets_size,
1867		     (unsigned long long)failed_at);
1868
1869	if (buffer->target_node)
1870		binder_dec_node(buffer->target_node, 1, 0);
1871
1872	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1873	off_end_offset = is_failure ? failed_at :
1874				off_start_offset + buffer->offsets_size;
1875	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1876	     buffer_offset += sizeof(binder_size_t)) {
1877		struct binder_object_header *hdr;
1878		size_t object_size = 0;
1879		struct binder_object object;
1880		binder_size_t object_offset;
1881
1882		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1883						   buffer, buffer_offset,
1884						   sizeof(object_offset)))
1885			object_size = binder_get_object(proc, buffer,
1886							object_offset, &object);
1887		if (object_size == 0) {
1888			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1889			       debug_id, (u64)object_offset, buffer->data_size);
1890			continue;
1891		}
1892		hdr = &object.hdr;
1893		switch (hdr->type) {
1894		case BINDER_TYPE_BINDER:
1895		case BINDER_TYPE_WEAK_BINDER: {
1896			struct flat_binder_object *fp;
1897			struct binder_node *node;
1898
1899			fp = to_flat_binder_object(hdr);
1900			node = binder_get_node(proc, fp->binder);
1901			if (node == NULL) {
1902				pr_err("transaction release %d bad node %016llx\n",
1903				       debug_id, (u64)fp->binder);
1904				break;
1905			}
1906			binder_debug(BINDER_DEBUG_TRANSACTION,
1907				     "        node %d u%016llx\n",
1908				     node->debug_id, (u64)node->ptr);
1909			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1910					0);
1911			binder_put_node(node);
1912		} break;
1913		case BINDER_TYPE_HANDLE:
1914		case BINDER_TYPE_WEAK_HANDLE: {
1915			struct flat_binder_object *fp;
1916			struct binder_ref_data rdata;
1917			int ret;
1918
1919			fp = to_flat_binder_object(hdr);
1920			ret = binder_dec_ref_for_handle(proc, fp->handle,
1921				hdr->type == BINDER_TYPE_HANDLE, &rdata);
1922
1923			if (ret) {
1924				pr_err("transaction release %d bad handle %d, ret = %d\n",
1925				 debug_id, fp->handle, ret);
1926				break;
1927			}
1928			binder_debug(BINDER_DEBUG_TRANSACTION,
1929				     "        ref %d desc %d\n",
1930				     rdata.debug_id, rdata.desc);
1931		} break;
1932
1933		case BINDER_TYPE_FD: {
1934			/*
1935			 * No need to close the file here since user-space
 1936			 * closes it for successfully delivered
1937			 * transactions. For transactions that weren't
1938			 * delivered, the new fd was never allocated so
1939			 * there is no need to close and the fput on the
1940			 * file is done when the transaction is torn
1941			 * down.
1942			 */
1943		} break;
1944		case BINDER_TYPE_PTR:
1945			/*
1946			 * Nothing to do here, this will get cleaned up when the
1947			 * transaction buffer gets freed
1948			 */
1949			break;
1950		case BINDER_TYPE_FDA: {
1951			struct binder_fd_array_object *fda;
1952			struct binder_buffer_object *parent;
1953			struct binder_object ptr_object;
1954			binder_size_t fda_offset;
1955			size_t fd_index;
1956			binder_size_t fd_buf_size;
1957			binder_size_t num_valid;
1958
1959			if (proc->tsk != current->group_leader) {
1960				/*
 1961				 * Nothing to do if running in sender context.
1962				 * The fd fixups have not been applied so no
1963				 * fds need to be closed.
1964				 */
1965				continue;
1966			}
1967
1968			num_valid = (buffer_offset - off_start_offset) /
1969						sizeof(binder_size_t);
1970			fda = to_binder_fd_array_object(hdr);
1971			parent = binder_validate_ptr(proc, buffer, &ptr_object,
1972						     fda->parent,
1973						     off_start_offset,
1974						     NULL,
1975						     num_valid);
1976			if (!parent) {
1977				pr_err("transaction release %d bad parent offset\n",
1978				       debug_id);
1979				continue;
1980			}
1981			fd_buf_size = sizeof(u32) * fda->num_fds;
1982			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1983				pr_err("transaction release %d invalid number of fds (%lld)\n",
1984				       debug_id, (u64)fda->num_fds);
1985				continue;
1986			}
1987			if (fd_buf_size > parent->length ||
1988			    fda->parent_offset > parent->length - fd_buf_size) {
1989				/* No space for all file descriptors here. */
1990				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1991				       debug_id, (u64)fda->num_fds);
1992				continue;
1993			}
1994			/*
1995			 * the source data for binder_buffer_object is visible
1996			 * to user-space and the @buffer element is the user
1997			 * pointer to the buffer_object containing the fd_array.
1998			 * Convert the address to an offset relative to
1999			 * the base of the transaction buffer.
2000			 */
2001			fda_offset =
2002			    (parent->buffer - (uintptr_t)buffer->user_data) +
2003			    fda->parent_offset;
2004			for (fd_index = 0; fd_index < fda->num_fds;
2005			     fd_index++) {
2006				u32 fd;
2007				int err;
2008				binder_size_t offset = fda_offset +
2009					fd_index * sizeof(fd);
2010
2011				err = binder_alloc_copy_from_buffer(
2012						&proc->alloc, &fd, buffer,
2013						offset, sizeof(fd));
2014				WARN_ON(err);
2015				if (!err) {
2016					binder_deferred_fd_close(fd);
2017					/*
2018					 * Need to make sure the thread goes
2019					 * back to userspace to complete the
2020					 * deferred close
2021					 */
2022					if (thread)
2023						thread->looper_need_return = true;
2024				}
2025			}
2026		} break;
2027		default:
2028			pr_err("transaction release %d bad object type %x\n",
2029				debug_id, hdr->type);
2030			break;
2031		}
2032	}
2033}
2034
2035static int binder_translate_binder(struct flat_binder_object *fp,
2036				   struct binder_transaction *t,
2037				   struct binder_thread *thread)
2038{
2039	struct binder_node *node;
2040	struct binder_proc *proc = thread->proc;
2041	struct binder_proc *target_proc = t->to_proc;
2042	struct binder_ref_data rdata;
2043	int ret = 0;
2044
2045	node = binder_get_node(proc, fp->binder);
2046	if (!node) {
2047		node = binder_new_node(proc, fp);
2048		if (!node)
2049			return -ENOMEM;
2050	}
2051	if (fp->cookie != node->cookie) {
2052		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2053				  proc->pid, thread->pid, (u64)fp->binder,
2054				  node->debug_id, (u64)fp->cookie,
2055				  (u64)node->cookie);
2056		ret = -EINVAL;
2057		goto done;
2058	}
2059	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2060		ret = -EPERM;
2061		goto done;
2062	}
2063
2064	ret = binder_inc_ref_for_node(target_proc, node,
2065			fp->hdr.type == BINDER_TYPE_BINDER,
2066			&thread->todo, &rdata);
2067	if (ret)
2068		goto done;
2069
2070	if (fp->hdr.type == BINDER_TYPE_BINDER)
2071		fp->hdr.type = BINDER_TYPE_HANDLE;
2072	else
2073		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2074	fp->binder = 0;
2075	fp->handle = rdata.desc;
2076	fp->cookie = 0;
2077
2078	trace_binder_transaction_node_to_ref(t, node, &rdata);
2079	binder_debug(BINDER_DEBUG_TRANSACTION,
2080		     "        node %d u%016llx -> ref %d desc %d\n",
2081		     node->debug_id, (u64)node->ptr,
2082		     rdata.debug_id, rdata.desc);
2083done:
2084	binder_put_node(node);
2085	return ret;
2086}
2087
2088static int binder_translate_handle(struct flat_binder_object *fp,
2089				   struct binder_transaction *t,
2090				   struct binder_thread *thread)
2091{
2092	struct binder_proc *proc = thread->proc;
2093	struct binder_proc *target_proc = t->to_proc;
2094	struct binder_node *node;
2095	struct binder_ref_data src_rdata;
2096	int ret = 0;
2097
2098	node = binder_get_node_from_ref(proc, fp->handle,
2099			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2100	if (!node) {
2101		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2102				  proc->pid, thread->pid, fp->handle);
2103		return -EINVAL;
2104	}
2105	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2106		ret = -EPERM;
2107		goto done;
2108	}
2109
2110	binder_node_lock(node);
2111	if (node->proc == target_proc) {
2112		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2113			fp->hdr.type = BINDER_TYPE_BINDER;
2114		else
2115			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2116		fp->binder = node->ptr;
2117		fp->cookie = node->cookie;
2118		if (node->proc)
2119			binder_inner_proc_lock(node->proc);
2120		else
2121			__acquire(&node->proc->inner_lock);
2122		binder_inc_node_nilocked(node,
2123					 fp->hdr.type == BINDER_TYPE_BINDER,
2124					 0, NULL);
2125		if (node->proc)
2126			binder_inner_proc_unlock(node->proc);
2127		else
2128			__release(&node->proc->inner_lock);
2129		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2130		binder_debug(BINDER_DEBUG_TRANSACTION,
2131			     "        ref %d desc %d -> node %d u%016llx\n",
2132			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2133			     (u64)node->ptr);
2134		binder_node_unlock(node);
2135	} else {
2136		struct binder_ref_data dest_rdata;
2137
2138		binder_node_unlock(node);
2139		ret = binder_inc_ref_for_node(target_proc, node,
2140				fp->hdr.type == BINDER_TYPE_HANDLE,
2141				NULL, &dest_rdata);
2142		if (ret)
2143			goto done;
2144
2145		fp->binder = 0;
2146		fp->handle = dest_rdata.desc;
2147		fp->cookie = 0;
2148		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2149						    &dest_rdata);
2150		binder_debug(BINDER_DEBUG_TRANSACTION,
2151			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2152			     src_rdata.debug_id, src_rdata.desc,
2153			     dest_rdata.debug_id, dest_rdata.desc,
2154			     node->debug_id);
2155	}
2156done:
2157	binder_put_node(node);
2158	return ret;
2159}
2160
2161static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2162			       struct binder_transaction *t,
2163			       struct binder_thread *thread,
2164			       struct binder_transaction *in_reply_to)
2165{
2166	struct binder_proc *proc = thread->proc;
2167	struct binder_proc *target_proc = t->to_proc;
2168	struct binder_txn_fd_fixup *fixup;
2169	struct file *file;
2170	int ret = 0;
2171	bool target_allows_fd;
2172
2173	if (in_reply_to)
2174		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2175	else
2176		target_allows_fd = t->buffer->target_node->accept_fds;
2177	if (!target_allows_fd) {
2178		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2179				  proc->pid, thread->pid,
2180				  in_reply_to ? "reply" : "transaction",
2181				  fd);
2182		ret = -EPERM;
2183		goto err_fd_not_accepted;
2184	}
2185
2186	file = fget(fd);
2187	if (!file) {
2188		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2189				  proc->pid, thread->pid, fd);
2190		ret = -EBADF;
2191		goto err_fget;
2192	}
2193	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2194	if (ret < 0) {
2195		ret = -EPERM;
2196		goto err_security;
2197	}
2198
2199	/*
2200	 * Add fixup record for this transaction. The allocation
2201	 * of the fd in the target needs to be done from a
2202	 * target thread.
2203	 */
2204	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2205	if (!fixup) {
2206		ret = -ENOMEM;
2207		goto err_alloc;
2208	}
2209	fixup->file = file;
2210	fixup->offset = fd_offset;
2211	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2212	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2213
2214	return ret;
2215
2216err_alloc:
2217err_security:
2218	fput(file);
2219err_fget:
2220err_fd_not_accepted:
2221	return ret;
2222}
2223
2224static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2225				     struct binder_buffer_object *parent,
2226				     struct binder_transaction *t,
2227				     struct binder_thread *thread,
2228				     struct binder_transaction *in_reply_to)
2229{
2230	binder_size_t fdi, fd_buf_size;
2231	binder_size_t fda_offset;
2232	struct binder_proc *proc = thread->proc;
2233	struct binder_proc *target_proc = t->to_proc;
2234
2235	fd_buf_size = sizeof(u32) * fda->num_fds;
2236	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2237		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2238				  proc->pid, thread->pid, (u64)fda->num_fds);
2239		return -EINVAL;
2240	}
2241	if (fd_buf_size > parent->length ||
2242	    fda->parent_offset > parent->length - fd_buf_size) {
2243		/* No space for all file descriptors here. */
2244		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2245				  proc->pid, thread->pid, (u64)fda->num_fds);
2246		return -EINVAL;
2247	}
2248	/*
2249	 * the source data for binder_buffer_object is visible
2250	 * to user-space and the @buffer element is the user
2251	 * pointer to the buffer_object containing the fd_array.
2252	 * Convert the address to an offset relative to
2253	 * the base of the transaction buffer.
2254	 */
2255	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2256		fda->parent_offset;
2257	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2258		binder_user_error("%d:%d parent offset not aligned correctly.\n",
2259				  proc->pid, thread->pid);
2260		return -EINVAL;
2261	}
2262	for (fdi = 0; fdi < fda->num_fds; fdi++) {
2263		u32 fd;
2264		int ret;
2265		binder_size_t offset = fda_offset + fdi * sizeof(fd);
2266
2267		ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2268						    &fd, t->buffer,
2269						    offset, sizeof(fd));
2270		if (!ret)
2271			ret = binder_translate_fd(fd, offset, t, thread,
2272						  in_reply_to);
2273		if (ret < 0)
2274			return ret;
2275	}
2276	return 0;
2277}
2278
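/*
 * Editorial worked example of the fda_offset arithmetic above (all
 * values invented): if parent->buffer == t->buffer->user_data + 0x100
 * and fda->parent_offset == 0x10, then fda_offset == 0x110, and fd
 * number @fdi is copied from kernel-visible offset
 * 0x110 + fdi * sizeof(u32) within the transaction buffer.
 */
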
2279static int binder_fixup_parent(struct binder_transaction *t,
2280			       struct binder_thread *thread,
2281			       struct binder_buffer_object *bp,
2282			       binder_size_t off_start_offset,
2283			       binder_size_t num_valid,
2284			       binder_size_t last_fixup_obj_off,
2285			       binder_size_t last_fixup_min_off)
2286{
2287	struct binder_buffer_object *parent;
2288	struct binder_buffer *b = t->buffer;
2289	struct binder_proc *proc = thread->proc;
2290	struct binder_proc *target_proc = t->to_proc;
2291	struct binder_object object;
2292	binder_size_t buffer_offset;
2293	binder_size_t parent_offset;
2294
2295	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2296		return 0;
2297
2298	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2299				     off_start_offset, &parent_offset,
2300				     num_valid);
2301	if (!parent) {
2302		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2303				  proc->pid, thread->pid);
2304		return -EINVAL;
2305	}
2306
2307	if (!binder_validate_fixup(target_proc, b, off_start_offset,
2308				   parent_offset, bp->parent_offset,
2309				   last_fixup_obj_off,
2310				   last_fixup_min_off)) {
2311		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2312				  proc->pid, thread->pid);
2313		return -EINVAL;
2314	}
2315
2316	if (parent->length < sizeof(binder_uintptr_t) ||
2317	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2318		/* No space for a pointer here! */
2319		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2320				  proc->pid, thread->pid);
2321		return -EINVAL;
2322	}
2323	buffer_offset = bp->parent_offset +
2324			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2325	if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2326					&bp->buffer, sizeof(bp->buffer))) {
2327		binder_user_error("%d:%d got transaction with invalid parent offset\n",
2328				  proc->pid, thread->pid);
2329		return -EINVAL;
2330	}
2331
2332	return 0;
2333}
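/*
 * Editor's sketch (not driver code): the sender-side counterpart of
 * binder_fixup_parent() above. A child BINDER_TYPE_PTR object carrying
 * BINDER_BUFFER_FLAG_HAS_PARENT asks the kernel to patch the child's
 * translated address into the parent buffer at @parent_offset, which is
 * exactly the write binder_alloc_copy_to_buffer() performs above. Names
 * here are hypothetical; the struct fields are uapi.
 */
#if 0
static void example_build_child(struct binder_buffer_object *bp,
				void *data, binder_size_t len,
				binder_size_t parent_index,
				binder_size_t pointer_offset)
{
	bp->hdr.type = BINDER_TYPE_PTR;
	bp->flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	bp->buffer = (binder_uintptr_t)data;
	bp->length = len;
	bp->parent = parent_index;	    /* offsets-array index of parent */
	bp->parent_offset = pointer_offset; /* where to patch the pointer */
}
#endif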
2334
2335/**
2336 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2337 * @t:		transaction to send
2338 * @proc:	process to send the transaction to
2339 * @thread:	thread in @proc to send the transaction to (may be NULL)
2340 *
2341 * This function queues a transaction to the specified process. It will try
2342 * to find a thread in the target process to handle the transaction and
2343 * wake it up. If no thread is found, the work is queued to the
2344 * process-wide todo list and any thread waiting for proc work is woken.
2345 *
2346 * If the @thread parameter is not NULL, the transaction is always queued
2347 * to the waitlist of that specific thread.
2348 *
2349 * Return:	0 if the transaction was successfully queued
2350 *		BR_DEAD_REPLY if the target process or thread is dead
2351 *		BR_FROZEN_REPLY if the target process or thread is frozen
2352 */
2353static int binder_proc_transaction(struct binder_transaction *t,
2354				    struct binder_proc *proc,
2355				    struct binder_thread *thread)
2356{
2357	struct binder_node *node = t->buffer->target_node;
2358	bool oneway = !!(t->flags & TF_ONE_WAY);
2359	bool pending_async = false;
2360
2361	BUG_ON(!node);
2362	binder_node_lock(node);
2363	if (oneway) {
2364		BUG_ON(thread);
2365		if (node->has_async_transaction)
2366			pending_async = true;
2367		else
2368			node->has_async_transaction = true;
2369	}
2370
2371	binder_inner_proc_lock(proc);
2372	if (proc->is_frozen) {
2373		proc->sync_recv |= !oneway;
2374		proc->async_recv |= oneway;
2375	}
2376
2377	if ((proc->is_frozen && !oneway) || proc->is_dead ||
2378			(thread && thread->is_dead)) {
2379		binder_inner_proc_unlock(proc);
2380		binder_node_unlock(node);
2381		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2382	}
2383
2384	if (!thread && !pending_async)
2385		thread = binder_select_thread_ilocked(proc);
2386
2387	if (thread)
2388		binder_enqueue_thread_work_ilocked(thread, &t->work);
2389	else if (!pending_async)
2390		binder_enqueue_work_ilocked(&t->work, &proc->todo);
2391	else
2392		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2393
2394	if (!pending_async)
2395		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2396
2397	proc->outstanding_txns++;
2398	binder_inner_proc_unlock(proc);
2399	binder_node_unlock(node);
2400
2401	return 0;
2402}
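/*
 * Editor's sketch (not driver code): a condensed restatement of the
 * queuing policy implemented by binder_proc_transaction() above, with
 * hypothetical names, for readers who want the decision table in one
 * place. It deliberately omits the frozen/dead checks and all locking.
 */
#if 0
enum example_queue { EX_THREAD_TODO, EX_PROC_TODO, EX_NODE_ASYNC_TODO };

static enum example_queue example_route(bool reply_thread, bool oneway,
					bool node_busy, bool idle_thread)
{
	if (reply_thread)
		return EX_THREAD_TODO;		/* replies pin one thread */
	if (oneway && node_busy)
		return EX_NODE_ASYNC_TODO;	/* serialize async per node */
	return idle_thread ? EX_THREAD_TODO : EX_PROC_TODO;
}
#endif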
2403
2404/**
2405 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2406 * @node:         struct binder_node for which to get refs
2407 * @procp:        returns @node->proc if valid
2408 * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2409 *
2410 * User-space normally keeps the node alive when creating a transaction
2411 * since it has a reference to the target. The local strong ref keeps it
2412 * alive if the sending process dies before the target process processes
2413 * the transaction. If the source process is malicious or has a reference
2414 * counting bug, relying on the local strong ref can fail.
2415 *
2416 * Since user-space can cause the local strong ref to go away, we also take
2417 * a tmpref on the node to ensure it survives while we are constructing
2418 * the transaction. We also need a tmpref on the proc while we are
2419 * constructing the transaction, so we take that here as well.
2420 *
2421 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2422 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2423 * target proc has died, @error is set to BR_DEAD_REPLY.
2424 */
2425static struct binder_node *binder_get_node_refs_for_txn(
2426		struct binder_node *node,
2427		struct binder_proc **procp,
2428		uint32_t *error)
2429{
2430	struct binder_node *target_node = NULL;
2431
2432	binder_node_inner_lock(node);
2433	if (node->proc) {
2434		target_node = node;
2435		binder_inc_node_nilocked(node, 1, 0, NULL);
2436		binder_inc_node_tmpref_ilocked(node);
2437		node->proc->tmp_ref++;
2438		*procp = node->proc;
2439	} else
2440		*error = BR_DEAD_REPLY;
2441	binder_node_inner_unlock(node);
2442
2443	return target_node;
2444}
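/*
 * Editor's sketch (not driver code): the take/drop pairing expected of
 * callers of binder_get_node_refs_for_txn(). The drops mirror the cleanup
 * performed near the end of binder_transaction() below; the surrounding
 * caller context here is hypothetical.
 */
#if 0
	target_node = binder_get_node_refs_for_txn(node, &target_proc, &err);
	if (target_node) {
		/* ... build and queue the transaction ... */
		binder_proc_dec_tmpref(target_proc);	/* proc tmpref */
		binder_dec_node_tmpref(target_node);	/* node tmpref */
		binder_dec_node(target_node, 1, 0);	/* strong ref */
	}
#endif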
2445
2446static void binder_transaction(struct binder_proc *proc,
2447			       struct binder_thread *thread,
2448			       struct binder_transaction_data *tr, int reply,
2449			       binder_size_t extra_buffers_size)
2450{
2451	int ret;
2452	struct binder_transaction *t;
2453	struct binder_work *w;
2454	struct binder_work *tcomplete;
2455	binder_size_t buffer_offset = 0;
2456	binder_size_t off_start_offset, off_end_offset;
2457	binder_size_t off_min;
2458	binder_size_t sg_buf_offset, sg_buf_end_offset;
2459	struct binder_proc *target_proc = NULL;
2460	struct binder_thread *target_thread = NULL;
2461	struct binder_node *target_node = NULL;
2462	struct binder_transaction *in_reply_to = NULL;
2463	struct binder_transaction_log_entry *e;
2464	uint32_t return_error = 0;
2465	uint32_t return_error_param = 0;
2466	uint32_t return_error_line = 0;
2467	binder_size_t last_fixup_obj_off = 0;
2468	binder_size_t last_fixup_min_off = 0;
2469	struct binder_context *context = proc->context;
2470	int t_debug_id = atomic_inc_return(&binder_last_id);
2471	char *secctx = NULL;
2472	u32 secctx_sz = 0;
2473
2474	e = binder_transaction_log_add(&binder_transaction_log);
2475	e->debug_id = t_debug_id;
2476	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2477	e->from_proc = proc->pid;
2478	e->from_thread = thread->pid;
2479	e->target_handle = tr->target.handle;
2480	e->data_size = tr->data_size;
2481	e->offsets_size = tr->offsets_size;
2482	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2483
2484	if (reply) {
2485		binder_inner_proc_lock(proc);
2486		in_reply_to = thread->transaction_stack;
2487		if (in_reply_to == NULL) {
2488			binder_inner_proc_unlock(proc);
2489			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2490					  proc->pid, thread->pid);
2491			return_error = BR_FAILED_REPLY;
2492			return_error_param = -EPROTO;
2493			return_error_line = __LINE__;
2494			goto err_empty_call_stack;
2495		}
2496		if (in_reply_to->to_thread != thread) {
2497			spin_lock(&in_reply_to->lock);
2498			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2499				proc->pid, thread->pid, in_reply_to->debug_id,
2500				in_reply_to->to_proc ?
2501				in_reply_to->to_proc->pid : 0,
2502				in_reply_to->to_thread ?
2503				in_reply_to->to_thread->pid : 0);
2504			spin_unlock(&in_reply_to->lock);
2505			binder_inner_proc_unlock(proc);
2506			return_error = BR_FAILED_REPLY;
2507			return_error_param = -EPROTO;
2508			return_error_line = __LINE__;
2509			in_reply_to = NULL;
2510			goto err_bad_call_stack;
2511		}
2512		thread->transaction_stack = in_reply_to->to_parent;
2513		binder_inner_proc_unlock(proc);
2514		binder_set_nice(in_reply_to->saved_priority);
2515		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2516		if (target_thread == NULL) {
2517			/* annotation for sparse */
2518			__release(&target_thread->proc->inner_lock);
2519			return_error = BR_DEAD_REPLY;
2520			return_error_line = __LINE__;
2521			goto err_dead_binder;
2522		}
2523		if (target_thread->transaction_stack != in_reply_to) {
2524			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2525				proc->pid, thread->pid,
2526				target_thread->transaction_stack ?
2527				target_thread->transaction_stack->debug_id : 0,
2528				in_reply_to->debug_id);
2529			binder_inner_proc_unlock(target_thread->proc);
2530			return_error = BR_FAILED_REPLY;
2531			return_error_param = -EPROTO;
2532			return_error_line = __LINE__;
2533			in_reply_to = NULL;
2534			target_thread = NULL;
2535			goto err_dead_binder;
2536		}
2537		target_proc = target_thread->proc;
2538		target_proc->tmp_ref++;
2539		binder_inner_proc_unlock(target_thread->proc);
2540	} else {
2541		if (tr->target.handle) {
2542			struct binder_ref *ref;
2543
2544			/*
2545			 * There must already be a strong ref
2546			 * on this node. If so, do a strong
2547			 * increment on the node to ensure it
2548			 * stays alive until the transaction is
2549			 * done.
2550			 */
2551			binder_proc_lock(proc);
2552			ref = binder_get_ref_olocked(proc, tr->target.handle,
2553						     true);
2554			if (ref) {
2555				target_node = binder_get_node_refs_for_txn(
2556						ref->node, &target_proc,
2557						&return_error);
2558			} else {
2559				binder_user_error("%d:%d got transaction to invalid handle\n",
2560						  proc->pid, thread->pid);
2561				return_error = BR_FAILED_REPLY;
2562			}
2563			binder_proc_unlock(proc);
2564		} else {
2565			mutex_lock(&context->context_mgr_node_lock);
2566			target_node = context->binder_context_mgr_node;
2567			if (target_node)
2568				target_node = binder_get_node_refs_for_txn(
2569						target_node, &target_proc,
2570						&return_error);
2571			else
2572				return_error = BR_DEAD_REPLY;
2573			mutex_unlock(&context->context_mgr_node_lock);
2574			if (target_node && target_proc->pid == proc->pid) {
2575				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2576						  proc->pid, thread->pid);
2577				return_error = BR_FAILED_REPLY;
2578				return_error_param = -EINVAL;
2579				return_error_line = __LINE__;
2580				goto err_invalid_target_handle;
2581			}
2582		}
2583		if (!target_node) {
2584			/*
2585			 * return_error is set above
2586			 */
2587			return_error_param = -EINVAL;
2588			return_error_line = __LINE__;
2589			goto err_dead_binder;
2590		}
2591		e->to_node = target_node->debug_id;
2592		if (WARN_ON(proc == target_proc)) {
2593			return_error = BR_FAILED_REPLY;
2594			return_error_param = -EINVAL;
2595			return_error_line = __LINE__;
2596			goto err_invalid_target_handle;
2597		}
2598		if (security_binder_transaction(proc->tsk,
2599						target_proc->tsk) < 0) {
2600			return_error = BR_FAILED_REPLY;
2601			return_error_param = -EPERM;
2602			return_error_line = __LINE__;
2603			goto err_invalid_target_handle;
2604		}
2605		binder_inner_proc_lock(proc);
2606
2607		w = list_first_entry_or_null(&thread->todo,
2608					     struct binder_work, entry);
2609		if (!(tr->flags & TF_ONE_WAY) && w &&
2610		    w->type == BINDER_WORK_TRANSACTION) {
2611			/*
2612			 * Do not allow new outgoing transaction from a
2613			 * thread that has a transaction at the head of
2614			 * its todo list. Only need to check the head
2615			 * because binder_select_thread_ilocked picks a
2616			 * thread from proc->waiting_threads to enqueue
2617			 * the transaction, and nothing is queued to the
2618			 * todo list while the thread is on waiting_threads.
2619			 */
2620			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2621					  proc->pid, thread->pid);
2622			binder_inner_proc_unlock(proc);
2623			return_error = BR_FAILED_REPLY;
2624			return_error_param = -EPROTO;
2625			return_error_line = __LINE__;
2626			goto err_bad_todo_list;
2627		}
2628
2629		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2630			struct binder_transaction *tmp;
2631
2632			tmp = thread->transaction_stack;
2633			if (tmp->to_thread != thread) {
2634				spin_lock(&tmp->lock);
2635				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2636					proc->pid, thread->pid, tmp->debug_id,
2637					tmp->to_proc ? tmp->to_proc->pid : 0,
2638					tmp->to_thread ?
2639					tmp->to_thread->pid : 0);
2640				spin_unlock(&tmp->lock);
2641				binder_inner_proc_unlock(proc);
2642				return_error = BR_FAILED_REPLY;
2643				return_error_param = -EPROTO;
2644				return_error_line = __LINE__;
2645				goto err_bad_call_stack;
2646			}
2647			while (tmp) {
2648				struct binder_thread *from;
2649
2650				spin_lock(&tmp->lock);
2651				from = tmp->from;
2652				if (from && from->proc == target_proc) {
2653					atomic_inc(&from->tmp_ref);
2654					target_thread = from;
2655					spin_unlock(&tmp->lock);
2656					break;
2657				}
2658				spin_unlock(&tmp->lock);
2659				tmp = tmp->from_parent;
2660			}
2661		}
2662		binder_inner_proc_unlock(proc);
2663	}
2664	if (target_thread)
2665		e->to_thread = target_thread->pid;
2666	e->to_proc = target_proc->pid;
2667
2668	/* TODO: reuse incoming transaction for reply */
2669	t = kzalloc(sizeof(*t), GFP_KERNEL);
2670	if (t == NULL) {
2671		return_error = BR_FAILED_REPLY;
2672		return_error_param = -ENOMEM;
2673		return_error_line = __LINE__;
2674		goto err_alloc_t_failed;
2675	}
2676	INIT_LIST_HEAD(&t->fd_fixups);
2677	binder_stats_created(BINDER_STAT_TRANSACTION);
2678	spin_lock_init(&t->lock);
2679
2680	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2681	if (tcomplete == NULL) {
2682		return_error = BR_FAILED_REPLY;
2683		return_error_param = -ENOMEM;
2684		return_error_line = __LINE__;
2685		goto err_alloc_tcomplete_failed;
2686	}
2687	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2688
2689	t->debug_id = t_debug_id;
2690
2691	if (reply)
2692		binder_debug(BINDER_DEBUG_TRANSACTION,
2693			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2694			     proc->pid, thread->pid, t->debug_id,
2695			     target_proc->pid, target_thread->pid,
2696			     (u64)tr->data.ptr.buffer,
2697			     (u64)tr->data.ptr.offsets,
2698			     (u64)tr->data_size, (u64)tr->offsets_size,
2699			     (u64)extra_buffers_size);
2700	else
2701		binder_debug(BINDER_DEBUG_TRANSACTION,
2702			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2703			     proc->pid, thread->pid, t->debug_id,
2704			     target_proc->pid, target_node->debug_id,
2705			     (u64)tr->data.ptr.buffer,
2706			     (u64)tr->data.ptr.offsets,
2707			     (u64)tr->data_size, (u64)tr->offsets_size,
2708			     (u64)extra_buffers_size);
2709
2710	if (!reply && !(tr->flags & TF_ONE_WAY))
2711		t->from = thread;
2712	else
2713		t->from = NULL;
2714	t->sender_euid = task_euid(proc->tsk);
2715	t->to_proc = target_proc;
2716	t->to_thread = target_thread;
2717	t->code = tr->code;
2718	t->flags = tr->flags;
2719	t->priority = task_nice(current);
2720
2721	if (target_node && target_node->txn_security_ctx) {
2722		u32 secid;
2723		size_t added_size;
2724
2725		/*
2726		 * Arguably this should be the task's subjective LSM secid but
2727		 * we can't reliably access the subjective creds of a task
2728		 * other than our own so we must use the objective creds, which
2729		 * are safe to access.  The downside is that if a task is
2730		 * temporarily overriding its creds it will not be reflected
2731		 * here; however, it isn't clear that binder would handle that
2732		 * case well anyway.
2733		 */
2734		security_task_getsecid_obj(proc->tsk, &secid);
2735		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2736		if (ret) {
2737			return_error = BR_FAILED_REPLY;
2738			return_error_param = ret;
2739			return_error_line = __LINE__;
2740			goto err_get_secctx_failed;
2741		}
2742		added_size = ALIGN(secctx_sz, sizeof(u64));
2743		extra_buffers_size += added_size;
2744		if (extra_buffers_size < added_size) {
2745			/* integer overflow of extra_buffers_size */
2746			return_error = BR_FAILED_REPLY;
2747			return_error_param = -EINVAL;
2748			return_error_line = __LINE__;
2749			goto err_bad_extra_size;
2750		}
2751	}
2752
2753	trace_binder_transaction(reply, t, target_node);
2754
2755	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2756		tr->offsets_size, extra_buffers_size,
2757		!reply && (t->flags & TF_ONE_WAY), current->tgid);
2758	if (IS_ERR(t->buffer)) {
2759		/*
2760		 * -ESRCH indicates VMA cleared. The target is dying.
2761		 */
2762		return_error_param = PTR_ERR(t->buffer);
2763		return_error = return_error_param == -ESRCH ?
2764			BR_DEAD_REPLY : BR_FAILED_REPLY;
2765		return_error_line = __LINE__;
2766		t->buffer = NULL;
2767		goto err_binder_alloc_buf_failed;
2768	}
2769	if (secctx) {
2770		int err;
2771		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
2772				    ALIGN(tr->offsets_size, sizeof(void *)) +
2773				    ALIGN(extra_buffers_size, sizeof(void *)) -
2774				    ALIGN(secctx_sz, sizeof(u64));
2775
2776		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
2777		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
2778						  t->buffer, buf_offset,
2779						  secctx, secctx_sz);
2780		if (err) {
2781			t->security_ctx = 0;
2782			WARN_ON(1);
2783		}
2784		security_release_secctx(secctx, secctx_sz);
2785		secctx = NULL;
2786	}
2787	t->buffer->debug_id = t->debug_id;
2788	t->buffer->transaction = t;
2789	t->buffer->target_node = target_node;
2790	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
2791	trace_binder_transaction_alloc_buf(t->buffer);
2792
2793	if (binder_alloc_copy_user_to_buffer(
2794				&target_proc->alloc,
2795				t->buffer, 0,
2796				(const void __user *)
2797					(uintptr_t)tr->data.ptr.buffer,
2798				tr->data_size)) {
2799		binder_user_error("%d:%d got transaction with invalid data ptr\n",
2800				proc->pid, thread->pid);
2801		return_error = BR_FAILED_REPLY;
2802		return_error_param = -EFAULT;
2803		return_error_line = __LINE__;
2804		goto err_copy_data_failed;
2805	}
2806	if (binder_alloc_copy_user_to_buffer(
2807				&target_proc->alloc,
2808				t->buffer,
2809				ALIGN(tr->data_size, sizeof(void *)),
2810				(const void __user *)
2811					(uintptr_t)tr->data.ptr.offsets,
2812				tr->offsets_size)) {
2813		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2814				proc->pid, thread->pid);
2815		return_error = BR_FAILED_REPLY;
2816		return_error_param = -EFAULT;
2817		return_error_line = __LINE__;
2818		goto err_copy_data_failed;
2819	}
2820	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2821		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2822				proc->pid, thread->pid, (u64)tr->offsets_size);
2823		return_error = BR_FAILED_REPLY;
2824		return_error_param = -EINVAL;
2825		return_error_line = __LINE__;
2826		goto err_bad_offset;
2827	}
2828	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2829		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2830				  proc->pid, thread->pid,
2831				  (u64)extra_buffers_size);
2832		return_error = BR_FAILED_REPLY;
2833		return_error_param = -EINVAL;
2834		return_error_line = __LINE__;
2835		goto err_bad_offset;
2836	}
2837	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
2838	buffer_offset = off_start_offset;
2839	off_end_offset = off_start_offset + tr->offsets_size;
2840	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
2841	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
2842		ALIGN(secctx_sz, sizeof(u64));
2843	off_min = 0;
2844	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2845	     buffer_offset += sizeof(binder_size_t)) {
2846		struct binder_object_header *hdr;
2847		size_t object_size;
2848		struct binder_object object;
2849		binder_size_t object_offset;
2850
2851		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
2852						  &object_offset,
2853						  t->buffer,
2854						  buffer_offset,
2855						  sizeof(object_offset))) {
2856			return_error = BR_FAILED_REPLY;
2857			return_error_param = -EINVAL;
2858			return_error_line = __LINE__;
2859			goto err_bad_offset;
2860		}
2861		object_size = binder_get_object(target_proc, t->buffer,
2862						object_offset, &object);
2863		if (object_size == 0 || object_offset < off_min) {
2864			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2865					  proc->pid, thread->pid,
2866					  (u64)object_offset,
2867					  (u64)off_min,
2868					  (u64)t->buffer->data_size);
2869			return_error = BR_FAILED_REPLY;
2870			return_error_param = -EINVAL;
2871			return_error_line = __LINE__;
2872			goto err_bad_offset;
2873		}
2874
2875		hdr = &object.hdr;
2876		off_min = object_offset + object_size;
2877		switch (hdr->type) {
2878		case BINDER_TYPE_BINDER:
2879		case BINDER_TYPE_WEAK_BINDER: {
2880			struct flat_binder_object *fp;
2881
2882			fp = to_flat_binder_object(hdr);
2883			ret = binder_translate_binder(fp, t, thread);
2884
2885			if (ret < 0 ||
2886			    binder_alloc_copy_to_buffer(&target_proc->alloc,
2887							t->buffer,
2888							object_offset,
2889							fp, sizeof(*fp))) {
2890				return_error = BR_FAILED_REPLY;
2891				return_error_param = ret;
2892				return_error_line = __LINE__;
2893				goto err_translate_failed;
2894			}
2895		} break;
2896		case BINDER_TYPE_HANDLE:
2897		case BINDER_TYPE_WEAK_HANDLE: {
2898			struct flat_binder_object *fp;
2899
2900			fp = to_flat_binder_object(hdr);
2901			ret = binder_translate_handle(fp, t, thread);
2902			if (ret < 0 ||
2903			    binder_alloc_copy_to_buffer(&target_proc->alloc,
2904							t->buffer,
2905							object_offset,
2906							fp, sizeof(*fp))) {
2907				return_error = BR_FAILED_REPLY;
2908				return_error_param = ret;
2909				return_error_line = __LINE__;
2910				goto err_translate_failed;
2911			}
2912		} break;
2913
2914		case BINDER_TYPE_FD: {
2915			struct binder_fd_object *fp = to_binder_fd_object(hdr);
2916			binder_size_t fd_offset = object_offset +
2917				(uintptr_t)&fp->fd - (uintptr_t)fp;
2918			int ret = binder_translate_fd(fp->fd, fd_offset, t,
2919						      thread, in_reply_to);
2920
2921			fp->pad_binder = 0;
2922			if (ret < 0 ||
2923			    binder_alloc_copy_to_buffer(&target_proc->alloc,
2924							t->buffer,
2925							object_offset,
2926							fp, sizeof(*fp))) {
2927				return_error = BR_FAILED_REPLY;
2928				return_error_param = ret;
2929				return_error_line = __LINE__;
2930				goto err_translate_failed;
2931			}
2932		} break;
2933		case BINDER_TYPE_FDA: {
2934			struct binder_object ptr_object;
2935			binder_size_t parent_offset;
2936			struct binder_fd_array_object *fda =
2937				to_binder_fd_array_object(hdr);
2938			size_t num_valid = (buffer_offset - off_start_offset) /
2939						sizeof(binder_size_t);
2940			struct binder_buffer_object *parent =
2941				binder_validate_ptr(target_proc, t->buffer,
2942						    &ptr_object, fda->parent,
2943						    off_start_offset,
2944						    &parent_offset,
2945						    num_valid);
2946			if (!parent) {
2947				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2948						  proc->pid, thread->pid);
2949				return_error = BR_FAILED_REPLY;
2950				return_error_param = -EINVAL;
2951				return_error_line = __LINE__;
2952				goto err_bad_parent;
2953			}
2954			if (!binder_validate_fixup(target_proc, t->buffer,
2955						   off_start_offset,
2956						   parent_offset,
2957						   fda->parent_offset,
2958						   last_fixup_obj_off,
2959						   last_fixup_min_off)) {
2960				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2961						  proc->pid, thread->pid);
2962				return_error = BR_FAILED_REPLY;
2963				return_error_param = -EINVAL;
2964				return_error_line = __LINE__;
2965				goto err_bad_parent;
2966			}
2967			ret = binder_translate_fd_array(fda, parent, t, thread,
2968							in_reply_to);
2969			if (ret < 0) {
2970				return_error = BR_FAILED_REPLY;
2971				return_error_param = ret;
2972				return_error_line = __LINE__;
2973				goto err_translate_failed;
2974			}
2975			last_fixup_obj_off = parent_offset;
2976			last_fixup_min_off =
2977				fda->parent_offset + sizeof(u32) * fda->num_fds;
2978		} break;
2979		case BINDER_TYPE_PTR: {
2980			struct binder_buffer_object *bp =
2981				to_binder_buffer_object(hdr);
2982			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
2983			size_t num_valid;
2984
2985			if (bp->length > buf_left) {
2986				binder_user_error("%d:%d got transaction with too large buffer\n",
2987						  proc->pid, thread->pid);
2988				return_error = BR_FAILED_REPLY;
2989				return_error_param = -EINVAL;
2990				return_error_line = __LINE__;
2991				goto err_bad_offset;
2992			}
2993			if (binder_alloc_copy_user_to_buffer(
2994						&target_proc->alloc,
2995						t->buffer,
2996						sg_buf_offset,
2997						(const void __user *)
2998							(uintptr_t)bp->buffer,
2999						bp->length)) {
3000				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3001						  proc->pid, thread->pid);
3002				return_error_param = -EFAULT;
3003				return_error = BR_FAILED_REPLY;
3004				return_error_line = __LINE__;
3005				goto err_copy_data_failed;
3006			}
3007			/* Fixup buffer pointer to target proc address space */
3008			bp->buffer = (uintptr_t)
3009				t->buffer->user_data + sg_buf_offset;
3010			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3011
3012			num_valid = (buffer_offset - off_start_offset) /
3013					sizeof(binder_size_t);
3014			ret = binder_fixup_parent(t, thread, bp,
3015						  off_start_offset,
3016						  num_valid,
3017						  last_fixup_obj_off,
3018						  last_fixup_min_off);
3019			if (ret < 0 ||
3020			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3021							t->buffer,
3022							object_offset,
3023							bp, sizeof(*bp))) {
3024				return_error = BR_FAILED_REPLY;
3025				return_error_param = ret;
3026				return_error_line = __LINE__;
3027				goto err_translate_failed;
3028			}
3029			last_fixup_obj_off = object_offset;
3030			last_fixup_min_off = 0;
3031		} break;
3032		default:
3033			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3034				proc->pid, thread->pid, hdr->type);
3035			return_error = BR_FAILED_REPLY;
3036			return_error_param = -EINVAL;
3037			return_error_line = __LINE__;
3038			goto err_bad_object_type;
3039		}
3040	}
3041	if (t->buffer->oneway_spam_suspect)
3042		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3043	else
3044		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3045	t->work.type = BINDER_WORK_TRANSACTION;
3046
3047	if (reply) {
3048		binder_enqueue_thread_work(thread, tcomplete);
3049		binder_inner_proc_lock(target_proc);
3050		if (target_thread->is_dead) {
3051			return_error = BR_DEAD_REPLY;
3052			binder_inner_proc_unlock(target_proc);
3053			goto err_dead_proc_or_thread;
3054		}
3055		BUG_ON(t->buffer->async_transaction != 0);
3056		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3057		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3058		target_proc->outstanding_txns++;
3059		binder_inner_proc_unlock(target_proc);
3060		wake_up_interruptible_sync(&target_thread->wait);
3061		binder_free_transaction(in_reply_to);
3062	} else if (!(t->flags & TF_ONE_WAY)) {
3063		BUG_ON(t->buffer->async_transaction != 0);
3064		binder_inner_proc_lock(proc);
3065		/*
3066		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3067		 * userspace immediately; this allows the target process to
3068		 * immediately start processing this transaction, reducing
3069		 * latency. We will then return the TRANSACTION_COMPLETE when
3070		 * the target replies (or there is an error).
3071		 */
3072		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3073		t->need_reply = 1;
3074		t->from_parent = thread->transaction_stack;
3075		thread->transaction_stack = t;
3076		binder_inner_proc_unlock(proc);
3077		return_error = binder_proc_transaction(t,
3078				target_proc, target_thread);
3079		if (return_error) {
3080			binder_inner_proc_lock(proc);
3081			binder_pop_transaction_ilocked(thread, t);
3082			binder_inner_proc_unlock(proc);
3083			goto err_dead_proc_or_thread;
3084		}
3085	} else {
3086		BUG_ON(target_node == NULL);
3087		BUG_ON(t->buffer->async_transaction != 1);
3088		binder_enqueue_thread_work(thread, tcomplete);
3089		return_error = binder_proc_transaction(t, target_proc, NULL);
3090		if (return_error)
3091			goto err_dead_proc_or_thread;
3092	}
3093	if (target_thread)
3094		binder_thread_dec_tmpref(target_thread);
3095	binder_proc_dec_tmpref(target_proc);
3096	if (target_node)
3097		binder_dec_node_tmpref(target_node);
3098	/*
3099	 * write barrier to synchronize with initialization
3100	 * of log entry
3101	 */
3102	smp_wmb();
3103	WRITE_ONCE(e->debug_id_done, t_debug_id);
3104	return;
3105
3106err_dead_proc_or_thread:
3107	return_error_line = __LINE__;
3108	binder_dequeue_work(proc, tcomplete);
3109err_translate_failed:
3110err_bad_object_type:
3111err_bad_offset:
3112err_bad_parent:
3113err_copy_data_failed:
3114	binder_free_txn_fixups(t);
3115	trace_binder_transaction_failed_buffer_release(t->buffer);
3116	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3117					  buffer_offset, true);
3118	if (target_node)
3119		binder_dec_node_tmpref(target_node);
3120	target_node = NULL;
3121	t->buffer->transaction = NULL;
3122	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3123err_binder_alloc_buf_failed:
3124err_bad_extra_size:
3125	if (secctx)
3126		security_release_secctx(secctx, secctx_sz);
3127err_get_secctx_failed:
3128	kfree(tcomplete);
3129	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3130err_alloc_tcomplete_failed:
3131	if (trace_binder_txn_latency_free_enabled())
3132		binder_txn_latency_free(t);
3133	kfree(t);
3134	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3135err_alloc_t_failed:
3136err_bad_todo_list:
3137err_bad_call_stack:
3138err_empty_call_stack:
3139err_dead_binder:
3140err_invalid_target_handle:
3141	if (target_thread)
3142		binder_thread_dec_tmpref(target_thread);
3143	if (target_proc)
3144		binder_proc_dec_tmpref(target_proc);
3145	if (target_node) {
3146		binder_dec_node(target_node, 1, 0);
3147		binder_dec_node_tmpref(target_node);
3148	}
3149
3150	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3151		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3152		     proc->pid, thread->pid, return_error, return_error_param,
3153		     (u64)tr->data_size, (u64)tr->offsets_size,
3154		     return_error_line);
3155
3156	{
3157		struct binder_transaction_log_entry *fe;
3158
3159		e->return_error = return_error;
3160		e->return_error_param = return_error_param;
3161		e->return_error_line = return_error_line;
3162		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3163		*fe = *e;
3164		/*
3165		 * write barrier to synchronize with initialization
3166		 * of log entry
3167		 */
3168		smp_wmb();
3169		WRITE_ONCE(e->debug_id_done, t_debug_id);
3170		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3171	}
3172
3173	BUG_ON(thread->return_error.cmd != BR_OK);
3174	if (in_reply_to) {
3175		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3176		binder_enqueue_thread_work(thread, &thread->return_error.work);
3177		binder_send_failed_reply(in_reply_to, return_error);
3178	} else {
3179		thread->return_error.cmd = return_error;
3180		binder_enqueue_thread_work(thread, &thread->return_error.work);
3181	}
3182}
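/*
 * Editor's sketch (not driver code): the sender-side layout that
 * binder_transaction() above parses. One object is placed in the data
 * buffer and its byte position recorded in the offsets array; the kernel
 * then walks the offsets, validates each object header, and translates
 * the object in place. Names are hypothetical; the structs are uapi.
 */
#if 0
static void example_fill_tr(struct binder_transaction_data *tr,
			    uint32_t handle, uint32_t code,
			    struct flat_binder_object *obj,
			    binder_size_t *offsets)
{
	offsets[0] = 0;			/* obj sits at byte 0 of the data */
	tr->target.handle = handle;
	tr->code = code;
	tr->flags = 0;			/* synchronous: expect a reply */
	tr->data_size = sizeof(*obj);
	tr->offsets_size = sizeof(binder_size_t);
	tr->data.ptr.buffer = (binder_uintptr_t)obj;
	tr->data.ptr.offsets = (binder_uintptr_t)offsets;
}
#endif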
3183
3184/**
3185 * binder_free_buf() - free the specified buffer
3186 * @proc:	binder proc that owns the buffer
 * @thread:	binder thread performing the buffer release
3187 * @buffer:	buffer to be freed
3188 *
3189 * If the buffer is for an async transaction, enqueue the next async
3190 * transaction from the node.
3191 *
3192 * Clean up the buffer and free it.
3193 */
3194static void
3195binder_free_buf(struct binder_proc *proc,
3196		struct binder_thread *thread,
3197		struct binder_buffer *buffer)
3198{
3199	binder_inner_proc_lock(proc);
3200	if (buffer->transaction) {
3201		buffer->transaction->buffer = NULL;
3202		buffer->transaction = NULL;
3203	}
3204	binder_inner_proc_unlock(proc);
3205	if (buffer->async_transaction && buffer->target_node) {
3206		struct binder_node *buf_node;
3207		struct binder_work *w;
3208
3209		buf_node = buffer->target_node;
3210		binder_node_inner_lock(buf_node);
3211		BUG_ON(!buf_node->has_async_transaction);
3212		BUG_ON(buf_node->proc != proc);
3213		w = binder_dequeue_work_head_ilocked(
3214				&buf_node->async_todo);
3215		if (!w) {
3216			buf_node->has_async_transaction = false;
3217		} else {
3218			binder_enqueue_work_ilocked(
3219					w, &proc->todo);
3220			binder_wakeup_proc_ilocked(proc);
3221		}
3222		binder_node_inner_unlock(buf_node);
3223	}
3224	trace_binder_transaction_buffer_release(buffer);
3225	binder_transaction_buffer_release(proc, thread, buffer, 0, false);
3226	binder_alloc_free_buf(&proc->alloc, buffer);
3227}
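/*
 * Editor's sketch (not driver code): binder_free_buf() above runs when
 * userspace returns a buffer with BC_FREE_BUFFER. A hypothetical
 * receiver-side helper using only uapi definitions; the packed struct
 * matches the wire format (a u32 command immediately followed by its
 * payload, with no padding). Error handling omitted.
 */
#if 0
static void example_free_buf(int binder_fd, binder_uintptr_t buffer_ptr)
{
	struct {
		uint32_t cmd;
		binder_uintptr_t ptr;
	} __attribute__((packed)) cmds = { BC_FREE_BUFFER, buffer_ptr };
	struct binder_write_read bwr = {
		.write_size = sizeof(cmds),
		.write_buffer = (binder_uintptr_t)&cmds,
	};

	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif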
3228
3229static int binder_thread_write(struct binder_proc *proc,
3230			struct binder_thread *thread,
3231			binder_uintptr_t binder_buffer, size_t size,
3232			binder_size_t *consumed)
3233{
3234	uint32_t cmd;
3235	struct binder_context *context = proc->context;
3236	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3237	void __user *ptr = buffer + *consumed;
3238	void __user *end = buffer + size;
3239
3240	while (ptr < end && thread->return_error.cmd == BR_OK) {
3241		int ret;
3242
3243		if (get_user(cmd, (uint32_t __user *)ptr))
3244			return -EFAULT;
3245		ptr += sizeof(uint32_t);
3246		trace_binder_command(cmd);
3247		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3248			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3249			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3250			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3251		}
3252		switch (cmd) {
3253		case BC_INCREFS:
3254		case BC_ACQUIRE:
3255		case BC_RELEASE:
3256		case BC_DECREFS: {
3257			uint32_t target;
3258			const char *debug_string;
3259			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3260			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3261			struct binder_ref_data rdata;
3262
3263			if (get_user(target, (uint32_t __user *)ptr))
3264				return -EFAULT;
3265
3266			ptr += sizeof(uint32_t);
3267			ret = -1;
3268			if (increment && !target) {
3269				struct binder_node *ctx_mgr_node;
3270
3271				mutex_lock(&context->context_mgr_node_lock);
3272				ctx_mgr_node = context->binder_context_mgr_node;
3273				if (ctx_mgr_node) {
3274					if (ctx_mgr_node->proc == proc) {
3275						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3276								  proc->pid, thread->pid);
3277						mutex_unlock(&context->context_mgr_node_lock);
3278						return -EINVAL;
3279					}
3280					ret = binder_inc_ref_for_node(
3281							proc, ctx_mgr_node,
3282							strong, NULL, &rdata);
3283				}
3284				mutex_unlock(&context->context_mgr_node_lock);
3285			}
3286			if (ret)
3287				ret = binder_update_ref_for_handle(
3288						proc, target, increment, strong,
3289						&rdata);
3290			if (!ret && rdata.desc != target) {
3291				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3292					proc->pid, thread->pid,
3293					target, rdata.desc);
3294			}
3295			switch (cmd) {
3296			case BC_INCREFS:
3297				debug_string = "IncRefs";
3298				break;
3299			case BC_ACQUIRE:
3300				debug_string = "Acquire";
3301				break;
3302			case BC_RELEASE:
3303				debug_string = "Release";
3304				break;
3305			case BC_DECREFS:
3306			default:
3307				debug_string = "DecRefs";
3308				break;
3309			}
3310			if (ret) {
3311				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3312					proc->pid, thread->pid, debug_string,
3313					strong, target, ret);
3314				break;
3315			}
3316			binder_debug(BINDER_DEBUG_USER_REFS,
3317				     "%d:%d %s ref %d desc %d s %d w %d\n",
3318				     proc->pid, thread->pid, debug_string,
3319				     rdata.debug_id, rdata.desc, rdata.strong,
3320				     rdata.weak);
3321			break;
3322		}
3323		case BC_INCREFS_DONE:
3324		case BC_ACQUIRE_DONE: {
3325			binder_uintptr_t node_ptr;
3326			binder_uintptr_t cookie;
3327			struct binder_node *node;
3328			bool free_node;
3329
3330			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3331				return -EFAULT;
3332			ptr += sizeof(binder_uintptr_t);
3333			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3334				return -EFAULT;
3335			ptr += sizeof(binder_uintptr_t);
3336			node = binder_get_node(proc, node_ptr);
3337			if (node == NULL) {
3338				binder_user_error("%d:%d %s u%016llx no match\n",
3339					proc->pid, thread->pid,
3340					cmd == BC_INCREFS_DONE ?
3341					"BC_INCREFS_DONE" :
3342					"BC_ACQUIRE_DONE",
3343					(u64)node_ptr);
3344				break;
3345			}
3346			if (cookie != node->cookie) {
3347				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3348					proc->pid, thread->pid,
3349					cmd == BC_INCREFS_DONE ?
3350					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3351					(u64)node_ptr, node->debug_id,
3352					(u64)cookie, (u64)node->cookie);
3353				binder_put_node(node);
3354				break;
3355			}
3356			binder_node_inner_lock(node);
3357			if (cmd == BC_ACQUIRE_DONE) {
3358				if (node->pending_strong_ref == 0) {
3359					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3360						proc->pid, thread->pid,
3361						node->debug_id);
3362					binder_node_inner_unlock(node);
3363					binder_put_node(node);
3364					break;
3365				}
3366				node->pending_strong_ref = 0;
3367			} else {
3368				if (node->pending_weak_ref == 0) {
3369					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3370						proc->pid, thread->pid,
3371						node->debug_id);
3372					binder_node_inner_unlock(node);
3373					binder_put_node(node);
3374					break;
3375				}
3376				node->pending_weak_ref = 0;
3377			}
3378			free_node = binder_dec_node_nilocked(node,
3379					cmd == BC_ACQUIRE_DONE, 0);
3380			WARN_ON(free_node);
3381			binder_debug(BINDER_DEBUG_USER_REFS,
3382				     "%d:%d %s node %d ls %d lw %d tr %d\n",
3383				     proc->pid, thread->pid,
3384				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3385				     node->debug_id, node->local_strong_refs,
3386				     node->local_weak_refs, node->tmp_refs);
3387			binder_node_inner_unlock(node);
3388			binder_put_node(node);
3389			break;
3390		}
3391		case BC_ATTEMPT_ACQUIRE:
3392			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3393			return -EINVAL;
3394		case BC_ACQUIRE_RESULT:
3395			pr_err("BC_ACQUIRE_RESULT not supported\n");
3396			return -EINVAL;
3397
3398		case BC_FREE_BUFFER: {
3399			binder_uintptr_t data_ptr;
3400			struct binder_buffer *buffer;
3401
3402			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3403				return -EFAULT;
3404			ptr += sizeof(binder_uintptr_t);
3405
3406			buffer = binder_alloc_prepare_to_free(&proc->alloc,
3407							      data_ptr);
3408			if (IS_ERR_OR_NULL(buffer)) {
3409				if (PTR_ERR(buffer) == -EPERM) {
3410					binder_user_error(
3411						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3412						proc->pid, thread->pid,
3413						(u64)data_ptr);
3414				} else {
3415					binder_user_error(
3416						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
3417						proc->pid, thread->pid,
3418						(u64)data_ptr);
3419				}
3420				break;
3421			}
3422			binder_debug(BINDER_DEBUG_FREE_BUFFER,
3423				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3424				     proc->pid, thread->pid, (u64)data_ptr,
3425				     buffer->debug_id,
3426				     buffer->transaction ? "active" : "finished");
3427			binder_free_buf(proc, thread, buffer);
3428			break;
3429		}
3430
3431		case BC_TRANSACTION_SG:
3432		case BC_REPLY_SG: {
3433			struct binder_transaction_data_sg tr;
3434
3435			if (copy_from_user(&tr, ptr, sizeof(tr)))
3436				return -EFAULT;
3437			ptr += sizeof(tr);
3438			binder_transaction(proc, thread, &tr.transaction_data,
3439					   cmd == BC_REPLY_SG, tr.buffers_size);
3440			break;
3441		}
3442		case BC_TRANSACTION:
3443		case BC_REPLY: {
3444			struct binder_transaction_data tr;
3445
3446			if (copy_from_user(&tr, ptr, sizeof(tr)))
3447				return -EFAULT;
3448			ptr += sizeof(tr);
3449			binder_transaction(proc, thread, &tr,
3450					   cmd == BC_REPLY, 0);
3451			break;
3452		}
3453
3454		case BC_REGISTER_LOOPER:
3455			binder_debug(BINDER_DEBUG_THREADS,
3456				     "%d:%d BC_REGISTER_LOOPER\n",
3457				     proc->pid, thread->pid);
3458			binder_inner_proc_lock(proc);
3459			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3460				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3461				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3462					proc->pid, thread->pid);
3463			} else if (proc->requested_threads == 0) {
3464				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3465				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3466					proc->pid, thread->pid);
3467			} else {
3468				proc->requested_threads--;
3469				proc->requested_threads_started++;
3470			}
3471			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3472			binder_inner_proc_unlock(proc);
3473			break;
3474		case BC_ENTER_LOOPER:
3475			binder_debug(BINDER_DEBUG_THREADS,
3476				     "%d:%d BC_ENTER_LOOPER\n",
3477				     proc->pid, thread->pid);
3478			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3479				thread->looper |= BINDER_LOOPER_STATE_INVALID;
3480				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3481					proc->pid, thread->pid);
3482			}
3483			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3484			break;
3485		case BC_EXIT_LOOPER:
3486			binder_debug(BINDER_DEBUG_THREADS,
3487				     "%d:%d BC_EXIT_LOOPER\n",
3488				     proc->pid, thread->pid);
3489			thread->looper |= BINDER_LOOPER_STATE_EXITED;
3490			break;
3491
3492		case BC_REQUEST_DEATH_NOTIFICATION:
3493		case BC_CLEAR_DEATH_NOTIFICATION: {
3494			uint32_t target;
3495			binder_uintptr_t cookie;
3496			struct binder_ref *ref;
3497			struct binder_ref_death *death = NULL;
3498
3499			if (get_user(target, (uint32_t __user *)ptr))
3500				return -EFAULT;
3501			ptr += sizeof(uint32_t);
3502			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3503				return -EFAULT;
3504			ptr += sizeof(binder_uintptr_t);
3505			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3506				/*
3507				 * Allocate memory for death notification
3508				 * before taking lock
3509				 */
3510				death = kzalloc(sizeof(*death), GFP_KERNEL);
3511				if (death == NULL) {
3512					WARN_ON(thread->return_error.cmd !=
3513						BR_OK);
3514					thread->return_error.cmd = BR_ERROR;
3515					binder_enqueue_thread_work(
3516						thread,
3517						&thread->return_error.work);
3518					binder_debug(
3519						BINDER_DEBUG_FAILED_TRANSACTION,
3520						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3521						proc->pid, thread->pid);
3522					break;
3523				}
3524			}
3525			binder_proc_lock(proc);
3526			ref = binder_get_ref_olocked(proc, target, false);
3527			if (ref == NULL) {
3528				binder_user_error("%d:%d %s invalid ref %d\n",
3529					proc->pid, thread->pid,
3530					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3531					"BC_REQUEST_DEATH_NOTIFICATION" :
3532					"BC_CLEAR_DEATH_NOTIFICATION",
3533					target);
3534				binder_proc_unlock(proc);
3535				kfree(death);
3536				break;
3537			}
3538
3539			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3540				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3541				     proc->pid, thread->pid,
3542				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3543				     "BC_REQUEST_DEATH_NOTIFICATION" :
3544				     "BC_CLEAR_DEATH_NOTIFICATION",
3545				     (u64)cookie, ref->data.debug_id,
3546				     ref->data.desc, ref->data.strong,
3547				     ref->data.weak, ref->node->debug_id);
3548
3549			binder_node_lock(ref->node);
3550			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3551				if (ref->death) {
3552					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3553						proc->pid, thread->pid);
3554					binder_node_unlock(ref->node);
3555					binder_proc_unlock(proc);
3556					kfree(death);
3557					break;
3558				}
3559				binder_stats_created(BINDER_STAT_DEATH);
3560				INIT_LIST_HEAD(&death->work.entry);
3561				death->cookie = cookie;
3562				ref->death = death;
3563				if (ref->node->proc == NULL) {
3564					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3565
3566					binder_inner_proc_lock(proc);
3567					binder_enqueue_work_ilocked(
3568						&ref->death->work, &proc->todo);
3569					binder_wakeup_proc_ilocked(proc);
3570					binder_inner_proc_unlock(proc);
3571				}
3572			} else {
3573				if (ref->death == NULL) {
3574					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3575						proc->pid, thread->pid);
3576					binder_node_unlock(ref->node);
3577					binder_proc_unlock(proc);
3578					break;
3579				}
3580				death = ref->death;
3581				if (death->cookie != cookie) {
3582					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3583						proc->pid, thread->pid,
3584						(u64)death->cookie,
3585						(u64)cookie);
3586					binder_node_unlock(ref->node);
3587					binder_proc_unlock(proc);
3588					break;
3589				}
3590				ref->death = NULL;
3591				binder_inner_proc_lock(proc);
3592				if (list_empty(&death->work.entry)) {
3593					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3594					if (thread->looper &
3595					    (BINDER_LOOPER_STATE_REGISTERED |
3596					     BINDER_LOOPER_STATE_ENTERED))
3597						binder_enqueue_thread_work_ilocked(
3598								thread,
3599								&death->work);
3600					else {
3601						binder_enqueue_work_ilocked(
3602								&death->work,
3603								&proc->todo);
3604						binder_wakeup_proc_ilocked(
3605								proc);
3606					}
3607				} else {
3608					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3609					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3610				}
3611				binder_inner_proc_unlock(proc);
3612			}
3613			binder_node_unlock(ref->node);
3614			binder_proc_unlock(proc);
3615		} break;
3616		case BC_DEAD_BINDER_DONE: {
3617			struct binder_work *w;
3618			binder_uintptr_t cookie;
3619			struct binder_ref_death *death = NULL;
3620
3621			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3622				return -EFAULT;
3623
3624			ptr += sizeof(cookie);
3625			binder_inner_proc_lock(proc);
3626			list_for_each_entry(w, &proc->delivered_death,
3627					    entry) {
3628				struct binder_ref_death *tmp_death =
3629					container_of(w,
3630						     struct binder_ref_death,
3631						     work);
3632
3633				if (tmp_death->cookie == cookie) {
3634					death = tmp_death;
3635					break;
3636				}
3637			}
3638			binder_debug(BINDER_DEBUG_DEAD_BINDER,
3639				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3640				     proc->pid, thread->pid, (u64)cookie,
3641				     death);
3642			if (death == NULL) {
3643				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3644					proc->pid, thread->pid, (u64)cookie);
3645				binder_inner_proc_unlock(proc);
3646				break;
3647			}
3648			binder_dequeue_work_ilocked(&death->work);
3649			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3650				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3651				if (thread->looper &
3652					(BINDER_LOOPER_STATE_REGISTERED |
3653					 BINDER_LOOPER_STATE_ENTERED))
3654					binder_enqueue_thread_work_ilocked(
3655						thread, &death->work);
3656				else {
3657					binder_enqueue_work_ilocked(
3658							&death->work,
3659							&proc->todo);
3660					binder_wakeup_proc_ilocked(proc);
3661				}
3662			}
3663			binder_inner_proc_unlock(proc);
3664		} break;
3665
3666		default:
3667			pr_err("%d:%d unknown command %d\n",
3668			       proc->pid, thread->pid, cmd);
3669			return -EINVAL;
3670		}
3671		*consumed = ptr - buffer;
3672	}
3673	return 0;
3674}
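/*
 * Editor's sketch (not driver code): the simplest userspace interaction
 * with binder_thread_write() above, marking the calling thread as a
 * looper. The helper is hypothetical; BC_ENTER_LOOPER, BINDER_WRITE_READ
 * and struct binder_write_read are uapi. Error handling omitted.
 */
#if 0
static void example_enter_looper(int binder_fd)
{
	uint32_t cmd = BC_ENTER_LOOPER;
	struct binder_write_read bwr = {
		.write_size = sizeof(cmd),
		.write_buffer = (binder_uintptr_t)&cmd,
	};

	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif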
3675
3676static void binder_stat_br(struct binder_proc *proc,
3677			   struct binder_thread *thread, uint32_t cmd)
3678{
3679	trace_binder_return(cmd);
3680	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3681		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3682		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3683		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3684	}
3685}
3686
3687static int binder_put_node_cmd(struct binder_proc *proc,
3688			       struct binder_thread *thread,
3689			       void __user **ptrp,
3690			       binder_uintptr_t node_ptr,
3691			       binder_uintptr_t node_cookie,
3692			       int node_debug_id,
3693			       uint32_t cmd, const char *cmd_name)
3694{
3695	void __user *ptr = *ptrp;
3696
3697	if (put_user(cmd, (uint32_t __user *)ptr))
3698		return -EFAULT;
3699	ptr += sizeof(uint32_t);
3700
3701	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3702		return -EFAULT;
3703	ptr += sizeof(binder_uintptr_t);
3704
3705	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3706		return -EFAULT;
3707	ptr += sizeof(binder_uintptr_t);
3708
3709	binder_stat_br(proc, thread, cmd);
3710	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3711		     proc->pid, thread->pid, cmd_name, node_debug_id,
3712		     (u64)node_ptr, (u64)node_cookie);
3713
3714	*ptrp = ptr;
3715	return 0;
3716}
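/*
 * Editor's sketch (not driver code): the record emitted by
 * binder_put_node_cmd() above is a u32 command followed by two
 * binder_uintptr_t values (ptr, then cookie), with no padding. A
 * hypothetical userspace reader consuming one such record from the
 * read buffer:
 */
#if 0
static const uint8_t *example_read_node_cmd(const uint8_t *rd,
					    uint32_t *cmd,
					    binder_uintptr_t *ptr,
					    binder_uintptr_t *cookie)
{
	memcpy(cmd, rd, sizeof(*cmd));
	rd += sizeof(*cmd);
	memcpy(ptr, rd, sizeof(*ptr));
	rd += sizeof(*ptr);
	memcpy(cookie, rd, sizeof(*cookie));
	rd += sizeof(*cookie);
	/* *cmd is BR_INCREFS, BR_ACQUIRE, BR_RELEASE or BR_DECREFS */
	return rd;
}
#endif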
3717
3718static int binder_wait_for_work(struct binder_thread *thread,
3719				bool do_proc_work)
3720{
3721	DEFINE_WAIT(wait);
3722	struct binder_proc *proc = thread->proc;
3723	int ret = 0;
3724
3725	freezer_do_not_count();
3726	binder_inner_proc_lock(proc);
3727	for (;;) {
3728		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3729		if (binder_has_work_ilocked(thread, do_proc_work))
3730			break;
3731		if (do_proc_work)
3732			list_add(&thread->waiting_thread_node,
3733				 &proc->waiting_threads);
3734		binder_inner_proc_unlock(proc);
3735		schedule();
3736		binder_inner_proc_lock(proc);
3737		list_del_init(&thread->waiting_thread_node);
3738		if (signal_pending(current)) {
3739			ret = -EINTR;
3740			break;
3741		}
3742	}
3743	finish_wait(&thread->wait, &wait);
3744	binder_inner_proc_unlock(proc);
3745	freezer_count();
3746
3747	return ret;
3748}
3749
3750/**
3751 * binder_apply_fd_fixups() - finish fd translation
3752 * @proc:         binder_proc associated with @t->buffer
3753 * @t:	binder transaction with list of fd fixups
3754 *
3755 * Now that we are in the context of the transaction target
3756 * process, we can allocate and install fds. Process the
3757 * list of fds to translate and fix up the buffer with the
3758 * new fds.
3759 *
3760 * If we fail to allocate an fd, then free the resources by
3761 * fput'ing files that have not been processed and deferred-closing
3762 * (binder_deferred_fd_close()) any fds that have already been installed.
3763 */
3764static int binder_apply_fd_fixups(struct binder_proc *proc,
3765				  struct binder_transaction *t)
3766{
3767	struct binder_txn_fd_fixup *fixup, *tmp;
3768	int ret = 0;
3769
3770	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3771		int fd = get_unused_fd_flags(O_CLOEXEC);
3772
3773		if (fd < 0) {
3774			binder_debug(BINDER_DEBUG_TRANSACTION,
3775				     "failed fd fixup txn %d fd %d\n",
3776				     t->debug_id, fd);
3777			ret = -ENOMEM;
3778			break;
3779		}
3780		binder_debug(BINDER_DEBUG_TRANSACTION,
3781			     "fd fixup txn %d fd %d\n",
3782			     t->debug_id, fd);
3783		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3784		fd_install(fd, fixup->file);
3785		fixup->file = NULL;
3786		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3787						fixup->offset, &fd,
3788						sizeof(u32))) {
3789			ret = -EINVAL;
3790			break;
3791		}
3792	}
3793	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3794		if (fixup->file) {
3795			fput(fixup->file);
3796		} else if (ret) {
3797			u32 fd;
3798			int err;
3799
3800			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
3801							    t->buffer,
3802							    fixup->offset,
3803							    sizeof(fd));
3804			WARN_ON(err);
3805			if (!err)
3806				binder_deferred_fd_close(fd);
3807		}
3808		list_del(&fixup->fixup_entry);
3809		kfree(fixup);
3810	}
3811
3812	return ret;
3813}
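/*
 * Editor's sketch (not driver code): after binder_apply_fd_fixups()
 * succeeds, every fixup->offset in the delivered buffer holds an fd that
 * is valid in the receiving process. A hypothetical receiver-side helper
 * pulling one out; fd_offset would come from the FD/FDA object layout.
 */
#if 0
static int example_use_received_fd(const uint8_t *buf, size_t fd_offset)
{
	uint32_t fd;

	memcpy(&fd, buf + fd_offset, sizeof(fd));
	return (int)fd;	/* the receiver owns it now; close() when done */
}
#endif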
3814
3815static int binder_thread_read(struct binder_proc *proc,
3816			      struct binder_thread *thread,
3817			      binder_uintptr_t binder_buffer, size_t size,
3818			      binder_size_t *consumed, int non_block)
3819{
3820	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3821	void __user *ptr = buffer + *consumed;
3822	void __user *end = buffer + size;
3823
3824	int ret = 0;
3825	int wait_for_proc_work;
3826
3827	if (*consumed == 0) {
3828		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3829			return -EFAULT;
3830		ptr += sizeof(uint32_t);
3831	}
3832
3833retry:
3834	binder_inner_proc_lock(proc);
3835	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3836	binder_inner_proc_unlock(proc);
3837
3838	thread->looper |= BINDER_LOOPER_STATE_WAITING;
3839
3840	trace_binder_wait_for_work(wait_for_proc_work,
3841				   !!thread->transaction_stack,
3842				   !binder_worklist_empty(proc, &thread->todo));
3843	if (wait_for_proc_work) {
3844		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3845					BINDER_LOOPER_STATE_ENTERED))) {
3846			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3847				proc->pid, thread->pid, thread->looper);
3848			wait_event_interruptible(binder_user_error_wait,
3849						 binder_stop_on_user_error < 2);
3850		}
3851		binder_set_nice(proc->default_priority);
3852	}
3853
3854	if (non_block) {
3855		if (!binder_has_work(thread, wait_for_proc_work))
3856			ret = -EAGAIN;
3857	} else {
3858		ret = binder_wait_for_work(thread, wait_for_proc_work);
3859	}
3860
3861	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3862
3863	if (ret)
3864		return ret;
3865
3866	while (1) {
3867		uint32_t cmd;
3868		struct binder_transaction_data_secctx tr;
3869		struct binder_transaction_data *trd = &tr.transaction_data;
3870		struct binder_work *w = NULL;
3871		struct list_head *list = NULL;
3872		struct binder_transaction *t = NULL;
3873		struct binder_thread *t_from;
3874		size_t trsize = sizeof(*trd);
3875
3876		binder_inner_proc_lock(proc);
3877		if (!binder_worklist_empty_ilocked(&thread->todo))
3878			list = &thread->todo;
3879		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3880			   wait_for_proc_work)
3881			list = &proc->todo;
3882		else {
3883			binder_inner_proc_unlock(proc);
3884
3885			/* no data added */
3886			if (ptr - buffer == 4 && !thread->looper_need_return)
3887				goto retry;
3888			break;
3889		}
3890
3891		if (end - ptr < sizeof(tr) + 4) {
3892			binder_inner_proc_unlock(proc);
3893			break;
3894		}
3895		w = binder_dequeue_work_head_ilocked(list);
3896		if (binder_worklist_empty_ilocked(&thread->todo))
3897			thread->process_todo = false;
3898
3899		switch (w->type) {
3900		case BINDER_WORK_TRANSACTION: {
3901			binder_inner_proc_unlock(proc);
3902			t = container_of(w, struct binder_transaction, work);
3903		} break;
3904		case BINDER_WORK_RETURN_ERROR: {
3905			struct binder_error *e = container_of(
3906					w, struct binder_error, work);
3907
3908			WARN_ON(e->cmd == BR_OK);
3909			binder_inner_proc_unlock(proc);
3910			if (put_user(e->cmd, (uint32_t __user *)ptr))
3911				return -EFAULT;
3912			cmd = e->cmd;
3913			e->cmd = BR_OK;
3914			ptr += sizeof(uint32_t);
3915
3916			binder_stat_br(proc, thread, cmd);
3917		} break;
3918		case BINDER_WORK_TRANSACTION_COMPLETE:
3919		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
3920			if (proc->oneway_spam_detection_enabled &&
3921				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
3922				cmd = BR_ONEWAY_SPAM_SUSPECT;
3923			else
3924				cmd = BR_TRANSACTION_COMPLETE;
3925			binder_inner_proc_unlock(proc);
3926			kfree(w);
3927			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3928			if (put_user(cmd, (uint32_t __user *)ptr))
3929				return -EFAULT;
3930			ptr += sizeof(uint32_t);
3931
3932			binder_stat_br(proc, thread, cmd);
3933			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3934				     "%d:%d BR_TRANSACTION_COMPLETE\n",
3935				     proc->pid, thread->pid);
3936		} break;
3937		case BINDER_WORK_NODE: {
3938			struct binder_node *node = container_of(w, struct binder_node, work);
3939			int strong, weak;
3940			binder_uintptr_t node_ptr = node->ptr;
3941			binder_uintptr_t node_cookie = node->cookie;
3942			int node_debug_id = node->debug_id;
3943			int has_weak_ref;
3944			int has_strong_ref;
3945			void __user *orig_ptr = ptr;
3946
3947			BUG_ON(proc != node->proc);
3948			strong = node->internal_strong_refs ||
3949					node->local_strong_refs;
3950			weak = !hlist_empty(&node->refs) ||
3951					node->local_weak_refs ||
3952					node->tmp_refs || strong;
3953			has_strong_ref = node->has_strong_ref;
3954			has_weak_ref = node->has_weak_ref;
3955
3956			if (weak && !has_weak_ref) {
3957				node->has_weak_ref = 1;
3958				node->pending_weak_ref = 1;
3959				node->local_weak_refs++;
3960			}
3961			if (strong && !has_strong_ref) {
3962				node->has_strong_ref = 1;
3963				node->pending_strong_ref = 1;
3964				node->local_strong_refs++;
3965			}
3966			if (!strong && has_strong_ref)
3967				node->has_strong_ref = 0;
3968			if (!weak && has_weak_ref)
3969				node->has_weak_ref = 0;
3970			if (!weak && !strong) {
3971				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3972					     "%d:%d node %d u%016llx c%016llx deleted\n",
3973					     proc->pid, thread->pid,
3974					     node_debug_id,
3975					     (u64)node_ptr,
3976					     (u64)node_cookie);
3977				rb_erase(&node->rb_node, &proc->nodes);
3978				binder_inner_proc_unlock(proc);
3979				binder_node_lock(node);
3980				/*
3981				 * Acquire the node lock before freeing the
3982				 * node to serialize with other threads that
3983				 * may have been holding the node lock while
3984				 * decrementing this node (avoids race where
3985				 * this thread frees while the other thread
3986				 * is unlocking the node after the final
3987				 * decrement)
3988				 */
3989				binder_node_unlock(node);
3990				binder_free_node(node);
3991			} else
3992				binder_inner_proc_unlock(proc);
3993
3994			if (weak && !has_weak_ref)
3995				ret = binder_put_node_cmd(
3996						proc, thread, &ptr, node_ptr,
3997						node_cookie, node_debug_id,
3998						BR_INCREFS, "BR_INCREFS");
3999			if (!ret && strong && !has_strong_ref)
4000				ret = binder_put_node_cmd(
4001						proc, thread, &ptr, node_ptr,
4002						node_cookie, node_debug_id,
4003						BR_ACQUIRE, "BR_ACQUIRE");
4004			if (!ret && !strong && has_strong_ref)
4005				ret = binder_put_node_cmd(
4006						proc, thread, &ptr, node_ptr,
4007						node_cookie, node_debug_id,
4008						BR_RELEASE, "BR_RELEASE");
4009			if (!ret && !weak && has_weak_ref)
4010				ret = binder_put_node_cmd(
4011						proc, thread, &ptr, node_ptr,
4012						node_cookie, node_debug_id,
4013						BR_DECREFS, "BR_DECREFS");
4014			if (orig_ptr == ptr)
4015				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4016					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4017					     proc->pid, thread->pid,
4018					     node_debug_id,
4019					     (u64)node_ptr,
4020					     (u64)node_cookie);
4021			if (ret)
4022				return ret;
4023		} break;
4024		case BINDER_WORK_DEAD_BINDER:
4025		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4026		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4027			struct binder_ref_death *death;
4028			uint32_t cmd;
4029			binder_uintptr_t cookie;
4030
4031			death = container_of(w, struct binder_ref_death, work);
4032			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4033				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4034			else
4035				cmd = BR_DEAD_BINDER;
4036			cookie = death->cookie;
4037
4038			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4039				     "%d:%d %s %016llx\n",
4040				      proc->pid, thread->pid,
4041				      cmd == BR_DEAD_BINDER ?
4042				      "BR_DEAD_BINDER" :
4043				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4044				      (u64)cookie);
4045			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4046				binder_inner_proc_unlock(proc);
4047				kfree(death);
4048				binder_stats_deleted(BINDER_STAT_DEATH);
4049			} else {
4050				binder_enqueue_work_ilocked(
4051						w, &proc->delivered_death);
4052				binder_inner_proc_unlock(proc);
4053			}
4054			if (put_user(cmd, (uint32_t __user *)ptr))
4055				return -EFAULT;
4056			ptr += sizeof(uint32_t);
4057			if (put_user(cookie,
4058				     (binder_uintptr_t __user *)ptr))
4059				return -EFAULT;
4060			ptr += sizeof(binder_uintptr_t);
4061			binder_stat_br(proc, thread, cmd);
4062			if (cmd == BR_DEAD_BINDER)
4063				goto done; /* DEAD_BINDER notifications can cause transactions */
4064		} break;
4065		default:
4066			binder_inner_proc_unlock(proc);
4067			pr_err("%d:%d: bad work type %d\n",
4068			       proc->pid, thread->pid, w->type);
4069			break;
4070		}
4071
4072		if (!t)
4073			continue;
4074
4075		BUG_ON(t->buffer == NULL);
4076		if (t->buffer->target_node) {
4077			struct binder_node *target_node = t->buffer->target_node;
4078
4079			trd->target.ptr = target_node->ptr;
4080			trd->cookie = target_node->cookie;
4081			t->saved_priority = task_nice(current);
4082			if (t->priority < target_node->min_priority &&
4083			    !(t->flags & TF_ONE_WAY))
4084				binder_set_nice(t->priority);
4085			else if (!(t->flags & TF_ONE_WAY) ||
4086				 t->saved_priority > target_node->min_priority)
4087				binder_set_nice(target_node->min_priority);
4088			cmd = BR_TRANSACTION;
4089		} else {
4090			trd->target.ptr = 0;
4091			trd->cookie = 0;
4092			cmd = BR_REPLY;
4093		}
4094		trd->code = t->code;
4095		trd->flags = t->flags;
4096		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4097
4098		t_from = binder_get_txn_from(t);
4099		if (t_from) {
4100			struct task_struct *sender = t_from->proc->tsk;
4101
4102			trd->sender_pid =
4103				task_tgid_nr_ns(sender,
4104						task_active_pid_ns(current));
4105		} else {
4106			trd->sender_pid = 0;
4107		}
4108
4109		ret = binder_apply_fd_fixups(proc, t);
4110		if (ret) {
4111			struct binder_buffer *buffer = t->buffer;
4112			bool oneway = !!(t->flags & TF_ONE_WAY);
4113			int tid = t->debug_id;
4114
4115			if (t_from)
4116				binder_thread_dec_tmpref(t_from);
4117			buffer->transaction = NULL;
4118			binder_cleanup_transaction(t, "fd fixups failed",
4119						   BR_FAILED_REPLY);
4120			binder_free_buf(proc, thread, buffer);
4121			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4122				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4123				     proc->pid, thread->pid,
4124				     oneway ? "async " :
4125					(cmd == BR_REPLY ? "reply " : ""),
4126				     tid, BR_FAILED_REPLY, ret, __LINE__);
4127			if (cmd == BR_REPLY) {
4128				cmd = BR_FAILED_REPLY;
4129				if (put_user(cmd, (uint32_t __user *)ptr))
4130					return -EFAULT;
4131				ptr += sizeof(uint32_t);
4132				binder_stat_br(proc, thread, cmd);
4133				break;
4134			}
4135			continue;
4136		}
4137		trd->data_size = t->buffer->data_size;
4138		trd->offsets_size = t->buffer->offsets_size;
4139		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4140		trd->data.ptr.offsets = trd->data.ptr.buffer +
4141					ALIGN(t->buffer->data_size,
4142					    sizeof(void *));
4143
4144		tr.secctx = t->security_ctx;
4145		if (t->security_ctx) {
4146			cmd = BR_TRANSACTION_SEC_CTX;
4147			trsize = sizeof(tr);
4148		}
4149		if (put_user(cmd, (uint32_t __user *)ptr)) {
4150			if (t_from)
4151				binder_thread_dec_tmpref(t_from);
4152
4153			binder_cleanup_transaction(t, "put_user failed",
4154						   BR_FAILED_REPLY);
4155
4156			return -EFAULT;
4157		}
4158		ptr += sizeof(uint32_t);
4159		if (copy_to_user(ptr, &tr, trsize)) {
4160			if (t_from)
4161				binder_thread_dec_tmpref(t_from);
4162
4163			binder_cleanup_transaction(t, "copy_to_user failed",
4164						   BR_FAILED_REPLY);
4165
4166			return -EFAULT;
4167		}
4168		ptr += trsize;
4169
4170		trace_binder_transaction_received(t);
4171		binder_stat_br(proc, thread, cmd);
4172		binder_debug(BINDER_DEBUG_TRANSACTION,
4173			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4174			     proc->pid, thread->pid,
4175			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4176				(cmd == BR_TRANSACTION_SEC_CTX) ?
4177				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4178			     t->debug_id, t_from ? t_from->proc->pid : 0,
4179			     t_from ? t_from->pid : 0, cmd,
4180			     t->buffer->data_size, t->buffer->offsets_size,
4181			     (u64)trd->data.ptr.buffer,
4182			     (u64)trd->data.ptr.offsets);
4183
4184		if (t_from)
4185			binder_thread_dec_tmpref(t_from);
4186		t->buffer->allow_user_free = 1;
4187		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4188			binder_inner_proc_lock(thread->proc);
4189			t->to_parent = thread->transaction_stack;
4190			t->to_thread = thread;
4191			thread->transaction_stack = t;
4192			binder_inner_proc_unlock(thread->proc);
4193		} else {
4194			binder_free_transaction(t);
4195		}
4196		break;
4197	}
4198
4199done:
4200
4201	*consumed = ptr - buffer;
4202	binder_inner_proc_lock(proc);
4203	if (proc->requested_threads == 0 &&
4204	    list_empty(&thread->proc->waiting_threads) &&
4205	    proc->requested_threads_started < proc->max_threads &&
4206	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4207	     BINDER_LOOPER_STATE_ENTERED))
4208	     /* user space fails to spawn a new thread if we leave this out */) {
4209		proc->requested_threads++;
4210		binder_inner_proc_unlock(proc);
4211		binder_debug(BINDER_DEBUG_THREADS,
4212			     "%d:%d BR_SPAWN_LOOPER\n",
4213			     proc->pid, thread->pid);
4214		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4215			return -EFAULT;
4216		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4217	} else
4218		binder_inner_proc_unlock(proc);
4219	return 0;
4220}
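
/*
 * Illustrative sketch (not part of the driver): how a user-space looper
 * thread typically drives binder_thread_read() above and consumes the
 * BR_* stream it produces. "fd" and handle_transaction() are hypothetical;
 * the uapi types come from <linux/android/binder.h> and error handling is
 * omitted.
 *
 *	uint32_t rbuf[128];
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.read_buffer = (binder_uintptr_t)rbuf;
 *	bwr.read_size = sizeof(rbuf);
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 *	char *p = (char *)rbuf;
 *	char *end = p + bwr.read_consumed;
 *	while (p < end) {
 *		uint32_t cmd = *(uint32_t *)p;
 *		p += sizeof(uint32_t);
 *		switch (cmd) {
 *		case BR_NOOP:
 *		case BR_TRANSACTION_COMPLETE:
 *			break;
 *		case BR_SPAWN_LOOPER:
 *			// start a thread that writes BC_REGISTER_LOOPER
 *			break;
 *		case BR_TRANSACTION:
 *			handle_transaction((struct binder_transaction_data *)p);
 *			p += sizeof(struct binder_transaction_data);
 *			break;
 *		default:
 *			p = end;	// unhandled command: stop parsing
 *			break;
 *		}
 *	}
 */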
4221
4222static void binder_release_work(struct binder_proc *proc,
4223				struct list_head *list)
4224{
4225	struct binder_work *w;
4226	enum binder_work_type wtype;
4227
4228	while (1) {
4229		binder_inner_proc_lock(proc);
4230		w = binder_dequeue_work_head_ilocked(list);
4231		wtype = w ? w->type : 0;
4232		binder_inner_proc_unlock(proc);
4233		if (!w)
4234			return;
4235
4236		switch (wtype) {
4237		case BINDER_WORK_TRANSACTION: {
4238			struct binder_transaction *t;
4239
4240			t = container_of(w, struct binder_transaction, work);
4241
4242			binder_cleanup_transaction(t, "process died.",
4243						   BR_DEAD_REPLY);
4244		} break;
4245		case BINDER_WORK_RETURN_ERROR: {
4246			struct binder_error *e = container_of(
4247					w, struct binder_error, work);
4248
4249			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4250				"undelivered TRANSACTION_ERROR: %u\n",
4251				e->cmd);
4252		} break;
4253		case BINDER_WORK_TRANSACTION_COMPLETE: {
4254			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4255				"undelivered TRANSACTION_COMPLETE\n");
4256			kfree(w);
4257			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4258		} break;
4259		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4260		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4261			struct binder_ref_death *death;
4262
4263			death = container_of(w, struct binder_ref_death, work);
4264			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4265				"undelivered death notification, %016llx\n",
4266				(u64)death->cookie);
4267			kfree(death);
4268			binder_stats_deleted(BINDER_STAT_DEATH);
4269		} break;
4270		case BINDER_WORK_NODE:
4271			break;
4272		default:
4273			pr_err("unexpected work type, %d, not freed\n",
4274			       wtype);
4275			break;
4276		}
4277	}
4278
4279}
4280
4281static struct binder_thread *binder_get_thread_ilocked(
4282		struct binder_proc *proc, struct binder_thread *new_thread)
4283{
4284	struct binder_thread *thread = NULL;
4285	struct rb_node *parent = NULL;
4286	struct rb_node **p = &proc->threads.rb_node;
4287
4288	while (*p) {
4289		parent = *p;
4290		thread = rb_entry(parent, struct binder_thread, rb_node);
4291
4292		if (current->pid < thread->pid)
4293			p = &(*p)->rb_left;
4294		else if (current->pid > thread->pid)
4295			p = &(*p)->rb_right;
4296		else
4297			return thread;
4298	}
4299	if (!new_thread)
4300		return NULL;
4301	thread = new_thread;
4302	binder_stats_created(BINDER_STAT_THREAD);
4303	thread->proc = proc;
4304	thread->pid = current->pid;
4305	atomic_set(&thread->tmp_ref, 0);
4306	init_waitqueue_head(&thread->wait);
4307	INIT_LIST_HEAD(&thread->todo);
4308	rb_link_node(&thread->rb_node, parent, p);
4309	rb_insert_color(&thread->rb_node, &proc->threads);
4310	thread->looper_need_return = true;
4311	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4312	thread->return_error.cmd = BR_OK;
4313	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4314	thread->reply_error.cmd = BR_OK;
4315	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4316	return thread;
4317}
4318
4319static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4320{
4321	struct binder_thread *thread;
4322	struct binder_thread *new_thread;
4323
4324	binder_inner_proc_lock(proc);
4325	thread = binder_get_thread_ilocked(proc, NULL);
4326	binder_inner_proc_unlock(proc);
4327	if (!thread) {
4328		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4329		if (new_thread == NULL)
4330			return NULL;
4331		binder_inner_proc_lock(proc);
4332		thread = binder_get_thread_ilocked(proc, new_thread);
4333		binder_inner_proc_unlock(proc);
4334		if (thread != new_thread)
4335			kfree(new_thread);
4336	}
4337	return thread;
4338}
4339
4340static void binder_free_proc(struct binder_proc *proc)
4341{
4342	struct binder_device *device;
4343
4344	BUG_ON(!list_empty(&proc->todo));
4345	BUG_ON(!list_empty(&proc->delivered_death));
4346	if (proc->outstanding_txns)
4347		pr_warn("%s: Unexpected outstanding_txns %d\n",
4348			__func__, proc->outstanding_txns);
4349	device = container_of(proc->context, struct binder_device, context);
4350	if (refcount_dec_and_test(&device->ref)) {
4351		kfree(proc->context->name);
4352		kfree(device);
4353	}
4354	binder_alloc_deferred_release(&proc->alloc);
4355	put_task_struct(proc->tsk);
4356	binder_stats_deleted(BINDER_STAT_PROC);
4357	kfree(proc);
4358}
4359
4360static void binder_free_thread(struct binder_thread *thread)
4361{
4362	BUG_ON(!list_empty(&thread->todo));
4363	binder_stats_deleted(BINDER_STAT_THREAD);
4364	binder_proc_dec_tmpref(thread->proc);
4365	kfree(thread);
4366}
4367
4368static int binder_thread_release(struct binder_proc *proc,
4369				 struct binder_thread *thread)
4370{
4371	struct binder_transaction *t;
4372	struct binder_transaction *send_reply = NULL;
4373	int active_transactions = 0;
4374	struct binder_transaction *last_t = NULL;
4375
4376	binder_inner_proc_lock(thread->proc);
4377	/*
4378	 * take a ref on the proc so it survives
4379	 * after we remove this thread from proc->threads.
4380	 * The corresponding decrement is done when we
4381	 * actually free the thread in binder_free_thread().
4382	 */
4383	proc->tmp_ref++;
4384	/*
4385	 * take a ref on this thread to ensure it
4386	 * survives while we are releasing it
4387	 */
4388	atomic_inc(&thread->tmp_ref);
4389	rb_erase(&thread->rb_node, &proc->threads);
4390	t = thread->transaction_stack;
4391	if (t) {
4392		spin_lock(&t->lock);
4393		if (t->to_thread == thread)
4394			send_reply = t;
4395	} else {
4396		__acquire(&t->lock);
4397	}
4398	thread->is_dead = true;
4399
4400	while (t) {
4401		last_t = t;
4402		active_transactions++;
4403		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4404			     "release %d:%d transaction %d %s, still active\n",
4405			      proc->pid, thread->pid,
4406			     t->debug_id,
4407			     (t->to_thread == thread) ? "in" : "out");
4408
4409		if (t->to_thread == thread) {
4410			thread->proc->outstanding_txns--;
4411			t->to_proc = NULL;
4412			t->to_thread = NULL;
4413			if (t->buffer) {
4414				t->buffer->transaction = NULL;
4415				t->buffer = NULL;
4416			}
4417			t = t->to_parent;
4418		} else if (t->from == thread) {
4419			t->from = NULL;
4420			t = t->from_parent;
4421		} else
4422			BUG();
4423		spin_unlock(&last_t->lock);
4424		if (t)
4425			spin_lock(&t->lock);
4426		else
4427			__acquire(&t->lock);
4428	}
4429	/* annotation for sparse, lock not acquired in last iteration above */
4430	__release(&t->lock);
4431
4432	/*
4433	 * If this thread used poll, make sure we remove the waitqueue
4434	 * from any epoll data structures holding it with POLLFREE.
4435	 * waitqueue_active() is safe to use here because we're holding
4436	 * the inner lock.
4437	 */
4438	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4439	    waitqueue_active(&thread->wait)) {
4440		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4441	}
4442
4443	binder_inner_proc_unlock(thread->proc);
4444
4445	/*
4446	 * This is needed to avoid races between wake_up_poll() above and
4447	 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4448	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4449	 * lock, so we can be sure it's done after calling synchronize_rcu().
4450	 */
4451	if (thread->looper & BINDER_LOOPER_STATE_POLL)
4452		synchronize_rcu();
4453
4454	if (send_reply)
4455		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4456	binder_release_work(proc, &thread->todo);
4457	binder_thread_dec_tmpref(thread);
4458	return active_transactions;
4459}
4460
4461static __poll_t binder_poll(struct file *filp,
4462				struct poll_table_struct *wait)
4463{
4464	struct binder_proc *proc = filp->private_data;
4465	struct binder_thread *thread = NULL;
4466	bool wait_for_proc_work;
4467
4468	thread = binder_get_thread(proc);
4469	if (!thread)
4470		return POLLERR;
4471
4472	binder_inner_proc_lock(thread->proc);
4473	thread->looper |= BINDER_LOOPER_STATE_POLL;
4474	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4475
4476	binder_inner_proc_unlock(thread->proc);
4477
4478	poll_wait(filp, &thread->wait, wait);
4479
4480	if (binder_has_work(thread, wait_for_proc_work))
4481		return EPOLLIN;
4482
4483	return 0;
4484}
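
/*
 * Illustrative sketch (not part of the driver): user space can use epoll
 * to wait for work instead of blocking inside BINDER_WRITE_READ; this is
 * the path that sets BINDER_LOOPER_STATE_POLL above. "binder_fd" is
 * hypothetical and error handling is omitted.
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
 *	epoll_wait(epfd, &ev, 1, -1);
 *	// EPOLLIN: a BINDER_WRITE_READ with O_NONBLOCK will find work
 */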
4485
4486static int binder_ioctl_write_read(struct file *filp,
4487				unsigned int cmd, unsigned long arg,
4488				struct binder_thread *thread)
4489{
4490	int ret = 0;
4491	struct binder_proc *proc = filp->private_data;
4492	unsigned int size = _IOC_SIZE(cmd);
4493	void __user *ubuf = (void __user *)arg;
4494	struct binder_write_read bwr;
4495
4496	if (size != sizeof(struct binder_write_read)) {
4497		ret = -EINVAL;
4498		goto out;
4499	}
4500	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4501		ret = -EFAULT;
4502		goto out;
4503	}
4504	binder_debug(BINDER_DEBUG_READ_WRITE,
4505		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4506		     proc->pid, thread->pid,
4507		     (u64)bwr.write_size, (u64)bwr.write_buffer,
4508		     (u64)bwr.read_size, (u64)bwr.read_buffer);
4509
4510	if (bwr.write_size > 0) {
4511		ret = binder_thread_write(proc, thread,
4512					  bwr.write_buffer,
4513					  bwr.write_size,
4514					  &bwr.write_consumed);
4515		trace_binder_write_done(ret);
4516		if (ret < 0) {
4517			bwr.read_consumed = 0;
4518			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4519				ret = -EFAULT;
4520			goto out;
4521		}
4522	}
4523	if (bwr.read_size > 0) {
4524		ret = binder_thread_read(proc, thread, bwr.read_buffer,
4525					 bwr.read_size,
4526					 &bwr.read_consumed,
4527					 filp->f_flags & O_NONBLOCK);
4528		trace_binder_read_done(ret);
4529		binder_inner_proc_lock(proc);
4530		if (!binder_worklist_empty_ilocked(&proc->todo))
4531			binder_wakeup_proc_ilocked(proc);
4532		binder_inner_proc_unlock(proc);
4533		if (ret < 0) {
4534			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4535				ret = -EFAULT;
4536			goto out;
4537		}
4538	}
4539	binder_debug(BINDER_DEBUG_READ_WRITE,
4540		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4541		     proc->pid, thread->pid,
4542		     (u64)bwr.write_consumed, (u64)bwr.write_size,
4543		     (u64)bwr.read_consumed, (u64)bwr.read_size);
4544	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4545		ret = -EFAULT;
4546		goto out;
4547	}
4548out:
4549	return ret;
4550}
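
/*
 * Illustrative sketch (not part of the driver): the smallest useful
 * BINDER_WRITE_READ call, writing a single BC_ENTER_LOOPER so that the
 * checks in binder_thread_read() accept this thread as a looper. "fd" is
 * hypothetical; error handling is omitted.
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)&cmd;
 *	bwr.write_size = sizeof(cmd);
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *	// on success, bwr.write_consumed == sizeof(cmd)
 */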
4551
4552static int binder_ioctl_set_ctx_mgr(struct file *filp,
4553				    struct flat_binder_object *fbo)
4554{
4555	int ret = 0;
4556	struct binder_proc *proc = filp->private_data;
4557	struct binder_context *context = proc->context;
4558	struct binder_node *new_node;
4559	kuid_t curr_euid = current_euid();
4560
4561	mutex_lock(&context->context_mgr_node_lock);
4562	if (context->binder_context_mgr_node) {
4563		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4564		ret = -EBUSY;
4565		goto out;
4566	}
4567	ret = security_binder_set_context_mgr(proc->tsk);
4568	if (ret < 0)
4569		goto out;
4570	if (uid_valid(context->binder_context_mgr_uid)) {
4571		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4572			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4573			       from_kuid(&init_user_ns, curr_euid),
4574			       from_kuid(&init_user_ns,
4575					 context->binder_context_mgr_uid));
4576			ret = -EPERM;
4577			goto out;
4578		}
4579	} else {
4580		context->binder_context_mgr_uid = curr_euid;
4581	}
4582	new_node = binder_new_node(proc, fbo);
4583	if (!new_node) {
4584		ret = -ENOMEM;
4585		goto out;
4586	}
4587	binder_node_lock(new_node);
4588	new_node->local_weak_refs++;
4589	new_node->local_strong_refs++;
4590	new_node->has_strong_ref = 1;
4591	new_node->has_weak_ref = 1;
4592	context->binder_context_mgr_node = new_node;
4593	binder_node_unlock(new_node);
4594	binder_put_node(new_node);
4595out:
4596	mutex_unlock(&context->context_mgr_node_lock);
4597	return ret;
4598}
4599
4600static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4601		struct binder_node_info_for_ref *info)
4602{
4603	struct binder_node *node;
4604	struct binder_context *context = proc->context;
4605	__u32 handle = info->handle;
4606
4607	if (info->strong_count || info->weak_count || info->reserved1 ||
4608	    info->reserved2 || info->reserved3) {
4609		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4610				  proc->pid);
4611		return -EINVAL;
4612	}
4613
4614	/* This ioctl may only be used by the context manager */
4615	mutex_lock(&context->context_mgr_node_lock);
4616	if (!context->binder_context_mgr_node ||
4617		context->binder_context_mgr_node->proc != proc) {
4618		mutex_unlock(&context->context_mgr_node_lock);
4619		return -EPERM;
4620	}
4621	mutex_unlock(&context->context_mgr_node_lock);
4622
4623	node = binder_get_node_from_ref(proc, handle, true, NULL);
4624	if (!node)
4625		return -EINVAL;
4626
4627	info->strong_count = node->local_strong_refs +
4628		node->internal_strong_refs;
4629	info->weak_count = node->local_weak_refs;
4630
4631	binder_put_node(node);
4632
4633	return 0;
4634}
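
/*
 * Illustrative sketch (not part of the driver): only the context manager
 * may call this, and every field except "handle" must be zero on entry,
 * as enforced above. "fd" and "h" are hypothetical.
 *
 *	struct binder_node_info_for_ref info = { .handle = h };
 *
 *	if (ioctl(fd, BINDER_GET_NODE_INFO_FOR_REF, &info) == 0)
 *		printf("strong %u weak %u\n",
 *		       info.strong_count, info.weak_count);
 */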
4635
4636static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4637				struct binder_node_debug_info *info)
4638{
4639	struct rb_node *n;
4640	binder_uintptr_t ptr = info->ptr;
4641
4642	memset(info, 0, sizeof(*info));
4643
4644	binder_inner_proc_lock(proc);
4645	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4646		struct binder_node *node = rb_entry(n, struct binder_node,
4647						    rb_node);
4648		if (node->ptr > ptr) {
4649			info->ptr = node->ptr;
4650			info->cookie = node->cookie;
4651			info->has_strong_ref = node->has_strong_ref;
4652			info->has_weak_ref = node->has_weak_ref;
4653			break;
4654		}
4655	}
4656	binder_inner_proc_unlock(proc);
4657
4658	return 0;
4659}
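
/*
 * Illustrative sketch (not part of the driver): since the lookup above
 * returns the first node with ptr strictly greater than the value passed
 * in (and zeroes the struct when there is none), user space can treat the
 * ioctl as a cursor and walk all of its nodes. "fd" is hypothetical.
 *
 *	struct binder_node_debug_info info = {0};
 *
 *	while (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) == 0 &&
 *	       info.ptr) {
 *		printf("node u%016llx hs %d hw %d\n",
 *		       (unsigned long long)info.ptr,
 *		       info.has_strong_ref, info.has_weak_ref);
 *	}
 */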
4660
4661static bool binder_txns_pending_ilocked(struct binder_proc *proc)
4662{
4663	struct rb_node *n;
4664	struct binder_thread *thread;
4665
4666	if (proc->outstanding_txns > 0)
4667		return true;
4668
4669	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
4670		thread = rb_entry(n, struct binder_thread, rb_node);
4671		if (thread->transaction_stack)
4672			return true;
4673	}
4674	return false;
4675}
4676
4677static int binder_ioctl_freeze(struct binder_freeze_info *info,
4678			       struct binder_proc *target_proc)
4679{
4680	int ret = 0;
4681
4682	if (!info->enable) {
4683		binder_inner_proc_lock(target_proc);
4684		target_proc->sync_recv = false;
4685		target_proc->async_recv = false;
4686		target_proc->is_frozen = false;
4687		binder_inner_proc_unlock(target_proc);
4688		return 0;
4689	}
4690
4691	/*
4692	 * Freezing the target. Prevent new transactions by
4693	 * setting frozen state. If timeout specified, wait
4694	 * for transactions to drain.
4695	 */
4696	binder_inner_proc_lock(target_proc);
4697	target_proc->sync_recv = false;
4698	target_proc->async_recv = false;
4699	target_proc->is_frozen = true;
4700	binder_inner_proc_unlock(target_proc);
4701
4702	if (info->timeout_ms > 0)
4703		ret = wait_event_interruptible_timeout(
4704			target_proc->freeze_wait,
4705			(!target_proc->outstanding_txns),
4706			msecs_to_jiffies(info->timeout_ms));
4707
4708	/* Check pending transactions that wait for reply */
4709	if (ret >= 0) {
4710		binder_inner_proc_lock(target_proc);
4711		if (binder_txns_pending_ilocked(target_proc))
4712			ret = -EAGAIN;
4713		binder_inner_proc_unlock(target_proc);
4714	}
4715
4716	if (ret < 0) {
4717		binder_inner_proc_lock(target_proc);
4718		target_proc->is_frozen = false;
4719		binder_inner_proc_unlock(target_proc);
4720	}
4721
4722	return ret;
4723}
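
/*
 * Illustrative sketch (not part of the driver): freezing a target pid
 * with a short drain timeout, retrying while outstanding transactions
 * keep the freeze failing with EAGAIN. "fd" and "target" are
 * hypothetical.
 *
 *	struct binder_freeze_info info = {
 *		.pid = target,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	while (ioctl(fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		usleep(1000);
 */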
4724
4725static int binder_ioctl_get_freezer_info(
4726				struct binder_frozen_status_info *info)
4727{
4728	struct binder_proc *target_proc;
4729	bool found = false;
4730	__u32 txns_pending;
4731
4732	info->sync_recv = 0;
4733	info->async_recv = 0;
4734
4735	mutex_lock(&binder_procs_lock);
4736	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4737		if (target_proc->pid == info->pid) {
4738			found = true;
4739			binder_inner_proc_lock(target_proc);
4740			txns_pending = binder_txns_pending_ilocked(target_proc);
4741			info->sync_recv |= target_proc->sync_recv |
4742					(txns_pending << 1);
4743			info->async_recv |= target_proc->async_recv;
4744			binder_inner_proc_unlock(target_proc);
4745		}
4746	}
4747	mutex_unlock(&binder_procs_lock);
4748
4749	if (!found)
4750		return -EINVAL;
4751
4752	return 0;
4753}
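
/*
 * Illustrative sketch (not part of the driver): decoding the status
 * assembled above. Bit 0 of sync_recv records a sync transaction seen
 * while frozen; bit 1 (txns_pending << 1) records transactions that were
 * still awaiting a reply. "fd" and "target" are hypothetical.
 *
 *	struct binder_frozen_status_info info = { .pid = target };
 *
 *	if (ioctl(fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		bool sync_seen = info.sync_recv & 1;
 *		bool txns_pending = info.sync_recv & 2;
 *		bool async_seen = info.async_recv & 1;
 *	}
 */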
4754
4755static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4756{
4757	int ret;
4758	struct binder_proc *proc = filp->private_data;
4759	struct binder_thread *thread;
4760	unsigned int size = _IOC_SIZE(cmd);
4761	void __user *ubuf = (void __user *)arg;
4762
4763	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
4764			proc->pid, current->pid, cmd, arg);*/
4765
4766	binder_selftest_alloc(&proc->alloc);
4767
4768	trace_binder_ioctl(cmd, arg);
4769
4770	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4771	if (ret)
4772		goto err_unlocked;
4773
4774	thread = binder_get_thread(proc);
4775	if (thread == NULL) {
4776		ret = -ENOMEM;
4777		goto err;
4778	}
4779
4780	switch (cmd) {
4781	case BINDER_WRITE_READ:
4782		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4783		if (ret)
4784			goto err;
4785		break;
4786	case BINDER_SET_MAX_THREADS: {
4787		int max_threads;
4788
4789		if (copy_from_user(&max_threads, ubuf,
4790				   sizeof(max_threads))) {
4791			ret = -EINVAL;
4792			goto err;
4793		}
4794		binder_inner_proc_lock(proc);
4795		proc->max_threads = max_threads;
4796		binder_inner_proc_unlock(proc);
4797		break;
4798	}
4799	case BINDER_SET_CONTEXT_MGR_EXT: {
4800		struct flat_binder_object fbo;
4801
4802		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4803			ret = -EINVAL;
4804			goto err;
4805		}
4806		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4807		if (ret)
4808			goto err;
4809		break;
4810	}
4811	case BINDER_SET_CONTEXT_MGR:
4812		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4813		if (ret)
4814			goto err;
4815		break;
4816	case BINDER_THREAD_EXIT:
4817		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4818			     proc->pid, thread->pid);
4819		binder_thread_release(proc, thread);
4820		thread = NULL;
4821		break;
4822	case BINDER_VERSION: {
4823		struct binder_version __user *ver = ubuf;
4824
4825		if (size != sizeof(struct binder_version)) {
4826			ret = -EINVAL;
4827			goto err;
4828		}
4829		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4830			     &ver->protocol_version)) {
4831			ret = -EINVAL;
4832			goto err;
4833		}
4834		break;
4835	}
4836	case BINDER_GET_NODE_INFO_FOR_REF: {
4837		struct binder_node_info_for_ref info;
4838
4839		if (copy_from_user(&info, ubuf, sizeof(info))) {
4840			ret = -EFAULT;
4841			goto err;
4842		}
4843
4844		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4845		if (ret < 0)
4846			goto err;
4847
4848		if (copy_to_user(ubuf, &info, sizeof(info))) {
4849			ret = -EFAULT;
4850			goto err;
4851		}
4852
4853		break;
4854	}
4855	case BINDER_GET_NODE_DEBUG_INFO: {
4856		struct binder_node_debug_info info;
4857
4858		if (copy_from_user(&info, ubuf, sizeof(info))) {
4859			ret = -EFAULT;
4860			goto err;
4861		}
4862
4863		ret = binder_ioctl_get_node_debug_info(proc, &info);
4864		if (ret < 0)
4865			goto err;
4866
4867		if (copy_to_user(ubuf, &info, sizeof(info))) {
4868			ret = -EFAULT;
4869			goto err;
4870		}
4871		break;
4872	}
4873	case BINDER_FREEZE: {
4874		struct binder_freeze_info info;
4875		struct binder_proc **target_procs = NULL, *target_proc;
4876		int target_procs_count = 0, i = 0;
4877
4878		ret = 0;
4879
4880		if (copy_from_user(&info, ubuf, sizeof(info))) {
4881			ret = -EFAULT;
4882			goto err;
4883		}
4884
4885		mutex_lock(&binder_procs_lock);
4886		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4887			if (target_proc->pid == info.pid)
4888				target_procs_count++;
4889		}
4890
4891		if (target_procs_count == 0) {
4892			mutex_unlock(&binder_procs_lock);
4893			ret = -EINVAL;
4894			goto err;
4895		}
4896
4897		target_procs = kcalloc(target_procs_count,
4898				       sizeof(struct binder_proc *),
4899				       GFP_KERNEL);
4900
4901		if (!target_procs) {
4902			mutex_unlock(&binder_procs_lock);
4903			ret = -ENOMEM;
4904			goto err;
4905		}
4906
4907		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4908			if (target_proc->pid != info.pid)
4909				continue;
4910
4911			binder_inner_proc_lock(target_proc);
4912			target_proc->tmp_ref++;
4913			binder_inner_proc_unlock(target_proc);
4914
4915			target_procs[i++] = target_proc;
4916		}
4917		mutex_unlock(&binder_procs_lock);
4918
4919		for (i = 0; i < target_procs_count; i++) {
4920			if (ret >= 0)
4921				ret = binder_ioctl_freeze(&info,
4922							  target_procs[i]);
4923
4924			binder_proc_dec_tmpref(target_procs[i]);
4925		}
4926
4927		kfree(target_procs);
4928
4929		if (ret < 0)
4930			goto err;
4931		break;
4932	}
4933	case BINDER_GET_FROZEN_INFO: {
4934		struct binder_frozen_status_info info;
4935
4936		if (copy_from_user(&info, ubuf, sizeof(info))) {
4937			ret = -EFAULT;
4938			goto err;
4939		}
4940
4941		ret = binder_ioctl_get_freezer_info(&info);
4942		if (ret < 0)
4943			goto err;
4944
4945		if (copy_to_user(ubuf, &info, sizeof(info))) {
4946			ret = -EFAULT;
4947			goto err;
4948		}
4949		break;
4950	}
4951	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
4952		uint32_t enable;
4953
4954		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
4955			ret = -EFAULT;
4956			goto err;
4957		}
4958		binder_inner_proc_lock(proc);
4959		proc->oneway_spam_detection_enabled = (bool)enable;
4960		binder_inner_proc_unlock(proc);
4961		break;
4962	}
4963	default:
4964		ret = -EINVAL;
4965		goto err;
4966	}
4967	ret = 0;
4968err:
4969	if (thread)
4970		thread->looper_need_return = false;
4971	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4972	if (ret && ret != -EINTR)
4973		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4974err_unlocked:
4975	trace_binder_ioctl_done(ret);
4976	return ret;
4977}
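
/*
 * Illustrative sketch (not part of the driver): typical process setup
 * against the ioctl interface above -- open the node, verify the protocol
 * version, then cap the pool of threads the driver may request with
 * BR_SPAWN_LOOPER. Error handling is omitted.
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	ioctl(fd, BINDER_VERSION, &vers);
 *	assert(vers.protocol_version == BINDER_CURRENT_PROTOCOL_VERSION);
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */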
4978
4979static void binder_vma_open(struct vm_area_struct *vma)
4980{
4981	struct binder_proc *proc = vma->vm_private_data;
4982
4983	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4984		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4985		     proc->pid, vma->vm_start, vma->vm_end,
4986		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4987		     (unsigned long)pgprot_val(vma->vm_page_prot));
4988}
4989
4990static void binder_vma_close(struct vm_area_struct *vma)
4991{
4992	struct binder_proc *proc = vma->vm_private_data;
4993
4994	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4995		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4996		     proc->pid, vma->vm_start, vma->vm_end,
4997		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4998		     (unsigned long)pgprot_val(vma->vm_page_prot));
4999	binder_alloc_vma_close(&proc->alloc);
5000}
5001
5002static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5003{
5004	return VM_FAULT_SIGBUS;
5005}
5006
5007static const struct vm_operations_struct binder_vm_ops = {
5008	.open = binder_vma_open,
5009	.close = binder_vma_close,
5010	.fault = binder_vm_fault,
5011};
5012
5013static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5014	{
5015	struct binder_proc *proc = filp->private_data;
5016
5017	if (proc->tsk != current->group_leader)
5018		return -EINVAL;
5019
5020	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5021		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5022		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5023		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5024		     (unsigned long)pgprot_val(vma->vm_page_prot));
5025
5026	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5027		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5028		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5029		return -EPERM;
5030	}
5031	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5032	vma->vm_flags &= ~VM_MAYWRITE;
5033
5034	vma->vm_ops = &binder_vm_ops;
5035	vma->vm_private_data = proc;
5036
5037	return binder_alloc_mmap_handler(&proc->alloc, vma);
5038}
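
/*
 * Illustrative sketch (not part of the driver): user space maps the
 * binder buffer area read-only. Writable mappings are rejected via
 * FORBIDDEN_MMAP_FLAGS above, and clearing VM_MAYWRITE prevents a later
 * mprotect(PROT_WRITE). "fd" is hypothetical and the size is an example.
 *
 *	void *base = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE,
 *			  fd, 0);
 *	// BR_TRANSACTION's data.ptr.buffer points into this mapping
 */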
5039
5040static int binder_open(struct inode *nodp, struct file *filp)
5041{
5042	struct binder_proc *proc, *itr;
5043	struct binder_device *binder_dev;
5044	struct binderfs_info *info;
5045	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5046	bool existing_pid = false;
5047
5048	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5049		     current->group_leader->pid, current->pid);
5050
5051	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5052	if (proc == NULL)
5053		return -ENOMEM;
5054	spin_lock_init(&proc->inner_lock);
5055	spin_lock_init(&proc->outer_lock);
5056	get_task_struct(current->group_leader);
5057	proc->tsk = current->group_leader;
5058	INIT_LIST_HEAD(&proc->todo);
5059	init_waitqueue_head(&proc->freeze_wait);
5060	proc->default_priority = task_nice(current);
5061	/* binderfs stashes devices in i_private */
5062	if (is_binderfs_device(nodp)) {
5063		binder_dev = nodp->i_private;
5064		info = nodp->i_sb->s_fs_info;
5065		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5066	} else {
5067		binder_dev = container_of(filp->private_data,
5068					  struct binder_device, miscdev);
5069	}
5070	refcount_inc(&binder_dev->ref);
5071	proc->context = &binder_dev->context;
5072	binder_alloc_init(&proc->alloc);
5073
5074	binder_stats_created(BINDER_STAT_PROC);
5075	proc->pid = current->group_leader->pid;
5076	INIT_LIST_HEAD(&proc->delivered_death);
5077	INIT_LIST_HEAD(&proc->waiting_threads);
5078	filp->private_data = proc;
5079
5080	mutex_lock(&binder_procs_lock);
5081	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5082		if (itr->pid == proc->pid) {
5083			existing_pid = true;
5084			break;
5085		}
5086	}
5087	hlist_add_head(&proc->proc_node, &binder_procs);
5088	mutex_unlock(&binder_procs_lock);
5089
5090	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5091		char strbuf[11];
5092
5093		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5094		/*
5095		 * proc debug entries are shared between contexts.
5096		 * Only create for the first PID to avoid debugfs log spamming.
5097		 * The printing code will anyway print all contexts for a given
5098		 * PID, so this is not a problem.
5099		 */
5100		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5101			binder_debugfs_dir_entry_proc,
5102			(void *)(unsigned long)proc->pid,
5103			&proc_fops);
5104	}
5105
5106	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5107		char strbuf[11];
5108		struct dentry *binderfs_entry;
5109
5110		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5111		/*
5112		 * Similar to debugfs, the process specific log file is shared
5113		 * between contexts. Only create for the first PID.
5114		 * This is OK since, as with debugfs, the log file will contain
5115		 * information on all contexts of a given PID.
5116		 */
5117		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5118			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5119		if (!IS_ERR(binderfs_entry)) {
5120			proc->binderfs_entry = binderfs_entry;
5121		} else {
5122			int error;
5123
5124			error = PTR_ERR(binderfs_entry);
5125			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5126				strbuf, error);
5127		}
5128	}
5129
5130	return 0;
5131}
5132
5133static int binder_flush(struct file *filp, fl_owner_t id)
5134{
5135	struct binder_proc *proc = filp->private_data;
5136
5137	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5138
5139	return 0;
5140}
5141
5142static void binder_deferred_flush(struct binder_proc *proc)
5143{
5144	struct rb_node *n;
5145	int wake_count = 0;
5146
5147	binder_inner_proc_lock(proc);
5148	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5149		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5150
5151		thread->looper_need_return = true;
5152		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5153			wake_up_interruptible(&thread->wait);
5154			wake_count++;
5155		}
5156	}
5157	binder_inner_proc_unlock(proc);
5158
5159	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5160		     "binder_flush: %d woke %d threads\n", proc->pid,
5161		     wake_count);
5162}
5163
5164static int binder_release(struct inode *nodp, struct file *filp)
5165{
5166	struct binder_proc *proc = filp->private_data;
5167
5168	debugfs_remove(proc->debugfs_entry);
5169
5170	if (proc->binderfs_entry) {
5171		binderfs_remove_file(proc->binderfs_entry);
5172		proc->binderfs_entry = NULL;
5173	}
5174
5175	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5176
5177	return 0;
5178}
5179
5180static int binder_node_release(struct binder_node *node, int refs)
5181{
5182	struct binder_ref *ref;
5183	int death = 0;
5184	struct binder_proc *proc = node->proc;
5185
5186	binder_release_work(proc, &node->async_todo);
5187
5188	binder_node_lock(node);
5189	binder_inner_proc_lock(proc);
5190	binder_dequeue_work_ilocked(&node->work);
5191	/*
5192	 * The caller must have taken a temporary ref on the node.
5193	 */
5194	BUG_ON(!node->tmp_refs);
5195	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5196		binder_inner_proc_unlock(proc);
5197		binder_node_unlock(node);
5198		binder_free_node(node);
5199
5200		return refs;
5201	}
5202
5203	node->proc = NULL;
5204	node->local_strong_refs = 0;
5205	node->local_weak_refs = 0;
5206	binder_inner_proc_unlock(proc);
5207
5208	spin_lock(&binder_dead_nodes_lock);
5209	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5210	spin_unlock(&binder_dead_nodes_lock);
5211
5212	hlist_for_each_entry(ref, &node->refs, node_entry) {
5213		refs++;
5214		/*
5215		 * Need the node lock to synchronize
5216		 * with new notification requests and the
5217		 * inner lock to synchronize with queued
5218		 * death notifications.
5219		 */
5220		binder_inner_proc_lock(ref->proc);
5221		if (!ref->death) {
5222			binder_inner_proc_unlock(ref->proc);
5223			continue;
5224		}
5225
5226		death++;
5227
5228		BUG_ON(!list_empty(&ref->death->work.entry));
5229		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5230		binder_enqueue_work_ilocked(&ref->death->work,
5231					    &ref->proc->todo);
5232		binder_wakeup_proc_ilocked(ref->proc);
5233		binder_inner_proc_unlock(ref->proc);
5234	}
5235
5236	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5237		     "node %d now dead, refs %d, death %d\n",
5238		     node->debug_id, refs, death);
5239	binder_node_unlock(node);
5240	binder_put_node(node);
5241
5242	return refs;
5243}
5244
5245static void binder_deferred_release(struct binder_proc *proc)
5246{
5247	struct binder_context *context = proc->context;
5248	struct rb_node *n;
5249	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5250
5251	mutex_lock(&binder_procs_lock);
5252	hlist_del(&proc->proc_node);
5253	mutex_unlock(&binder_procs_lock);
5254
5255	mutex_lock(&context->context_mgr_node_lock);
5256	if (context->binder_context_mgr_node &&
5257	    context->binder_context_mgr_node->proc == proc) {
5258		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5259			     "%s: %d context_mgr_node gone\n",
5260			     __func__, proc->pid);
5261		context->binder_context_mgr_node = NULL;
5262	}
5263	mutex_unlock(&context->context_mgr_node_lock);
5264	binder_inner_proc_lock(proc);
5265	/*
5266	 * Make sure proc stays alive after we
5267	 * remove all the threads
5268	 */
5269	proc->tmp_ref++;
5270
5271	proc->is_dead = true;
5272	proc->is_frozen = false;
5273	proc->sync_recv = false;
5274	proc->async_recv = false;
5275	threads = 0;
5276	active_transactions = 0;
5277	while ((n = rb_first(&proc->threads))) {
5278		struct binder_thread *thread;
5279
5280		thread = rb_entry(n, struct binder_thread, rb_node);
5281		binder_inner_proc_unlock(proc);
5282		threads++;
5283		active_transactions += binder_thread_release(proc, thread);
5284		binder_inner_proc_lock(proc);
5285	}
5286
5287	nodes = 0;
5288	incoming_refs = 0;
5289	while ((n = rb_first(&proc->nodes))) {
5290		struct binder_node *node;
5291
5292		node = rb_entry(n, struct binder_node, rb_node);
5293		nodes++;
5294		/*
5295		 * take a temporary ref on the node before
5296		 * calling binder_node_release() which will either
5297		 * kfree() the node or call binder_put_node()
5298		 */
5299		binder_inc_node_tmpref_ilocked(node);
5300		rb_erase(&node->rb_node, &proc->nodes);
5301		binder_inner_proc_unlock(proc);
5302		incoming_refs = binder_node_release(node, incoming_refs);
5303		binder_inner_proc_lock(proc);
5304	}
5305	binder_inner_proc_unlock(proc);
5306
5307	outgoing_refs = 0;
5308	binder_proc_lock(proc);
5309	while ((n = rb_first(&proc->refs_by_desc))) {
5310		struct binder_ref *ref;
5311
5312		ref = rb_entry(n, struct binder_ref, rb_node_desc);
5313		outgoing_refs++;
5314		binder_cleanup_ref_olocked(ref);
5315		binder_proc_unlock(proc);
5316		binder_free_ref(ref);
5317		binder_proc_lock(proc);
5318	}
5319	binder_proc_unlock(proc);
5320
5321	binder_release_work(proc, &proc->todo);
5322	binder_release_work(proc, &proc->delivered_death);
5323
5324	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5325		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5326		     __func__, proc->pid, threads, nodes, incoming_refs,
5327		     outgoing_refs, active_transactions);
5328
5329	binder_proc_dec_tmpref(proc);
5330}
5331
5332static void binder_deferred_func(struct work_struct *work)
5333{
5334	struct binder_proc *proc;
5335
5336	int defer;
5337
5338	do {
5339		mutex_lock(&binder_deferred_lock);
5340		if (!hlist_empty(&binder_deferred_list)) {
5341			proc = hlist_entry(binder_deferred_list.first,
5342					struct binder_proc, deferred_work_node);
5343			hlist_del_init(&proc->deferred_work_node);
5344			defer = proc->deferred_work;
5345			proc->deferred_work = 0;
5346		} else {
5347			proc = NULL;
5348			defer = 0;
5349		}
5350		mutex_unlock(&binder_deferred_lock);
5351
5352		if (defer & BINDER_DEFERRED_FLUSH)
5353			binder_deferred_flush(proc);
5354
5355		if (defer & BINDER_DEFERRED_RELEASE)
5356			binder_deferred_release(proc); /* frees proc */
5357	} while (proc);
5358}
5359static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5360
5361static void
5362binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5363{
5364	mutex_lock(&binder_deferred_lock);
5365	proc->deferred_work |= defer;
5366	if (hlist_unhashed(&proc->deferred_work_node)) {
5367		hlist_add_head(&proc->deferred_work_node,
5368				&binder_deferred_list);
5369		schedule_work(&binder_deferred_work);
5370	}
5371	mutex_unlock(&binder_deferred_lock);
5372}
5373
5374static void print_binder_transaction_ilocked(struct seq_file *m,
5375					     struct binder_proc *proc,
5376					     const char *prefix,
5377					     struct binder_transaction *t)
5378{
5379	struct binder_proc *to_proc;
5380	struct binder_buffer *buffer = t->buffer;
5381
5382	spin_lock(&t->lock);
5383	to_proc = t->to_proc;
5384	seq_printf(m,
5385		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5386		   prefix, t->debug_id, t,
5387		   t->from ? t->from->proc->pid : 0,
5388		   t->from ? t->from->pid : 0,
5389		   to_proc ? to_proc->pid : 0,
5390		   t->to_thread ? t->to_thread->pid : 0,
5391		   t->code, t->flags, t->priority, t->need_reply);
5392	spin_unlock(&t->lock);
5393
5394	if (proc != to_proc) {
5395		/*
5396		 * Can only safely deref buffer if we are holding the
5397		 * correct proc inner lock for this node
5398		 */
5399		seq_puts(m, "\n");
5400		return;
5401	}
5402
5403	if (buffer == NULL) {
5404		seq_puts(m, " buffer free\n");
5405		return;
5406	}
5407	if (buffer->target_node)
5408		seq_printf(m, " node %d", buffer->target_node->debug_id);
5409	seq_printf(m, " size %zd:%zd data %pK\n",
5410		   buffer->data_size, buffer->offsets_size,
5411		   buffer->user_data);
5412}
5413
5414static void print_binder_work_ilocked(struct seq_file *m,
5415				     struct binder_proc *proc,
5416				     const char *prefix,
5417				     const char *transaction_prefix,
5418				     struct binder_work *w)
5419{
5420	struct binder_node *node;
5421	struct binder_transaction *t;
5422
5423	switch (w->type) {
5424	case BINDER_WORK_TRANSACTION:
5425		t = container_of(w, struct binder_transaction, work);
5426		print_binder_transaction_ilocked(
5427				m, proc, transaction_prefix, t);
5428		break;
5429	case BINDER_WORK_RETURN_ERROR: {
5430		struct binder_error *e = container_of(
5431				w, struct binder_error, work);
5432
5433		seq_printf(m, "%stransaction error: %u\n",
5434			   prefix, e->cmd);
5435	} break;
5436	case BINDER_WORK_TRANSACTION_COMPLETE:
5437		seq_printf(m, "%stransaction complete\n", prefix);
5438		break;
5439	case BINDER_WORK_NODE:
5440		node = container_of(w, struct binder_node, work);
5441		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5442			   prefix, node->debug_id,
5443			   (u64)node->ptr, (u64)node->cookie);
5444		break;
5445	case BINDER_WORK_DEAD_BINDER:
5446		seq_printf(m, "%shas dead binder\n", prefix);
5447		break;
5448	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5449		seq_printf(m, "%shas cleared dead binder\n", prefix);
5450		break;
5451	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5452		seq_printf(m, "%shas cleared death notification\n", prefix);
5453		break;
5454	default:
5455		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5456		break;
5457	}
5458}
5459
5460static void print_binder_thread_ilocked(struct seq_file *m,
5461					struct binder_thread *thread,
5462					int print_always)
5463{
5464	struct binder_transaction *t;
5465	struct binder_work *w;
5466	size_t start_pos = m->count;
5467	size_t header_pos;
5468
5469	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5470			thread->pid, thread->looper,
5471			thread->looper_need_return,
5472			atomic_read(&thread->tmp_ref));
5473	header_pos = m->count;
5474	t = thread->transaction_stack;
5475	while (t) {
5476		if (t->from == thread) {
5477			print_binder_transaction_ilocked(m, thread->proc,
5478					"    outgoing transaction", t);
5479			t = t->from_parent;
5480		} else if (t->to_thread == thread) {
5481			print_binder_transaction_ilocked(m, thread->proc,
5482						 "    incoming transaction", t);
5483			t = t->to_parent;
5484		} else {
5485			print_binder_transaction_ilocked(m, thread->proc,
5486					"    bad transaction", t);
5487			t = NULL;
5488		}
5489	}
5490	list_for_each_entry(w, &thread->todo, entry) {
5491		print_binder_work_ilocked(m, thread->proc, "    ",
5492					  "    pending transaction", w);
5493	}
5494	if (!print_always && m->count == header_pos)
5495		m->count = start_pos;
5496}
5497
5498static void print_binder_node_nilocked(struct seq_file *m,
5499				       struct binder_node *node)
5500{
5501	struct binder_ref *ref;
5502	struct binder_work *w;
5503	int count;
5504
5505	count = 0;
5506	hlist_for_each_entry(ref, &node->refs, node_entry)
5507		count++;
5508
5509	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5510		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
5511		   node->has_strong_ref, node->has_weak_ref,
5512		   node->local_strong_refs, node->local_weak_refs,
5513		   node->internal_strong_refs, count, node->tmp_refs);
5514	if (count) {
5515		seq_puts(m, " proc");
5516		hlist_for_each_entry(ref, &node->refs, node_entry)
5517			seq_printf(m, " %d", ref->proc->pid);
5518	}
5519	seq_puts(m, "\n");
5520	if (node->proc) {
5521		list_for_each_entry(w, &node->async_todo, entry)
5522			print_binder_work_ilocked(m, node->proc, "    ",
5523					  "    pending async transaction", w);
5524	}
5525}
5526
5527static void print_binder_ref_olocked(struct seq_file *m,
5528				     struct binder_ref *ref)
5529{
5530	binder_node_lock(ref->node);
5531	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5532		   ref->data.debug_id, ref->data.desc,
5533		   ref->node->proc ? "" : "dead ",
5534		   ref->node->debug_id, ref->data.strong,
5535		   ref->data.weak, ref->death);
5536	binder_node_unlock(ref->node);
5537}
5538
5539static void print_binder_proc(struct seq_file *m,
5540			      struct binder_proc *proc, int print_all)
5541{
5542	struct binder_work *w;
5543	struct rb_node *n;
5544	size_t start_pos = m->count;
5545	size_t header_pos;
5546	struct binder_node *last_node = NULL;
5547
5548	seq_printf(m, "proc %d\n", proc->pid);
5549	seq_printf(m, "context %s\n", proc->context->name);
5550	header_pos = m->count;
5551
5552	binder_inner_proc_lock(proc);
5553	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5554		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5555						rb_node), print_all);
5556
5557	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5558		struct binder_node *node = rb_entry(n, struct binder_node,
5559						    rb_node);
5560		if (!print_all && !node->has_async_transaction)
5561			continue;
5562
5563		/*
5564		 * take a temporary reference on the node so it
5565		 * survives and isn't removed from the tree
5566		 * while we print it.
5567		 */
5568		binder_inc_node_tmpref_ilocked(node);
5569		/* Need to drop inner lock to take node lock */
5570		binder_inner_proc_unlock(proc);
5571		if (last_node)
5572			binder_put_node(last_node);
5573		binder_node_inner_lock(node);
5574		print_binder_node_nilocked(m, node);
5575		binder_node_inner_unlock(node);
5576		last_node = node;
5577		binder_inner_proc_lock(proc);
5578	}
5579	binder_inner_proc_unlock(proc);
5580	if (last_node)
5581		binder_put_node(last_node);
5582
5583	if (print_all) {
5584		binder_proc_lock(proc);
5585		for (n = rb_first(&proc->refs_by_desc);
5586		     n != NULL;
5587		     n = rb_next(n))
5588			print_binder_ref_olocked(m, rb_entry(n,
5589							    struct binder_ref,
5590							    rb_node_desc));
5591		binder_proc_unlock(proc);
5592	}
5593	binder_alloc_print_allocated(m, &proc->alloc);
5594	binder_inner_proc_lock(proc);
5595	list_for_each_entry(w, &proc->todo, entry)
5596		print_binder_work_ilocked(m, proc, "  ",
5597					  "  pending transaction", w);
5598	list_for_each_entry(w, &proc->delivered_death, entry) {
5599		seq_puts(m, "  has delivered dead binder\n");
5600		break;
5601	}
5602	binder_inner_proc_unlock(proc);
5603	if (!print_all && m->count == header_pos)
5604		m->count = start_pos;
5605}
5606
5607static const char * const binder_return_strings[] = {
5608	"BR_ERROR",
5609	"BR_OK",
5610	"BR_TRANSACTION",
5611	"BR_REPLY",
5612	"BR_ACQUIRE_RESULT",
5613	"BR_DEAD_REPLY",
5614	"BR_TRANSACTION_COMPLETE",
5615	"BR_INCREFS",
5616	"BR_ACQUIRE",
5617	"BR_RELEASE",
5618	"BR_DECREFS",
5619	"BR_ATTEMPT_ACQUIRE",
5620	"BR_NOOP",
5621	"BR_SPAWN_LOOPER",
5622	"BR_FINISHED",
5623	"BR_DEAD_BINDER",
5624	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
5625	"BR_FAILED_REPLY",
5626	"BR_FROZEN_REPLY",
5627	"BR_ONEWAY_SPAM_SUSPECT",
5628};
5629
5630static const char * const binder_command_strings[] = {
5631	"BC_TRANSACTION",
5632	"BC_REPLY",
5633	"BC_ACQUIRE_RESULT",
5634	"BC_FREE_BUFFER",
5635	"BC_INCREFS",
5636	"BC_ACQUIRE",
5637	"BC_RELEASE",
5638	"BC_DECREFS",
5639	"BC_INCREFS_DONE",
5640	"BC_ACQUIRE_DONE",
5641	"BC_ATTEMPT_ACQUIRE",
5642	"BC_REGISTER_LOOPER",
5643	"BC_ENTER_LOOPER",
5644	"BC_EXIT_LOOPER",
5645	"BC_REQUEST_DEATH_NOTIFICATION",
5646	"BC_CLEAR_DEATH_NOTIFICATION",
5647	"BC_DEAD_BINDER_DONE",
5648	"BC_TRANSACTION_SG",
5649	"BC_REPLY_SG",
5650};
5651
5652static const char * const binder_objstat_strings[] = {
5653	"proc",
5654	"thread",
5655	"node",
5656	"ref",
5657	"death",
5658	"transaction",
5659	"transaction_complete"
5660};
5661
5662static void print_binder_stats(struct seq_file *m, const char *prefix,
5663			       struct binder_stats *stats)
5664{
5665	int i;
5666
5667	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5668		     ARRAY_SIZE(binder_command_strings));
5669	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5670		int temp = atomic_read(&stats->bc[i]);
5671
5672		if (temp)
5673			seq_printf(m, "%s%s: %d\n", prefix,
5674				   binder_command_strings[i], temp);
5675	}
5676
5677	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5678		     ARRAY_SIZE(binder_return_strings));
5679	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5680		int temp = atomic_read(&stats->br[i]);
5681
5682		if (temp)
5683			seq_printf(m, "%s%s: %d\n", prefix,
5684				   binder_return_strings[i], temp);
5685	}
5686
5687	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5688		     ARRAY_SIZE(binder_objstat_strings));
5689	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5690		     ARRAY_SIZE(stats->obj_deleted));
5691	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5692		int created = atomic_read(&stats->obj_created[i]);
5693		int deleted = atomic_read(&stats->obj_deleted[i]);
5694
5695		if (created || deleted)
5696			seq_printf(m, "%s%s: active %d total %d\n",
5697				prefix,
5698				binder_objstat_strings[i],
5699				created - deleted,
5700				created);
5701	}
5702}
5703
5704static void print_binder_proc_stats(struct seq_file *m,
5705				    struct binder_proc *proc)
5706{
5707	struct binder_work *w;
5708	struct binder_thread *thread;
5709	struct rb_node *n;
5710	int count, strong, weak, ready_threads;
5711	size_t free_async_space =
5712		binder_alloc_get_free_async_space(&proc->alloc);
5713
5714	seq_printf(m, "proc %d\n", proc->pid);
5715	seq_printf(m, "context %s\n", proc->context->name);
5716	count = 0;
5717	ready_threads = 0;
5718	binder_inner_proc_lock(proc);
5719	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5720		count++;
5721
5722	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5723		ready_threads++;
5724
5725	seq_printf(m, "  threads: %d\n", count);
5726	seq_printf(m, "  requested threads: %d+%d/%d\n"
5727			"  ready threads %d\n"
5728			"  free async space %zd\n", proc->requested_threads,
5729			proc->requested_threads_started, proc->max_threads,
5730			ready_threads,
5731			free_async_space);
5732	count = 0;
5733	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5734		count++;
5735	binder_inner_proc_unlock(proc);
5736	seq_printf(m, "  nodes: %d\n", count);
5737	count = 0;
5738	strong = 0;
5739	weak = 0;
5740	binder_proc_lock(proc);
5741	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5742		struct binder_ref *ref = rb_entry(n, struct binder_ref,
5743						  rb_node_desc);
5744		count++;
5745		strong += ref->data.strong;
5746		weak += ref->data.weak;
5747	}
5748	binder_proc_unlock(proc);
5749	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5750
5751	count = binder_alloc_get_allocated_count(&proc->alloc);
5752	seq_printf(m, "  buffers: %d\n", count);
5753
5754	binder_alloc_print_pages(m, &proc->alloc);
5755
5756	count = 0;
5757	binder_inner_proc_lock(proc);
5758	list_for_each_entry(w, &proc->todo, entry) {
5759		if (w->type == BINDER_WORK_TRANSACTION)
5760			count++;
5761	}
5762	binder_inner_proc_unlock(proc);
5763	seq_printf(m, "  pending transactions: %d\n", count);
5764
5765	print_binder_stats(m, "  ", &proc->stats);
5766}
5767
5768
5769int binder_state_show(struct seq_file *m, void *unused)
5770{
5771	struct binder_proc *proc;
5772	struct binder_node *node;
5773	struct binder_node *last_node = NULL;
5774
5775	seq_puts(m, "binder state:\n");
5776
5777	spin_lock(&binder_dead_nodes_lock);
5778	if (!hlist_empty(&binder_dead_nodes))
5779		seq_puts(m, "dead nodes:\n");
5780	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5781		/*
5782		 * take a temporary reference on the node so it
5783		 * survives and isn't removed from the list
5784		 * while we print it.
5785		 */
5786		node->tmp_refs++;
5787		spin_unlock(&binder_dead_nodes_lock);
5788		if (last_node)
5789			binder_put_node(last_node);
5790		binder_node_lock(node);
5791		print_binder_node_nilocked(m, node);
5792		binder_node_unlock(node);
5793		last_node = node;
5794		spin_lock(&binder_dead_nodes_lock);
5795	}
5796	spin_unlock(&binder_dead_nodes_lock);
5797	if (last_node)
5798		binder_put_node(last_node);
5799
5800	mutex_lock(&binder_procs_lock);
5801	hlist_for_each_entry(proc, &binder_procs, proc_node)
5802		print_binder_proc(m, proc, 1);
5803	mutex_unlock(&binder_procs_lock);
5804
5805	return 0;
5806}
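/*
 * The dead-nodes walk above uses a hand-over-hand pattern: the
 * spinlock only protects list membership, and binder_put_node() may
 * take other locks (and free the node), so neither printing nor the
 * put may run under binder_dead_nodes_lock.  Distilled to illustrative
 * pseudocode (not kernel API):
 *
 *	lock(L);
 *	for_each_entry(n, list) {
 *		ref_get(n);		// n now outlives the unlock
 *		unlock(L);
 *		if (prev)
 *			ref_put(prev);	// never put while holding L
 *		use(n);
 *		prev = n;
 *		lock(L);
 *	}
 *	unlock(L);
 *	if (prev)
 *		ref_put(prev);
 */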
5807
5808int binder_stats_show(struct seq_file *m, void *unused)
5809{
5810	struct binder_proc *proc;
5811
5812	seq_puts(m, "binder stats:\n");
5813
5814	print_binder_stats(m, "", &binder_stats);
5815
5816	mutex_lock(&binder_procs_lock);
5817	hlist_for_each_entry(proc, &binder_procs, proc_node)
5818		print_binder_proc_stats(m, proc);
5819	mutex_unlock(&binder_procs_lock);
5820
5821	return 0;
5822}
5823
5824int binder_transactions_show(struct seq_file *m, void *unused)
5825{
5826	struct binder_proc *proc;
5827
5828	seq_puts(m, "binder transactions:\n");
5829	mutex_lock(&binder_procs_lock);
5830	hlist_for_each_entry(proc, &binder_procs, proc_node)
5831		print_binder_proc(m, proc, 0);
5832	mutex_unlock(&binder_procs_lock);
5833
5834	return 0;
5835}
5836
5837static int proc_show(struct seq_file *m, void *unused)
5838{
5839	struct binder_proc *itr;
5840	int pid = (unsigned long)m->private;
5841
5842	mutex_lock(&binder_procs_lock);
5843	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5844		if (itr->pid == pid) {
5845			seq_puts(m, "binder proc state:\n");
5846			print_binder_proc(m, itr, 1);
5847		}
5848	}
5849	mutex_unlock(&binder_procs_lock);
5850
5851	return 0;
5852}
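/*
 * m->private carries the pid stored when the per-process debugfs file
 * was created (see binder_open()).  The loop still scans the whole
 * list because one task may open several binder devices/contexts, each
 * with its own binder_proc, so more than one entry can match the pid.
 */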
5853
5854static void print_binder_transaction_log_entry(struct seq_file *m,
5855					struct binder_transaction_log_entry *e)
5856{
5857	int debug_id = READ_ONCE(e->debug_id_done);
5858	/*
5859	 * Read barrier to guarantee debug_id_done is read before
5860	 * we print the log values.
5861	 */
5862	smp_rmb();
5863	seq_printf(m,
5864		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5865		   e->debug_id, (e->call_type == 2) ? "reply" :
5866		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5867		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
5868		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
5869		   e->return_error, e->return_error_param,
5870		   e->return_error_line);
5871	/*
5872	 * Read barrier to guarantee debug_id_done is re-read only
5873	 * after we are done printing the fields of the entry.
5874	 */
5875	smp_rmb();
5876	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5877			"\n" : " (incomplete)\n");
5878}
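/*
 * The two smp_rmb() calls above pair with an smp_wmb() on the logging
 * side, which fills in the entry fields before updating debug_id_done.
 * If debug_id_done is nonzero and unchanged across our two reads,
 * every field printed belongs to one fully written entry; otherwise
 * the line is tagged "(incomplete)".  This seqcount-style scheme lets
 * readers run lock-free without ever stalling the transaction path.
 */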
5879
5880int binder_transaction_log_show(struct seq_file *m, void *unused)
5881{
5882	struct binder_transaction_log *log = m->private;
5883	unsigned int log_cur = atomic_read(&log->cur);
5884	unsigned int count;
5885	unsigned int cur;
5886	int i;
5887
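	/*
	 * log->cur is a free-running counter into a fixed-size ring.
	 * If the ring has not wrapped (log->full unset and fewer than
	 * ARRAY_SIZE() entries written), print slots 0..log_cur in
	 * order; otherwise start at the slot after the newest entry,
	 * which is the oldest, and print the whole array oldest-first.
	 */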
5888	count = log_cur + 1;
5889	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5890		0 : count % ARRAY_SIZE(log->entry);
5891	if (count > ARRAY_SIZE(log->entry) || log->full)
5892		count = ARRAY_SIZE(log->entry);
5893	for (i = 0; i < count; i++) {
5894		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5895
5896		print_binder_transaction_log_entry(m, &log->entry[index]);
5897	}
5898	return 0;
5899}
5900
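/*
 * These fops back every binder device node: the misc devices
 * registered from init_binder_device() below and, with
 * CONFIG_ANDROID_BINDERFS, the nodes binderfs instantiates (which is
 * why binder_fops is not static).
 */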
5901const struct file_operations binder_fops = {
5902	.owner = THIS_MODULE,
5903	.poll = binder_poll,
5904	.unlocked_ioctl = binder_ioctl,
5905	.compat_ioctl = compat_ptr_ioctl,
5906	.mmap = binder_mmap,
5907	.open = binder_open,
5908	.flush = binder_flush,
5909	.release = binder_release,
5910};
5911
5912static int __init init_binder_device(const char *name)
5913{
5914	int ret;
5915	struct binder_device *binder_device;
5916
5917	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5918	if (!binder_device)
5919		return -ENOMEM;
5920
5921	binder_device->miscdev.fops = &binder_fops;
5922	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5923	binder_device->miscdev.name = name;
5924
5925	refcount_set(&binder_device->ref, 1);
5926	binder_device->context.binder_context_mgr_uid = INVALID_UID;
5927	binder_device->context.name = name;
5928	mutex_init(&binder_device->context.context_mgr_node_lock);
5929
5930	ret = misc_register(&binder_device->miscdev);
5931	if (ret < 0) {
5932		kfree(binder_device);
5933		return ret;
5934	}
5935
5936	hlist_add_head(&binder_device->hlist, &binder_devices);
5937
5938	return ret;
5939}
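/*
 * Each device registered here uses a dynamic misc minor, so it
 * appears as /dev/<name> on systems running devtmpfs/udev.  The
 * binder_devices list records every success so binder_init() can
 * unwind all of them on a later failure.
 */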
5940
5941static int __init binder_init(void)
5942{
5943	int ret;
5944	char *device_name, *device_tmp;
5945	struct binder_device *device;
5946	struct hlist_node *tmp;
5947	char *device_names = NULL;
5948
5949	ret = binder_alloc_shrinker_init();
5950	if (ret)
5951		return ret;
5952
5953	atomic_set(&binder_transaction_log.cur, ~0U);
5954	atomic_set(&binder_transaction_log_failed.cur, ~0U);
5955
5956	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5957
5958	if (binder_debugfs_dir_entry_root) {
5959		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5960						 binder_debugfs_dir_entry_root);
5961
5962		debugfs_create_file("state",
5963				    0444,
5964				    binder_debugfs_dir_entry_root,
5965				    NULL,
5966				    &binder_state_fops);
5967		debugfs_create_file("stats",
5968				    0444,
5969				    binder_debugfs_dir_entry_root,
5970				    NULL,
5971				    &binder_stats_fops);
5972		debugfs_create_file("transactions",
5973				    0444,
5974				    binder_debugfs_dir_entry_root,
5975				    NULL,
5976				    &binder_transactions_fops);
5977		debugfs_create_file("transaction_log",
5978				    0444,
5979				    binder_debugfs_dir_entry_root,
5980				    &binder_transaction_log,
5981				    &binder_transaction_log_fops);
5982		debugfs_create_file("failed_transaction_log",
5983				    0444,
5984				    binder_debugfs_dir_entry_root,
5985				    &binder_transaction_log_failed,
5986				    &binder_transaction_log_fops);
5987	}
5988
5989	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
5990	    strcmp(binder_devices_param, "") != 0) {
5991		/*
5992		 * Copy the module parameter string, because we don't want to
5993		 * tokenize it in-place.
5994		 */
5995		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5996		if (!device_names) {
5997			ret = -ENOMEM;
5998			goto err_alloc_device_names_failed;
5999		}
6000
6001		device_tmp = device_names;
6002		while ((device_name = strsep(&device_tmp, ","))) {
6003			ret = init_binder_device(device_name);
6004			if (ret)
6005				goto err_init_binder_device_failed;
6006		}
6007	}
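	/*
	 * The loop above registers one misc device per comma-separated
	 * name, e.g. binder_devices="binder,hwbinder,vndbinder" (the
	 * usual Android Kconfig default) yields three devices.  strsep()
	 * rewrites the buffer as it tokenizes, which is why device_names
	 * is a kstrdup()'d copy rather than the parameter itself.
	 */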
6008
6009	ret = init_binderfs();
6010	if (ret)
6011		goto err_init_binder_device_failed;
6012
6013	return ret;
6014
6015err_init_binder_device_failed:
6016	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6017		misc_deregister(&device->miscdev);
6018		hlist_del(&device->hlist);
6019		kfree(device);
6020	}
6021
6022	kfree(device_names);
6023
6024err_alloc_device_names_failed:
6025	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6026
6027	return ret;
6028}
6029
6030device_initcall(binder_init);
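/*
 * device_initcall() runs binder_init() once during boot; the binder
 * Kconfig symbol is boolean, so the driver is effectively built-in
 * only and the error unwinding above is the sole teardown path.
 */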
6031
6032#define CREATE_TRACE_POINTS
6033#include "binder_trace.h"
6034
6035MODULE_LICENSE("GPL v2");