/* (removed "Loading..." page-scrape artifact) */
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _LINUX_BINDER_INTERNAL_H
4#define _LINUX_BINDER_INTERNAL_H
5
6#include <linux/export.h>
7#include <linux/fs.h>
8#include <linux/list.h>
9#include <linux/miscdevice.h>
10#include <linux/mutex.h>
11#include <linux/refcount.h>
12#include <linux/stddef.h>
13#include <linux/types.h>
14#include <linux/uidgid.h>
15#include <uapi/linux/android/binderfs.h>
16#include "binder_alloc.h"
17#include "dbitmap.h"
18
19struct binder_context {
20 struct binder_node *binder_context_mgr_node;
21 struct mutex context_mgr_node_lock;
22 kuid_t binder_context_mgr_uid;
23 const char *name;
24};
25
26/**
27 * struct binder_device - information about a binder device node
28 * @hlist: list of binder devices (only used for devices requested via
29 * CONFIG_ANDROID_BINDER_DEVICES)
30 * @miscdev: information about a binder character device node
31 * @context: binder context information
32 * @binderfs_inode: This is the inode of the root dentry of the super block
33 * belonging to a binderfs mount.
34 */
35struct binder_device {
36 struct hlist_node hlist;
37 struct miscdevice miscdev;
38 struct binder_context context;
39 struct inode *binderfs_inode;
40 refcount_t ref;
41};
42
/**
 * struct binderfs_mount_opts - mount options for binderfs
 * @max: maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};
52
53/**
54 * binderfs_info - information about a binderfs mount
55 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
56 * @control_dentry: This records the dentry of this binderfs mount
57 * binder-control device.
58 * @root_uid: uid that needs to be used when a new binder device is
59 * created.
60 * @root_gid: gid that needs to be used when a new binder device is
61 * created.
62 * @mount_opts: The mount options in use.
63 * @device_count: The current number of allocated binder devices.
64 * @proc_log_dir: Pointer to the directory dentry containing process-specific
65 * logs.
66 */
67struct binderfs_info {
68 struct ipc_namespace *ipc_ns;
69 struct dentry *control_dentry;
70 kuid_t root_uid;
71 kgid_t root_gid;
72 struct binderfs_mount_opts mount_opts;
73 int device_count;
74 struct dentry *proc_log_dir;
75};
76
extern const struct file_operations binder_fops;

extern char *binder_devices_param;
80
#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
					   const struct file_operations *fops,
					   void *data);
extern void binderfs_remove_file(struct dentry *dentry);
#else
/* No-op stubs so callers need no #ifdef when binderfs is not configured. */
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
static inline struct dentry *binderfs_create_file(struct dentry *dir,
						  const char *name,
						  const struct file_operations *fops,
						  void *data)
{
	return NULL;
}
static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
101
102#ifdef CONFIG_ANDROID_BINDERFS
103extern int __init init_binderfs(void);
104#else
105static inline int __init init_binderfs(void)
106{
107 return 0;
108}
109#endif
110
111struct binder_debugfs_entry {
112 const char *name;
113 umode_t mode;
114 const struct file_operations *fops;
115 void *data;
116};
117
118extern const struct binder_debugfs_entry binder_debugfs_entries[];
119
120#define binder_for_each_debugfs_entry(entry) \
121 for ((entry) = binder_debugfs_entries; \
122 (entry)->name; \
123 (entry)++)
124
/* Object categories counted in binder_stats obj_created/obj_deleted. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_FREEZE,
	BINDER_STAT_COUNT	/* must be last */
};
136
137struct binder_stats {
138 atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
139 atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
140 atomic_t obj_created[BINDER_STAT_COUNT];
141 atomic_t obj_deleted[BINDER_STAT_COUNT];
142};
143
144/**
145 * struct binder_work - work enqueued on a worklist
146 * @entry: node enqueued on list
147 * @type: type of work to be performed
148 *
149 * There are separate work lists for proc, thread, and node (async).
150 */
151struct binder_work {
152 struct list_head entry;
153
154 enum binder_work_type {
155 BINDER_WORK_TRANSACTION = 1,
156 BINDER_WORK_TRANSACTION_COMPLETE,
157 BINDER_WORK_TRANSACTION_PENDING,
158 BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
159 BINDER_WORK_RETURN_ERROR,
160 BINDER_WORK_NODE,
161 BINDER_WORK_DEAD_BINDER,
162 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
163 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
164 BINDER_WORK_FROZEN_BINDER,
165 BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
166 } type;
167};
168
169struct binder_error {
170 struct binder_work work;
171 uint32_t cmd;
172};
173
174/**
175 * struct binder_node - binder node bookkeeping
176 * @debug_id: unique ID for debugging
177 * (invariant after initialized)
178 * @lock: lock for node fields
179 * @work: worklist element for node work
180 * (protected by @proc->inner_lock)
181 * @rb_node: element for proc->nodes tree
182 * (protected by @proc->inner_lock)
183 * @dead_node: element for binder_dead_nodes list
184 * (protected by binder_dead_nodes_lock)
185 * @proc: binder_proc that owns this node
186 * (invariant after initialized)
187 * @refs: list of references on this node
188 * (protected by @lock)
189 * @internal_strong_refs: used to take strong references when
190 * initiating a transaction
191 * (protected by @proc->inner_lock if @proc
192 * and by @lock)
193 * @local_weak_refs: weak user refs from local process
194 * (protected by @proc->inner_lock if @proc
195 * and by @lock)
196 * @local_strong_refs: strong user refs from local process
197 * (protected by @proc->inner_lock if @proc
198 * and by @lock)
199 * @tmp_refs: temporary kernel refs
200 * (protected by @proc->inner_lock while @proc
201 * is valid, and by binder_dead_nodes_lock
202 * if @proc is NULL. During inc/dec and node release
203 * it is also protected by @lock to provide safety
204 * as the node dies and @proc becomes NULL)
205 * @ptr: userspace pointer for node
206 * (invariant, no lock needed)
207 * @cookie: userspace cookie for node
208 * (invariant, no lock needed)
209 * @has_strong_ref: userspace notified of strong ref
210 * (protected by @proc->inner_lock if @proc
211 * and by @lock)
212 * @pending_strong_ref: userspace has acked notification of strong ref
213 * (protected by @proc->inner_lock if @proc
214 * and by @lock)
215 * @has_weak_ref: userspace notified of weak ref
216 * (protected by @proc->inner_lock if @proc
217 * and by @lock)
218 * @pending_weak_ref: userspace has acked notification of weak ref
219 * (protected by @proc->inner_lock if @proc
220 * and by @lock)
221 * @has_async_transaction: async transaction to node in progress
222 * (protected by @lock)
223 * @accept_fds: file descriptor operations supported for node
224 * (invariant after initialized)
225 * @min_priority: minimum scheduling priority
226 * (invariant after initialized)
227 * @txn_security_ctx: require sender's security context
228 * (invariant after initialized)
229 * @async_todo: list of async work items
230 * (protected by @proc->inner_lock)
231 *
232 * Bookkeeping structure for binder nodes.
233 */
234struct binder_node {
235 int debug_id;
236 spinlock_t lock;
237 struct binder_work work;
238 union {
239 struct rb_node rb_node;
240 struct hlist_node dead_node;
241 };
242 struct binder_proc *proc;
243 struct hlist_head refs;
244 int internal_strong_refs;
245 int local_weak_refs;
246 int local_strong_refs;
247 int tmp_refs;
248 binder_uintptr_t ptr;
249 binder_uintptr_t cookie;
250 struct {
251 /*
252 * bitfield elements protected by
253 * proc inner_lock
254 */
255 u8 has_strong_ref:1;
256 u8 pending_strong_ref:1;
257 u8 has_weak_ref:1;
258 u8 pending_weak_ref:1;
259 };
260 struct {
261 /*
262 * invariant after initialization
263 */
264 u8 accept_fds:1;
265 u8 txn_security_ctx:1;
266 u8 min_priority;
267 };
268 bool has_async_transaction;
269 struct list_head async_todo;
270};
271
272struct binder_ref_death {
273 /**
274 * @work: worklist element for death notifications
275 * (protected by inner_lock of the proc that
276 * this ref belongs to)
277 */
278 struct binder_work work;
279 binder_uintptr_t cookie;
280};
281
282struct binder_ref_freeze {
283 struct binder_work work;
284 binder_uintptr_t cookie;
285 bool is_frozen:1;
286 bool sent:1;
287 bool resend:1;
288};
289
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
308
309/**
310 * struct binder_ref - struct to track references on nodes
311 * @data: binder_ref_data containing id, handle, and current refcounts
312 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
313 * @rb_node_node: node for lookup by @node in proc's rb_tree
314 * @node_entry: list entry for node->refs list in target node
315 * (protected by @node->lock)
316 * @proc: binder_proc containing ref
317 * @node: binder_node of target node. When cleaning up a
318 * ref for deletion in binder_cleanup_ref, a non-NULL
319 * @node indicates the node must be freed
320 * @death: pointer to death notification (ref_death) if requested
321 * (protected by @node->lock)
322 * @freeze: pointer to freeze notification (ref_freeze) if requested
323 * (protected by @node->lock)
324 *
325 * Structure to track references from procA to target node (on procB). This
326 * structure is unsafe to access without holding @proc->outer_lock.
327 */
328struct binder_ref {
329 /* Lookups needed: */
330 /* node + proc => ref (transaction) */
331 /* desc + proc => ref (transaction, inc/dec ref) */
332 /* node => refs + procs (proc exit) */
333 struct binder_ref_data data;
334 struct rb_node rb_node_desc;
335 struct rb_node rb_node_node;
336 struct hlist_node node_entry;
337 struct binder_proc *proc;
338 struct binder_node *node;
339 struct binder_ref_death *death;
340 struct binder_ref_freeze *freeze;
341};
342
343/**
344 * struct binder_proc - binder process bookkeeping
345 * @proc_node: element for binder_procs list
346 * @threads: rbtree of binder_threads in this proc
347 * (protected by @inner_lock)
348 * @nodes: rbtree of binder nodes associated with
349 * this proc ordered by node->ptr
350 * (protected by @inner_lock)
351 * @refs_by_desc: rbtree of refs ordered by ref->desc
352 * (protected by @outer_lock)
353 * @refs_by_node: rbtree of refs ordered by ref->node
354 * (protected by @outer_lock)
355 * @waiting_threads: threads currently waiting for proc work
356 * (protected by @inner_lock)
357 * @pid PID of group_leader of process
358 * (invariant after initialized)
359 * @tsk task_struct for group_leader of process
360 * (invariant after initialized)
361 * @cred struct cred associated with the `struct file`
362 * in binder_open()
363 * (invariant after initialized)
364 * @deferred_work_node: element for binder_deferred_list
365 * (protected by binder_deferred_lock)
366 * @deferred_work: bitmap of deferred work to perform
367 * (protected by binder_deferred_lock)
368 * @outstanding_txns: number of transactions to be transmitted before
369 * processes in freeze_wait are woken up
370 * (protected by @inner_lock)
371 * @is_dead: process is dead and awaiting free
372 * when outstanding transactions are cleaned up
373 * (protected by @inner_lock)
374 * @is_frozen: process is frozen and unable to service
375 * binder transactions
376 * (protected by @inner_lock)
377 * @sync_recv: process received sync transactions since last frozen
378 * bit 0: received sync transaction after being frozen
379 * bit 1: new pending sync transaction during freezing
380 * (protected by @inner_lock)
381 * @async_recv: process received async transactions since last frozen
382 * (protected by @inner_lock)
383 * @freeze_wait: waitqueue of processes waiting for all outstanding
384 * transactions to be processed
385 * (protected by @inner_lock)
386 * @dmap dbitmap to manage available reference descriptors
387 * (protected by @outer_lock)
388 * @todo: list of work for this process
389 * (protected by @inner_lock)
390 * @stats: per-process binder statistics
391 * (atomics, no lock needed)
392 * @delivered_death: list of delivered death notification
393 * (protected by @inner_lock)
394 * @delivered_freeze: list of delivered freeze notification
395 * (protected by @inner_lock)
396 * @max_threads: cap on number of binder threads
397 * (protected by @inner_lock)
398 * @requested_threads: number of binder threads requested but not
399 * yet started. In current implementation, can
400 * only be 0 or 1.
401 * (protected by @inner_lock)
402 * @requested_threads_started: number binder threads started
403 * (protected by @inner_lock)
404 * @tmp_ref: temporary reference to indicate proc is in use
405 * (protected by @inner_lock)
406 * @default_priority: default scheduler priority
407 * (invariant after initialized)
408 * @debugfs_entry: debugfs node
409 * @alloc: binder allocator bookkeeping
410 * @context: binder_context for this proc
411 * (invariant after initialized)
412 * @inner_lock: can nest under outer_lock and/or node lock
413 * @outer_lock: no nesting under innor or node lock
414 * Lock order: 1) outer, 2) node, 3) inner
415 * @binderfs_entry: process-specific binderfs log file
416 * @oneway_spam_detection_enabled: process enabled oneway spam detection
417 * or not
418 *
419 * Bookkeeping structure for binder processes
420 */
421struct binder_proc {
422 struct hlist_node proc_node;
423 struct rb_root threads;
424 struct rb_root nodes;
425 struct rb_root refs_by_desc;
426 struct rb_root refs_by_node;
427 struct list_head waiting_threads;
428 int pid;
429 struct task_struct *tsk;
430 const struct cred *cred;
431 struct hlist_node deferred_work_node;
432 int deferred_work;
433 int outstanding_txns;
434 bool is_dead;
435 bool is_frozen;
436 bool sync_recv;
437 bool async_recv;
438 wait_queue_head_t freeze_wait;
439 struct dbitmap dmap;
440 struct list_head todo;
441 struct binder_stats stats;
442 struct list_head delivered_death;
443 struct list_head delivered_freeze;
444 u32 max_threads;
445 int requested_threads;
446 int requested_threads_started;
447 int tmp_ref;
448 long default_priority;
449 struct dentry *debugfs_entry;
450 struct binder_alloc alloc;
451 struct binder_context *context;
452 spinlock_t inner_lock;
453 spinlock_t outer_lock;
454 struct dentry *binderfs_entry;
455 bool oneway_spam_detection_enabled;
456};
457
458/**
459 * struct binder_thread - binder thread bookkeeping
460 * @proc: binder process for this thread
461 * (invariant after initialization)
462 * @rb_node: element for proc->threads rbtree
463 * (protected by @proc->inner_lock)
464 * @waiting_thread_node: element for @proc->waiting_threads list
465 * (protected by @proc->inner_lock)
466 * @pid: PID for this thread
467 * (invariant after initialization)
468 * @looper: bitmap of looping state
469 * (only accessed by this thread)
470 * @looper_needs_return: looping thread needs to exit driver
471 * (no lock needed)
472 * @transaction_stack: stack of in-progress transactions for this thread
473 * (protected by @proc->inner_lock)
474 * @todo: list of work to do for this thread
475 * (protected by @proc->inner_lock)
476 * @process_todo: whether work in @todo should be processed
477 * (protected by @proc->inner_lock)
478 * @return_error: transaction errors reported by this thread
479 * (only accessed by this thread)
480 * @reply_error: transaction errors reported by target thread
481 * (protected by @proc->inner_lock)
482 * @ee: extended error information from this thread
483 * (protected by @proc->inner_lock)
484 * @wait: wait queue for thread work
485 * @stats: per-thread statistics
486 * (atomics, no lock needed)
487 * @tmp_ref: temporary reference to indicate thread is in use
488 * (atomic since @proc->inner_lock cannot
489 * always be acquired)
490 * @is_dead: thread is dead and awaiting free
491 * when outstanding transactions are cleaned up
492 * (protected by @proc->inner_lock)
493 *
494 * Bookkeeping structure for binder threads.
495 */
496struct binder_thread {
497 struct binder_proc *proc;
498 struct rb_node rb_node;
499 struct list_head waiting_thread_node;
500 int pid;
501 int looper; /* only modified by this thread */
502 bool looper_need_return; /* can be written by other thread */
503 struct binder_transaction *transaction_stack;
504 struct list_head todo;
505 bool process_todo;
506 struct binder_error return_error;
507 struct binder_error reply_error;
508 struct binder_extended_error ee;
509 wait_queue_head_t wait;
510 struct binder_stats stats;
511 atomic_t tmp_ref;
512 bool is_dead;
513};
514
515/**
516 * struct binder_txn_fd_fixup - transaction fd fixup list element
517 * @fixup_entry: list entry
518 * @file: struct file to be associated with new fd
519 * @offset: offset in buffer data to this fixup
520 * @target_fd: fd to use by the target to install @file
521 *
522 * List element for fd fixups in a transaction. Since file
523 * descriptors need to be allocated in the context of the
524 * target process, we pass each fd to be processed in this
525 * struct.
526 */
527struct binder_txn_fd_fixup {
528 struct list_head fixup_entry;
529 struct file *file;
530 size_t offset;
531 int target_fd;
532};
533
534struct binder_transaction {
535 int debug_id;
536 struct binder_work work;
537 struct binder_thread *from;
538 pid_t from_pid;
539 pid_t from_tid;
540 struct binder_transaction *from_parent;
541 struct binder_proc *to_proc;
542 struct binder_thread *to_thread;
543 struct binder_transaction *to_parent;
544 unsigned need_reply:1;
545 /* unsigned is_dead:1; */ /* not used at the moment */
546
547 struct binder_buffer *buffer;
548 unsigned int code;
549 unsigned int flags;
550 long priority;
551 long saved_priority;
552 kuid_t sender_euid;
553 ktime_t start_time;
554 struct list_head fd_fixups;
555 binder_uintptr_t security_ctx;
556 /**
557 * @lock: protects @from, @to_proc, and @to_thread
558 *
559 * @from, @to_proc, and @to_thread can be set to NULL
560 * during thread teardown
561 */
562 spinlock_t lock;
563};
564
565/**
566 * struct binder_object - union of flat binder object types
567 * @hdr: generic object header
568 * @fbo: binder object (nodes and refs)
569 * @fdo: file descriptor object
570 * @bbo: binder buffer pointer
571 * @fdao: file descriptor array
572 *
573 * Used for type-independent object copies
574 */
575struct binder_object {
576 union {
577 struct binder_object_header hdr;
578 struct flat_binder_object fbo;
579 struct binder_fd_object fdo;
580 struct binder_buffer_object bbo;
581 struct binder_fd_array_object fdao;
582 };
583};
584
585#endif /* _LINUX_BINDER_INTERNAL_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _LINUX_BINDER_INTERNAL_H
4#define _LINUX_BINDER_INTERNAL_H
5
6#include <linux/export.h>
7#include <linux/fs.h>
8#include <linux/list.h>
9#include <linux/miscdevice.h>
10#include <linux/mutex.h>
11#include <linux/refcount.h>
12#include <linux/stddef.h>
13#include <linux/types.h>
14#include <linux/uidgid.h>
15#include <uapi/linux/android/binderfs.h>
16#include "binder_alloc.h"
17
18struct binder_context {
19 struct binder_node *binder_context_mgr_node;
20 struct mutex context_mgr_node_lock;
21 kuid_t binder_context_mgr_uid;
22 const char *name;
23};
24
25/**
26 * struct binder_device - information about a binder device node
27 * @hlist: list of binder devices (only used for devices requested via
28 * CONFIG_ANDROID_BINDER_DEVICES)
29 * @miscdev: information about a binder character device node
30 * @context: binder context information
31 * @binderfs_inode: This is the inode of the root dentry of the super block
32 * belonging to a binderfs mount.
33 */
34struct binder_device {
35 struct hlist_node hlist;
36 struct miscdevice miscdev;
37 struct binder_context context;
38 struct inode *binderfs_inode;
39 refcount_t ref;
40};
41
42/**
43 * binderfs_mount_opts - mount options for binderfs
44 * @max: maximum number of allocatable binderfs binder devices
45 * @stats_mode: enable binder stats in binderfs.
46 */
47struct binderfs_mount_opts {
48 int max;
49 int stats_mode;
50};
51
52/**
53 * binderfs_info - information about a binderfs mount
54 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
55 * @control_dentry: This records the dentry of this binderfs mount
56 * binder-control device.
57 * @root_uid: uid that needs to be used when a new binder device is
58 * created.
59 * @root_gid: gid that needs to be used when a new binder device is
60 * created.
61 * @mount_opts: The mount options in use.
62 * @device_count: The current number of allocated binder devices.
63 * @proc_log_dir: Pointer to the directory dentry containing process-specific
64 * logs.
65 */
66struct binderfs_info {
67 struct ipc_namespace *ipc_ns;
68 struct dentry *control_dentry;
69 kuid_t root_uid;
70 kgid_t root_gid;
71 struct binderfs_mount_opts mount_opts;
72 int device_count;
73 struct dentry *proc_log_dir;
74};
75
76extern const struct file_operations binder_fops;
77
78extern char *binder_devices_param;
79
80#ifdef CONFIG_ANDROID_BINDERFS
81extern bool is_binderfs_device(const struct inode *inode);
82extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
83 const struct file_operations *fops,
84 void *data);
85extern void binderfs_remove_file(struct dentry *dentry);
86#else
87static inline bool is_binderfs_device(const struct inode *inode)
88{
89 return false;
90}
91static inline struct dentry *binderfs_create_file(struct dentry *dir,
92 const char *name,
93 const struct file_operations *fops,
94 void *data)
95{
96 return NULL;
97}
98static inline void binderfs_remove_file(struct dentry *dentry) {}
99#endif
100
101#ifdef CONFIG_ANDROID_BINDERFS
102extern int __init init_binderfs(void);
103#else
104static inline int __init init_binderfs(void)
105{
106 return 0;
107}
108#endif
109
/*
 * seq_file show handlers implemented in binder.c; DEFINE_SHOW_ATTRIBUTE()
 * generates the matching <name>_fops for each.
 */
int binder_stats_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(binder_stats);

int binder_state_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(binder_state);

int binder_transactions_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(binder_transactions);

int binder_transaction_log_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
121
122struct binder_transaction_log_entry {
123 int debug_id;
124 int debug_id_done;
125 int call_type;
126 int from_proc;
127 int from_thread;
128 int target_handle;
129 int to_proc;
130 int to_thread;
131 int to_node;
132 int data_size;
133 int offsets_size;
134 int return_error_line;
135 uint32_t return_error;
136 uint32_t return_error_param;
137 char context_name[BINDERFS_MAX_NAME + 1];
138};
139
140struct binder_transaction_log {
141 atomic_t cur;
142 bool full;
143 struct binder_transaction_log_entry entry[32];
144};
145
146enum binder_stat_types {
147 BINDER_STAT_PROC,
148 BINDER_STAT_THREAD,
149 BINDER_STAT_NODE,
150 BINDER_STAT_REF,
151 BINDER_STAT_DEATH,
152 BINDER_STAT_TRANSACTION,
153 BINDER_STAT_TRANSACTION_COMPLETE,
154 BINDER_STAT_COUNT
155};
156
157struct binder_stats {
158 atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
159 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
160 atomic_t obj_created[BINDER_STAT_COUNT];
161 atomic_t obj_deleted[BINDER_STAT_COUNT];
162};
163
164/**
165 * struct binder_work - work enqueued on a worklist
166 * @entry: node enqueued on list
167 * @type: type of work to be performed
168 *
169 * There are separate work lists for proc, thread, and node (async).
170 */
171struct binder_work {
172 struct list_head entry;
173
174 enum binder_work_type {
175 BINDER_WORK_TRANSACTION = 1,
176 BINDER_WORK_TRANSACTION_COMPLETE,
177 BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
178 BINDER_WORK_RETURN_ERROR,
179 BINDER_WORK_NODE,
180 BINDER_WORK_DEAD_BINDER,
181 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
182 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
183 } type;
184};
185
186struct binder_error {
187 struct binder_work work;
188 uint32_t cmd;
189};
190
191/**
192 * struct binder_node - binder node bookkeeping
193 * @debug_id: unique ID for debugging
194 * (invariant after initialized)
195 * @lock: lock for node fields
196 * @work: worklist element for node work
197 * (protected by @proc->inner_lock)
198 * @rb_node: element for proc->nodes tree
199 * (protected by @proc->inner_lock)
200 * @dead_node: element for binder_dead_nodes list
201 * (protected by binder_dead_nodes_lock)
202 * @proc: binder_proc that owns this node
203 * (invariant after initialized)
204 * @refs: list of references on this node
205 * (protected by @lock)
206 * @internal_strong_refs: used to take strong references when
207 * initiating a transaction
208 * (protected by @proc->inner_lock if @proc
209 * and by @lock)
210 * @local_weak_refs: weak user refs from local process
211 * (protected by @proc->inner_lock if @proc
212 * and by @lock)
213 * @local_strong_refs: strong user refs from local process
214 * (protected by @proc->inner_lock if @proc
215 * and by @lock)
216 * @tmp_refs: temporary kernel refs
217 * (protected by @proc->inner_lock while @proc
218 * is valid, and by binder_dead_nodes_lock
219 * if @proc is NULL. During inc/dec and node release
220 * it is also protected by @lock to provide safety
221 * as the node dies and @proc becomes NULL)
222 * @ptr: userspace pointer for node
223 * (invariant, no lock needed)
224 * @cookie: userspace cookie for node
225 * (invariant, no lock needed)
226 * @has_strong_ref: userspace notified of strong ref
227 * (protected by @proc->inner_lock if @proc
228 * and by @lock)
229 * @pending_strong_ref: userspace has acked notification of strong ref
230 * (protected by @proc->inner_lock if @proc
231 * and by @lock)
232 * @has_weak_ref: userspace notified of weak ref
233 * (protected by @proc->inner_lock if @proc
234 * and by @lock)
235 * @pending_weak_ref: userspace has acked notification of weak ref
236 * (protected by @proc->inner_lock if @proc
237 * and by @lock)
238 * @has_async_transaction: async transaction to node in progress
239 * (protected by @lock)
240 * @accept_fds: file descriptor operations supported for node
241 * (invariant after initialized)
242 * @min_priority: minimum scheduling priority
243 * (invariant after initialized)
244 * @txn_security_ctx: require sender's security context
245 * (invariant after initialized)
246 * @async_todo: list of async work items
247 * (protected by @proc->inner_lock)
248 *
249 * Bookkeeping structure for binder nodes.
250 */
251struct binder_node {
252 int debug_id;
253 spinlock_t lock;
254 struct binder_work work;
255 union {
256 struct rb_node rb_node;
257 struct hlist_node dead_node;
258 };
259 struct binder_proc *proc;
260 struct hlist_head refs;
261 int internal_strong_refs;
262 int local_weak_refs;
263 int local_strong_refs;
264 int tmp_refs;
265 binder_uintptr_t ptr;
266 binder_uintptr_t cookie;
267 struct {
268 /*
269 * bitfield elements protected by
270 * proc inner_lock
271 */
272 u8 has_strong_ref:1;
273 u8 pending_strong_ref:1;
274 u8 has_weak_ref:1;
275 u8 pending_weak_ref:1;
276 };
277 struct {
278 /*
279 * invariant after initialization
280 */
281 u8 accept_fds:1;
282 u8 txn_security_ctx:1;
283 u8 min_priority;
284 };
285 bool has_async_transaction;
286 struct list_head async_todo;
287};
288
289struct binder_ref_death {
290 /**
291 * @work: worklist element for death notifications
292 * (protected by inner_lock of the proc that
293 * this ref belongs to)
294 */
295 struct binder_work work;
296 binder_uintptr_t cookie;
297};
298
299/**
300 * struct binder_ref_data - binder_ref counts and id
301 * @debug_id: unique ID for the ref
302 * @desc: unique userspace handle for ref
303 * @strong: strong ref count (debugging only if not locked)
304 * @weak: weak ref count (debugging only if not locked)
305 *
306 * Structure to hold ref count and ref id information. Since
307 * the actual ref can only be accessed with a lock, this structure
308 * is used to return information about the ref to callers of
309 * ref inc/dec functions.
310 */
311struct binder_ref_data {
312 int debug_id;
313 uint32_t desc;
314 int strong;
315 int weak;
316};
317
318/**
319 * struct binder_ref - struct to track references on nodes
320 * @data: binder_ref_data containing id, handle, and current refcounts
321 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
322 * @rb_node_node: node for lookup by @node in proc's rb_tree
323 * @node_entry: list entry for node->refs list in target node
324 * (protected by @node->lock)
325 * @proc: binder_proc containing ref
326 * @node: binder_node of target node. When cleaning up a
327 * ref for deletion in binder_cleanup_ref, a non-NULL
328 * @node indicates the node must be freed
329 * @death: pointer to death notification (ref_death) if requested
330 * (protected by @node->lock)
331 *
332 * Structure to track references from procA to target node (on procB). This
333 * structure is unsafe to access without holding @proc->outer_lock.
334 */
335struct binder_ref {
336 /* Lookups needed: */
337 /* node + proc => ref (transaction) */
338 /* desc + proc => ref (transaction, inc/dec ref) */
339 /* node => refs + procs (proc exit) */
340 struct binder_ref_data data;
341 struct rb_node rb_node_desc;
342 struct rb_node rb_node_node;
343 struct hlist_node node_entry;
344 struct binder_proc *proc;
345 struct binder_node *node;
346 struct binder_ref_death *death;
347};
348
349/**
350 * struct binder_proc - binder process bookkeeping
351 * @proc_node: element for binder_procs list
352 * @threads: rbtree of binder_threads in this proc
353 * (protected by @inner_lock)
354 * @nodes: rbtree of binder nodes associated with
355 * this proc ordered by node->ptr
356 * (protected by @inner_lock)
357 * @refs_by_desc: rbtree of refs ordered by ref->desc
358 * (protected by @outer_lock)
359 * @refs_by_node: rbtree of refs ordered by ref->node
360 * (protected by @outer_lock)
361 * @waiting_threads: threads currently waiting for proc work
362 * (protected by @inner_lock)
363 * @pid PID of group_leader of process
364 * (invariant after initialized)
365 * @tsk task_struct for group_leader of process
366 * (invariant after initialized)
367 * @deferred_work_node: element for binder_deferred_list
368 * (protected by binder_deferred_lock)
369 * @deferred_work: bitmap of deferred work to perform
370 * (protected by binder_deferred_lock)
371 * @outstanding_txns: number of transactions to be transmitted before
372 * processes in freeze_wait are woken up
373 * (protected by @inner_lock)
374 * @is_dead: process is dead and awaiting free
375 * when outstanding transactions are cleaned up
376 * (protected by @inner_lock)
377 * @is_frozen: process is frozen and unable to service
378 * binder transactions
379 * (protected by @inner_lock)
380 * @sync_recv: process received sync transactions since last frozen
381 * bit 0: received sync transaction after being frozen
382 * bit 1: new pending sync transaction during freezing
383 * (protected by @inner_lock)
384 * @async_recv: process received async transactions since last frozen
385 * (protected by @inner_lock)
386 * @freeze_wait: waitqueue of processes waiting for all outstanding
387 * transactions to be processed
388 * (protected by @inner_lock)
389 * @todo: list of work for this process
390 * (protected by @inner_lock)
391 * @stats: per-process binder statistics
392 * (atomics, no lock needed)
393 * @delivered_death: list of delivered death notification
394 * (protected by @inner_lock)
395 * @max_threads: cap on number of binder threads
396 * (protected by @inner_lock)
397 * @requested_threads: number of binder threads requested but not
398 * yet started. In current implementation, can
399 * only be 0 or 1.
400 * (protected by @inner_lock)
401 * @requested_threads_started: number binder threads started
402 * (protected by @inner_lock)
403 * @tmp_ref: temporary reference to indicate proc is in use
404 * (protected by @inner_lock)
405 * @default_priority: default scheduler priority
406 * (invariant after initialized)
407 * @debugfs_entry: debugfs node
408 * @alloc: binder allocator bookkeeping
409 * @context: binder_context for this proc
410 * (invariant after initialized)
411 * @inner_lock: can nest under outer_lock and/or node lock
412 * @outer_lock: no nesting under innor or node lock
413 * Lock order: 1) outer, 2) node, 3) inner
414 * @binderfs_entry: process-specific binderfs log file
415 * @oneway_spam_detection_enabled: process enabled oneway spam detection
416 * or not
417 *
418 * Bookkeeping structure for binder processes
419 */
420struct binder_proc {
421 struct hlist_node proc_node;
422 struct rb_root threads;
423 struct rb_root nodes;
424 struct rb_root refs_by_desc;
425 struct rb_root refs_by_node;
426 struct list_head waiting_threads;
427 int pid;
428 struct task_struct *tsk;
429 struct hlist_node deferred_work_node;
430 int deferred_work;
431 int outstanding_txns;
432 bool is_dead;
433 bool is_frozen;
434 bool sync_recv;
435 bool async_recv;
436 wait_queue_head_t freeze_wait;
437
438 struct list_head todo;
439 struct binder_stats stats;
440 struct list_head delivered_death;
441 int max_threads;
442 int requested_threads;
443 int requested_threads_started;
444 int tmp_ref;
445 long default_priority;
446 struct dentry *debugfs_entry;
447 struct binder_alloc alloc;
448 struct binder_context *context;
449 spinlock_t inner_lock;
450 spinlock_t outer_lock;
451 struct dentry *binderfs_entry;
452 bool oneway_spam_detection_enabled;
453};
454
455/**
456 * struct binder_thread - binder thread bookkeeping
457 * @proc: binder process for this thread
458 * (invariant after initialization)
459 * @rb_node: element for proc->threads rbtree
460 * (protected by @proc->inner_lock)
461 * @waiting_thread_node: element for @proc->waiting_threads list
462 * (protected by @proc->inner_lock)
463 * @pid: PID for this thread
464 * (invariant after initialization)
465 * @looper: bitmap of looping state
466 * (only accessed by this thread)
467 * @looper_needs_return: looping thread needs to exit driver
468 * (no lock needed)
469 * @transaction_stack: stack of in-progress transactions for this thread
470 * (protected by @proc->inner_lock)
471 * @todo: list of work to do for this thread
472 * (protected by @proc->inner_lock)
473 * @process_todo: whether work in @todo should be processed
474 * (protected by @proc->inner_lock)
475 * @return_error: transaction errors reported by this thread
476 * (only accessed by this thread)
477 * @reply_error: transaction errors reported by target thread
478 * (protected by @proc->inner_lock)
479 * @wait: wait queue for thread work
480 * @stats: per-thread statistics
481 * (atomics, no lock needed)
482 * @tmp_ref: temporary reference to indicate thread is in use
483 * (atomic since @proc->inner_lock cannot
484 * always be acquired)
485 * @is_dead: thread is dead and awaiting free
486 * when outstanding transactions are cleaned up
487 * (protected by @proc->inner_lock)
488 *
489 * Bookkeeping structure for binder threads.
490 */
491struct binder_thread {
492 struct binder_proc *proc;
493 struct rb_node rb_node;
494 struct list_head waiting_thread_node;
495 int pid;
496 int looper; /* only modified by this thread */
497 bool looper_need_return; /* can be written by other thread */
498 struct binder_transaction *transaction_stack;
499 struct list_head todo;
500 bool process_todo;
501 struct binder_error return_error;
502 struct binder_error reply_error;
503 wait_queue_head_t wait;
504 struct binder_stats stats;
505 atomic_t tmp_ref;
506 bool is_dead;
507};
508
509/**
510 * struct binder_txn_fd_fixup - transaction fd fixup list element
511 * @fixup_entry: list entry
512 * @file: struct file to be associated with new fd
513 * @offset: offset in buffer data to this fixup
514 *
515 * List element for fd fixups in a transaction. Since file
516 * descriptors need to be allocated in the context of the
517 * target process, we pass each fd to be processed in this
518 * struct.
519 */
520struct binder_txn_fd_fixup {
521 struct list_head fixup_entry;
522 struct file *file;
523 size_t offset;
524};
525
/**
 * struct binder_transaction - in-flight transaction bookkeeping
 * @debug_id: unique ID for debugging output
 * @work: work item for the target's work list
 * @from: thread that originated the transaction
 * (protected by @lock; NULL during thread teardown)
 * @from_parent: previous transaction on the sender's transaction stack
 * @to_proc: target process
 * (protected by @lock; NULL during thread teardown)
 * @to_thread: target thread
 * (protected by @lock; NULL during thread teardown)
 * @to_parent: previous transaction on the target's transaction stack
 * @need_reply: set when the sender expects a reply
 * @buffer: binder buffer holding this transaction's data
 * @code: transaction command code supplied by userspace
 * @flags: transaction flags supplied by userspace
 * @priority: scheduler priority for processing the transaction
 * @saved_priority: priority to restore after processing
 * (NOTE(review): exact save/restore point is in binder.c - confirm)
 * @sender_euid: effective uid of the sending task
 * @fd_fixups: list of binder_txn_fd_fixup to apply in the
 * target process context
 * @security_ctx: sender security context
 * (binder_uintptr_t; presumably a buffer-space pointer - verify in binder.c)
 * @lock: see inline comment below
 *
 * Bookkeeping structure for a single binder transaction.
 */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */ /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
553
554/**
555 * struct binder_object - union of flat binder object types
556 * @hdr: generic object header
557 * @fbo: binder object (nodes and refs)
558 * @fdo: file descriptor object
559 * @bbo: binder buffer pointer
560 * @fdao: file descriptor array
561 *
562 * Used for type-independent object copies
563 */
564struct binder_object {
565 union {
566 struct binder_object_header hdr;
567 struct flat_binder_object fbo;
568 struct binder_fd_object fdo;
569 struct binder_buffer_object bbo;
570 struct binder_fd_array_object fdao;
571 };
572};
573
574extern struct binder_transaction_log binder_transaction_log;
575extern struct binder_transaction_log binder_transaction_log_failed;
576#endif /* _LINUX_BINDER_INTERNAL_H */