/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"
#include "dbitmap.h"

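/**
 * struct binder_context - global context of a binder device
 * @binder_context_mgr_node: binder node of the registered context manager
 * @context_mgr_node_lock: protects @binder_context_mgr_node
 * @binder_context_mgr_uid: euid of the registered context manager
 * @name: name of the binder device this context belongs to
 */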
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

/**
 * struct binder_device - information about a binder device node
 * @hlist: list of binder devices (only used for devices requested via
 *         CONFIG_ANDROID_BINDER_DEVICES)
 * @miscdev: information about a binder character device node
 * @context: binder context information
 * @binderfs_inode: This is the inode of the root dentry of the super block
 *                  belonging to a binderfs mount.
 * @ref: reference count for the device; a binderfs device is freed once
 *       this drops to zero
 */
struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
	struct inode *binderfs_inode;
	refcount_t ref;
};

/**
 * struct binderfs_mount_opts - mount options for binderfs
 * @max: maximum number of allocatable binderfs binder devices
 * @stats_mode: enable binder stats in binderfs.
 */
struct binderfs_mount_opts {
	int max;
	int stats_mode;
};

/**
 * struct binderfs_info - information about a binderfs mount
 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
 * @control_dentry: This records the dentry of this binderfs mount
 *                  binder-control device.
 * @root_uid: uid that needs to be used when a new binder device is
 *            created.
 * @root_gid: gid that needs to be used when a new binder device is
 *            created.
 * @mount_opts: The mount options in use.
 * @device_count: The current number of allocated binder devices.
 * @proc_log_dir: Pointer to the directory dentry containing process-specific
 *                logs.
 */
struct binderfs_info {
	struct ipc_namespace *ipc_ns;
	struct dentry *control_dentry;
	kuid_t root_uid;
	kgid_t root_gid;
	struct binderfs_mount_opts mount_opts;
	int device_count;
	struct dentry *proc_log_dir;
};

extern const struct file_operations binder_fops;

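/*
 * Comma-separated list of binder device names, taken from
 * CONFIG_ANDROID_BINDER_DEVICES (may be overridden via module parameter).
 */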
extern char *binder_devices_param;

#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
					   const struct file_operations *fops,
					   void *data);
extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
	return false;
}
static inline struct dentry *binderfs_create_file(struct dentry *dir,
					const char *name,
					const struct file_operations *fops,
					void *data)
{
	return NULL;
}
static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif

#ifdef CONFIG_ANDROID_BINDERFS
extern int __init init_binderfs(void);
#else
static inline int __init init_binderfs(void)
{
	return 0;
}
#endif

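/**
 * struct binder_debugfs_entry - describes one binder debugfs file
 * @name: file name
 * @mode: file permission mode
 * @fops: file operations implementing the file
 * @data: private data associated with the file
 */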
struct binder_debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	void *data;
};

extern const struct binder_debugfs_entry binder_debugfs_entries[];

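/* Walk binder_debugfs_entries[]; the array is terminated by an entry with a NULL @name. */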
#define binder_for_each_debugfs_entry(entry)	\
	for ((entry) = binder_debugfs_entries;	\
	     (entry)->name;			\
	     (entry)++)

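/* Object types tracked by the obj_created/obj_deleted counters in struct binder_stats. */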
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_FREEZE,
	BINDER_STAT_COUNT
};

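/*
 * Driver statistics: one counter per BR_* return code, one per BC_* command,
 * plus created/deleted object counts per binder_stat_types entry.
 */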
struct binder_stats {
	atomic_t br[_IOC_NR(BR_CLEAR_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t bc[_IOC_NR(BC_FREEZE_NOTIFICATION_DONE) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum binder_work_type {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_TRANSACTION_PENDING,
		BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
		BINDER_WORK_FROZEN_BINDER,
		BINDER_WORK_CLEAR_FREEZE_NOTIFICATION,
	} type;
};

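/**
 * struct binder_error - pending error code to report to userspace
 * @work: worklist element, queued as BINDER_WORK_RETURN_ERROR
 * @cmd: error return code (e.g. BR_DEAD_REPLY or BR_FAILED_REPLY)
 */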
struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 *	(invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 *	(protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 *	(protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 *	(protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 *	(invariant after initialized)
 * @refs: list of references on this node
 *	(protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *	initiating a transaction
 *	(protected by @proc->inner_lock if @proc
 *	and by @lock)
 * @local_weak_refs: weak user refs from local process
 *	(protected by @proc->inner_lock if @proc
 *	and by @lock)
 * @local_strong_refs: strong user refs from local process
 *	(protected by @proc->inner_lock if @proc
 *	and by @lock)
 * @tmp_refs: temporary kernel refs
 *	(protected by @proc->inner_lock while @proc
 *	is valid, and by binder_dead_nodes_lock
 *	if @proc is NULL. During inc/dec and node release
 *	it is also protected by @lock to provide safety
 *	as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 *	(invariant, no lock needed)
 * @cookie: userspace cookie for node
 *	(invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 *	(protected by @proc->inner_lock if @proc
 *	and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 *	(protected by @proc->inner_lock if @proc
 *	and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 *	(protected by @proc->inner_lock if @proc
 *	and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 *	(protected by @proc->inner_lock if @proc
 *	and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *	(protected by @lock)
 * @accept_fds: file descriptor operations supported for node
 *	(invariant after initialized)
 * @min_priority: minimum scheduling priority
 *	(invariant after initialized)
 * @txn_security_ctx: require sender's security context
 *	(invariant after initialized)
 * @async_todo: list of async work items
 *	(protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *	(protected by inner_lock of the proc that
	 *	this ref belongs to)
	 */
	struct binder_work work;
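	/**
	 * @cookie: userspace cookie registered with the death notification
	 *	request and echoed back when the notification is delivered
	 */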
	binder_uintptr_t cookie;
};

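/**
 * struct binder_ref_freeze - freeze notification bookkeeping for a ref
 * @work: worklist element for freeze notifications
 * @cookie: userspace cookie registered with the freeze notification request
 * @is_frozen: last frozen state queued for delivery to userspace
 * @sent: a notification has been sent and is awaiting its ack
 * @resend: state changed while a notification was in flight, so it must
 *	be sent again once the pending one is acked
 */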
struct binder_ref_freeze {
	struct binder_work work;
	binder_uintptr_t cookie;
	bool is_frozen:1;
	bool sent:1;
	bool resend:1;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 *	(protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 *	ref for deletion in binder_cleanup_ref, a non-NULL
 *	@node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 *	(protected by @node->lock)
 * @freeze: pointer to freeze notification (ref_freeze) if requested
 *	(protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
	struct binder_ref_freeze *freeze;
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 *	(protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 *	this proc ordered by node->ptr
 *	(protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 *	(protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 *	(protected by @outer_lock)
 * @waiting_threads: threads currently waiting for proc work
 *	(protected by @inner_lock)
 * @pid: PID of group_leader of process
 *	(invariant after initialized)
 * @tsk: task_struct for group_leader of process
 *	(invariant after initialized)
 * @cred: struct cred associated with the `struct file`
 *	in binder_open()
 *	(invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 *	(protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 *	(protected by binder_deferred_lock)
 * @outstanding_txns: number of transactions to be transmitted before
 *	processes in freeze_wait are woken up
 *	(protected by @inner_lock)
 * @is_dead: process is dead and awaiting free
 *	when outstanding transactions are cleaned up
 *	(protected by @inner_lock)
 * @is_frozen: process is frozen and unable to service
 *	binder transactions
 *	(protected by @inner_lock)
 * @sync_recv: process received sync transactions since last frozen
 *	bit 0: received sync transaction after being frozen
 *	bit 1: new pending sync transaction during freezing
 *	(protected by @inner_lock)
 * @async_recv: process received async transactions since last frozen
 *	(protected by @inner_lock)
 * @freeze_wait: waitqueue of processes waiting for all outstanding
 *	transactions to be processed
 *	(protected by @inner_lock)
 * @dmap: dbitmap to manage available reference descriptors
 *	(protected by @outer_lock)
 * @todo: list of work for this process
 *	(protected by @inner_lock)
 * @stats: per-process binder statistics
 *	(atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 *	(protected by @inner_lock)
 * @delivered_freeze: list of delivered freeze notifications
 *	(protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 *	(protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 *	yet started. In current implementation, can
 *	only be 0 or 1.
 *	(protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *	(protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 *	(protected by @inner_lock)
 * @default_priority: default scheduler priority
 *	(invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 *	(invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 *	Lock order: 1) outer, 2) node, 3) inner
 * @binderfs_entry: process-specific binderfs log file
 * @oneway_spam_detection_enabled: process enabled oneway spam detection
 *	or not
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	const struct cred *cred;
	struct hlist_node deferred_work_node;
	int deferred_work;
	int outstanding_txns;
	bool is_dead;
	bool is_frozen;
	bool sync_recv;
	bool async_recv;
	wait_queue_head_t freeze_wait;
	struct dbitmap dmap;
	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	struct list_head delivered_freeze;
	u32 max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
	bool oneway_spam_detection_enabled;
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 *	(invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 *	(protected by @proc->inner_lock)
 * @waiting_thread_node: element for @proc->waiting_threads list
 *	(protected by @proc->inner_lock)
 * @pid: PID for this thread
 *	(invariant after initialization)
 * @looper: bitmap of looping state
 *	(only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 *	(no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 *	(protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 *	(protected by @proc->inner_lock)
 * @process_todo: whether work in @todo should be processed
 *	(protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 *	(only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 *	(protected by @proc->inner_lock)
 * @ee: extended error information from this thread
 *	(protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 *	(atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 *	(atomic since @proc->inner_lock cannot
 *	always be acquired)
 * @is_dead: thread is dead and awaiting free
 *	when outstanding transactions are cleaned up
 *	(protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;			/* only modified by this thread */
	bool looper_need_return;	/* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	struct binder_extended_error ee;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file: struct file to be associated with new fd
 * @offset: offset in buffer data to this fixup
 * @target_fd: fd to use by the target to install @file
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
	int target_fd;
};

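/*
 * struct binder_transaction - bookkeeping for an in-flight transaction.
 * Links the sending and receiving thread/proc, the payload @buffer and
 * any pending fd fixups; see the @lock comment below for teardown rules.
 */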
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	pid_t from_pid;
	pid_t from_tid;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	ktime_t start_time;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr: generic object header
 * @fbo: binder object (nodes and refs)
 * @fdo: file descriptor object
 * @bbo: binder buffer pointer
 * @fdao: file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};

#endif /* _LINUX_BINDER_INTERNAL_H */