v4.17
   1/* binder_alloc.c
   2 *
   3 * Android IPC Subsystem
   4 *
   5 * Copyright (C) 2007-2017 Google, Inc.
   6 *
   7 * This software is licensed under the terms of the GNU General Public
   8 * License version 2, as published by the Free Software Foundation, and
   9 * may be copied, distributed, and modified under those terms.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <asm/cacheflush.h>
  21#include <linux/list.h>
  22#include <linux/sched/mm.h>
  23#include <linux/module.h>
  24#include <linux/rtmutex.h>
  25#include <linux/rbtree.h>
  26#include <linux/seq_file.h>
  27#include <linux/vmalloc.h>
  28#include <linux/slab.h>
  29#include <linux/sched.h>
  30#include <linux/list_lru.h>
  31#include "binder_alloc.h"
  32#include "binder_trace.h"
  33
  34struct list_lru binder_alloc_lru;
  35
  36static DEFINE_MUTEX(binder_alloc_mmap_lock);
  37
  38enum {
  39	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
  40	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
  41	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
  42};
  43static uint32_t binder_alloc_debug_mask;
  44
  45module_param_named(debug_mask, binder_alloc_debug_mask,
  46		   uint, 0644);
  47
  48#define binder_alloc_debug(mask, x...) \
  49	do { \
  50		if (binder_alloc_debug_mask & mask) \
  51			pr_info(x); \
  52	} while (0)
  53
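     /*
      * Example (assumption: debug_mask is reachable through the standard
      * module_param sysfs interface, e.g.
      * /sys/module/binder_alloc/parameters/debug_mask):
      *   echo 4 > /sys/module/binder_alloc/parameters/debug_mask
      * enables BINDER_DEBUG_BUFFER_ALLOC (1U << 2) messages at runtime.
      */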
  54static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
  55{
  56	return list_entry(buffer->entry.next, struct binder_buffer, entry);
  57}
  58
  59static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
  60{
  61	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
  62}
  63
  64static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
  65				       struct binder_buffer *buffer)
  66{
  67	if (list_is_last(&buffer->entry, &alloc->buffers))
  68		return (u8 *)alloc->buffer +
  69			alloc->buffer_size - (u8 *)buffer->data;
  70	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
  71}
  72
  73static void binder_insert_free_buffer(struct binder_alloc *alloc,
  74				      struct binder_buffer *new_buffer)
  75{
  76	struct rb_node **p = &alloc->free_buffers.rb_node;
  77	struct rb_node *parent = NULL;
  78	struct binder_buffer *buffer;
  79	size_t buffer_size;
  80	size_t new_buffer_size;
  81
  82	BUG_ON(!new_buffer->free);
  83
  84	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
  85
  86	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
  87		     "%d: add free buffer, size %zd, at %pK\n",
  88		      alloc->pid, new_buffer_size, new_buffer);
  89
  90	while (*p) {
  91		parent = *p;
  92		buffer = rb_entry(parent, struct binder_buffer, rb_node);
  93		BUG_ON(!buffer->free);
  94
  95		buffer_size = binder_alloc_buffer_size(alloc, buffer);
  96
  97		if (new_buffer_size < buffer_size)
  98			p = &parent->rb_left;
  99		else
 100			p = &parent->rb_right;
 101	}
 102	rb_link_node(&new_buffer->rb_node, parent, p);
 103	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
 104}
 105
 106static void binder_insert_allocated_buffer_locked(
 107		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
 108{
 109	struct rb_node **p = &alloc->allocated_buffers.rb_node;
 110	struct rb_node *parent = NULL;
 111	struct binder_buffer *buffer;
 112
 113	BUG_ON(new_buffer->free);
 114
 115	while (*p) {
 116		parent = *p;
 117		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 118		BUG_ON(buffer->free);
 119
 120		if (new_buffer->data < buffer->data)
 121			p = &parent->rb_left;
 122		else if (new_buffer->data > buffer->data)
 123			p = &parent->rb_right;
 124		else
 125			BUG();
 126	}
 127	rb_link_node(&new_buffer->rb_node, parent, p);
 128	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
 129}
 130
 131static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 132		struct binder_alloc *alloc,
 133		uintptr_t user_ptr)
 134{
 135	struct rb_node *n = alloc->allocated_buffers.rb_node;
 136	struct binder_buffer *buffer;
 137	void *kern_ptr;
 138
 139	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 140
 141	while (n) {
 142		buffer = rb_entry(n, struct binder_buffer, rb_node);
 143		BUG_ON(buffer->free);
 144
 145		if (kern_ptr < buffer->data)
 146			n = n->rb_left;
 147		else if (kern_ptr > buffer->data)
 148			n = n->rb_right;
 149		else {
 150			/*
 151			 * Guard against user threads attempting to
 152			 * free the buffer twice
 153			 */
 154			if (buffer->free_in_progress) {
 155				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
 156				       alloc->pid, current->pid, (u64)user_ptr);
 157				return NULL;
 158			}
 159			buffer->free_in_progress = 1;
 160			return buffer;
 161		}
 162	}
 163	return NULL;
 164}
 165
 166/**
  167 * binder_alloc_prepare_to_free() - get buffer given user ptr
 168 * @alloc:	binder_alloc for this proc
 169 * @user_ptr:	User pointer to buffer data
 170 *
 171 * Validate userspace pointer to buffer data and return buffer corresponding to
 172 * that user pointer. Search the rb tree for buffer that matches user data
 173 * pointer.
 174 *
 175 * Return:	Pointer to buffer or NULL
 176 */
 177struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 178						   uintptr_t user_ptr)
 179{
 180	struct binder_buffer *buffer;
 181
 182	mutex_lock(&alloc->mutex);
 183	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
 184	mutex_unlock(&alloc->mutex);
 185	return buffer;
 186}
 187
 188static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 189				    void *start, void *end)
 190{
 191	void *page_addr;
 192	unsigned long user_page_addr;
 193	struct binder_lru_page *page;
 194	struct vm_area_struct *vma = NULL;
 195	struct mm_struct *mm = NULL;
 196	bool need_mm = false;
 197
 198	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 199		     "%d: %s pages %pK-%pK\n", alloc->pid,
 200		     allocate ? "allocate" : "free", start, end);
 201
 202	if (end <= start)
 203		return 0;
 204
 205	trace_binder_update_page_range(alloc, allocate, start, end);
 206
 207	if (allocate == 0)
 208		goto free_range;
 209
 210	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 211		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
 212		if (!page->page_ptr) {
 213			need_mm = true;
 214			break;
 215		}
 216	}
 217
 218	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
 219		mm = alloc->vma_vm_mm;
 220
 221	if (mm) {
 222		down_write(&mm->mmap_sem);
 223		vma = alloc->vma;
 224	}
 225
 226	if (!vma && need_mm) {
 227		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
 228			alloc->pid);
 229		goto err_no_vma;
 230	}
 231
 232	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 233		int ret;
 234		bool on_lru;
 235		size_t index;
 236
 237		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 238		page = &alloc->pages[index];
 239
 240		if (page->page_ptr) {
 241			trace_binder_alloc_lru_start(alloc, index);
 242
 243			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
 244			WARN_ON(!on_lru);
 245
 246			trace_binder_alloc_lru_end(alloc, index);
 247			continue;
 248		}
 249
 250		if (WARN_ON(!vma))
 251			goto err_page_ptr_cleared;
 252
 253		trace_binder_alloc_page_start(alloc, index);
 254		page->page_ptr = alloc_page(GFP_KERNEL |
 255					    __GFP_HIGHMEM |
 256					    __GFP_ZERO);
 257		if (!page->page_ptr) {
 258			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
 259				alloc->pid, page_addr);
 260			goto err_alloc_page_failed;
 261		}
 262		page->alloc = alloc;
 263		INIT_LIST_HEAD(&page->lru);
 264
 265		ret = map_kernel_range_noflush((unsigned long)page_addr,
 266					       PAGE_SIZE, PAGE_KERNEL,
 267					       &page->page_ptr);
 268		flush_cache_vmap((unsigned long)page_addr,
 269				(unsigned long)page_addr + PAGE_SIZE);
 270		if (ret != 1) {
 271			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
 272			       alloc->pid, page_addr);
 273			goto err_map_kernel_failed;
 274		}
 275		user_page_addr =
 276			(uintptr_t)page_addr + alloc->user_buffer_offset;
 277		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 278		if (ret) {
 279			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
 280			       alloc->pid, user_page_addr);
 281			goto err_vm_insert_page_failed;
 282		}
 283
 284		if (index + 1 > alloc->pages_high)
 285			alloc->pages_high = index + 1;
 286
 287		trace_binder_alloc_page_end(alloc, index);
 288		/* vm_insert_page does not seem to increment the refcount */
 289	}
 290	if (mm) {
 291		up_write(&mm->mmap_sem);
 292		mmput(mm);
 293	}
 294	return 0;
 295
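     	/*
     	 * Freed pages are not returned to the system here; they are parked
     	 * on binder_alloc_lru so binder_shrink_scan() can reclaim them
     	 * later when the system is under memory pressure.
     	 */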
 296free_range:
 297	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 298	     page_addr -= PAGE_SIZE) {
 299		bool ret;
 300		size_t index;
 301
 302		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 303		page = &alloc->pages[index];
 304
 305		trace_binder_free_lru_start(alloc, index);
 306
 307		ret = list_lru_add(&binder_alloc_lru, &page->lru);
 308		WARN_ON(!ret);
 309
 310		trace_binder_free_lru_end(alloc, index);
 311		continue;
 312
 313err_vm_insert_page_failed:
 314		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 315err_map_kernel_failed:
 316		__free_page(page->page_ptr);
 317		page->page_ptr = NULL;
 318err_alloc_page_failed:
 319err_page_ptr_cleared:
 320		;
 321	}
 322err_no_vma:
 323	if (mm) {
 324		up_write(&mm->mmap_sem);
 325		mmput(mm);
 326	}
 327	return vma ? -ENOMEM : -ESRCH;
 328}
 329
 330static struct binder_buffer *binder_alloc_new_buf_locked(
 331				struct binder_alloc *alloc,
 332				size_t data_size,
 333				size_t offsets_size,
 334				size_t extra_buffers_size,
 335				int is_async)
 336{
 337	struct rb_node *n = alloc->free_buffers.rb_node;
 338	struct binder_buffer *buffer;
 339	size_t buffer_size;
 340	struct rb_node *best_fit = NULL;
 341	void *has_page_addr;
 342	void *end_page_addr;
 343	size_t size, data_offsets_size;
 344	int ret;
 345
 346	if (alloc->vma == NULL) {
 347		pr_err("%d: binder_alloc_buf, no vma\n",
 348		       alloc->pid);
 349		return ERR_PTR(-ESRCH);
 350	}
 351
 352	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
 353		ALIGN(offsets_size, sizeof(void *));
 354
 355	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
 356		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 357				"%d: got transaction with invalid size %zd-%zd\n",
 358				alloc->pid, data_size, offsets_size);
 359		return ERR_PTR(-EINVAL);
 360	}
 361	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
 362	if (size < data_offsets_size || size < extra_buffers_size) {
 363		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 364				"%d: got transaction with invalid extra_buffers_size %zd\n",
 365				alloc->pid, extra_buffers_size);
 366		return ERR_PTR(-EINVAL);
 367	}
 368	if (is_async &&
 369	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
 370		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 371			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
 372			      alloc->pid, size);
 373		return ERR_PTR(-ENOSPC);
 374	}
 375
 376	/* Pad 0-size buffers so they get assigned unique addresses */
 377	size = max(size, sizeof(void *));
 378
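     	/*
     	 * Best-fit search: free buffers are kept in an rb-tree ordered by
     	 * size (see binder_insert_free_buffer()), so this walk selects the
     	 * smallest free buffer that still fits the request.
     	 */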
 379	while (n) {
 380		buffer = rb_entry(n, struct binder_buffer, rb_node);
 381		BUG_ON(!buffer->free);
 382		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 383
 384		if (size < buffer_size) {
 385			best_fit = n;
 386			n = n->rb_left;
 387		} else if (size > buffer_size)
 388			n = n->rb_right;
 389		else {
 390			best_fit = n;
 391			break;
 392		}
 393	}
 394	if (best_fit == NULL) {
 395		size_t allocated_buffers = 0;
 396		size_t largest_alloc_size = 0;
 397		size_t total_alloc_size = 0;
 398		size_t free_buffers = 0;
 399		size_t largest_free_size = 0;
 400		size_t total_free_size = 0;
 401
 402		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
 403		     n = rb_next(n)) {
 404			buffer = rb_entry(n, struct binder_buffer, rb_node);
 405			buffer_size = binder_alloc_buffer_size(alloc, buffer);
 406			allocated_buffers++;
 407			total_alloc_size += buffer_size;
 408			if (buffer_size > largest_alloc_size)
 409				largest_alloc_size = buffer_size;
 410		}
 411		for (n = rb_first(&alloc->free_buffers); n != NULL;
 412		     n = rb_next(n)) {
 413			buffer = rb_entry(n, struct binder_buffer, rb_node);
 414			buffer_size = binder_alloc_buffer_size(alloc, buffer);
 415			free_buffers++;
 416			total_free_size += buffer_size;
 417			if (buffer_size > largest_free_size)
 418				largest_free_size = buffer_size;
 419		}
 420		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
 421			alloc->pid, size);
 422		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
 423		       total_alloc_size, allocated_buffers, largest_alloc_size,
 424		       total_free_size, free_buffers, largest_free_size);
 425		return ERR_PTR(-ENOSPC);
 426	}
 427	if (n == NULL) {
 428		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
 429		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 430	}
 431
 432	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 433		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 434		      alloc->pid, size, buffer, buffer_size);
 435
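     	/*
     	 * Only map the pages covered by this buffer: if the last page is
     	 * shared with an adjacent in-use buffer it is already mapped, so
     	 * the range is trimmed back to has_page_addr.
     	 */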
 436	has_page_addr =
 437		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
 438	WARN_ON(n && buffer_size != size);
 439	end_page_addr =
 440		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
 441	if (end_page_addr > has_page_addr)
 442		end_page_addr = has_page_addr;
 443	ret = binder_update_page_range(alloc, 1,
 444	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
 445	if (ret)
 446		return ERR_PTR(ret);
 447
 448	if (buffer_size != size) {
 449		struct binder_buffer *new_buffer;
 450
 451		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 452		if (!new_buffer) {
 453			pr_err("%s: %d failed to alloc new buffer struct\n",
 454			       __func__, alloc->pid);
 455			goto err_alloc_buf_struct_failed;
 456		}
 457		new_buffer->data = (u8 *)buffer->data + size;
 458		list_add(&new_buffer->entry, &buffer->entry);
 459		new_buffer->free = 1;
 460		binder_insert_free_buffer(alloc, new_buffer);
 461	}
 462
 463	rb_erase(best_fit, &alloc->free_buffers);
 464	buffer->free = 0;
 465	buffer->free_in_progress = 0;
 466	binder_insert_allocated_buffer_locked(alloc, buffer);
 467	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 468		     "%d: binder_alloc_buf size %zd got %pK\n",
 469		      alloc->pid, size, buffer);
 470	buffer->data_size = data_size;
 471	buffer->offsets_size = offsets_size;
 472	buffer->async_transaction = is_async;
 473	buffer->extra_buffers_size = extra_buffers_size;
 474	if (is_async) {
 475		alloc->free_async_space -= size + sizeof(struct binder_buffer);
 476		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 477			     "%d: binder_alloc_buf size %zd async free %zd\n",
 478			      alloc->pid, size, alloc->free_async_space);
 479	}
 480	return buffer;
 481
 482err_alloc_buf_struct_failed:
 483	binder_update_page_range(alloc, 0,
 484				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
 485				 end_page_addr);
 486	return ERR_PTR(-ENOMEM);
 487}
 488
 489/**
 490 * binder_alloc_new_buf() - Allocate a new binder buffer
 491 * @alloc:              binder_alloc for this proc
 492 * @data_size:          size of user data buffer
 493 * @offsets_size:       user specified buffer offset
 494 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 495 * @is_async:           buffer for async transaction
 496 *
 497 * Allocate a new buffer given the requested sizes. Returns
 498 * the kernel version of the buffer pointer. The size allocated
 499 * is the sum of the three given sizes (each rounded up to
 500 * pointer-sized boundary)
 501 *
  502 * Return:	The allocated buffer or %ERR_PTR(-errno) if error
 503 */
 504struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 505					   size_t data_size,
 506					   size_t offsets_size,
 507					   size_t extra_buffers_size,
 508					   int is_async)
 509{
 510	struct binder_buffer *buffer;
 511
 512	mutex_lock(&alloc->mutex);
 513	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
 514					     extra_buffers_size, is_async);
 515	mutex_unlock(&alloc->mutex);
 516	return buffer;
 517}
 518
 519static void *buffer_start_page(struct binder_buffer *buffer)
 520{
 521	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 522}
 523
 524static void *prev_buffer_end_page(struct binder_buffer *buffer)
 525{
 526	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 527}
 528
 529static void binder_delete_free_buffer(struct binder_alloc *alloc,
 530				      struct binder_buffer *buffer)
 531{
 532	struct binder_buffer *prev, *next = NULL;
 533	bool to_free = true;
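     	/*
     	 * Only the partial page at the start of the buffer is considered
     	 * here; it is released only if neither the previous nor the next
     	 * buffer still shares that page.
     	 */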
 534	BUG_ON(alloc->buffers.next == &buffer->entry);
 535	prev = binder_buffer_prev(buffer);
 536	BUG_ON(!prev->free);
 537	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
 538		to_free = false;
 539		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 540				   "%d: merge free, buffer %pK share page with %pK\n",
 541				   alloc->pid, buffer->data, prev->data);
 542	}
 543
 544	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 545		next = binder_buffer_next(buffer);
 546		if (buffer_start_page(next) == buffer_start_page(buffer)) {
 547			to_free = false;
 548			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 549					   "%d: merge free, buffer %pK share page with %pK\n",
 550					   alloc->pid,
 551					   buffer->data,
 552					   next->data);
 553		}
 554	}
 555
 556	if (PAGE_ALIGNED(buffer->data)) {
 557		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 558				   "%d: merge free, buffer start %pK is page aligned\n",
 559				   alloc->pid, buffer->data);
 560		to_free = false;
 561	}
 562
 563	if (to_free) {
 564		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 565				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
 566				   alloc->pid, buffer->data,
 567				   prev->data, next ? next->data : NULL);
 568		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
 569					 buffer_start_page(buffer) + PAGE_SIZE);
 570	}
 571	list_del(&buffer->entry);
 572	kfree(buffer);
 573}
 574
 575static void binder_free_buf_locked(struct binder_alloc *alloc,
 576				   struct binder_buffer *buffer)
 577{
 578	size_t size, buffer_size;
 579
 580	buffer_size = binder_alloc_buffer_size(alloc, buffer);
 581
 582	size = ALIGN(buffer->data_size, sizeof(void *)) +
 583		ALIGN(buffer->offsets_size, sizeof(void *)) +
 584		ALIGN(buffer->extra_buffers_size, sizeof(void *));
 585
 586	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 587		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
 588		      alloc->pid, buffer, size, buffer_size);
 589
 590	BUG_ON(buffer->free);
 591	BUG_ON(size > buffer_size);
 592	BUG_ON(buffer->transaction != NULL);
 593	BUG_ON(buffer->data < alloc->buffer);
 594	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
 595
 596	if (buffer->async_transaction) {
 597		alloc->free_async_space += size + sizeof(struct binder_buffer);
 598
 599		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 600			     "%d: binder_free_buf size %zd async free %zd\n",
 601			      alloc->pid, size, alloc->free_async_space);
 602	}
 603
 604	binder_update_page_range(alloc, 0,
 605		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
 606		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
 607
 608	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 609	buffer->free = 1;
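     	/*
     	 * Coalesce with the next and previous entries if they are also
     	 * free, so contiguous free space ends up as a single buffer.
     	 */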
 610	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 611		struct binder_buffer *next = binder_buffer_next(buffer);
 612
 613		if (next->free) {
 614			rb_erase(&next->rb_node, &alloc->free_buffers);
 615			binder_delete_free_buffer(alloc, next);
 616		}
 617	}
 618	if (alloc->buffers.next != &buffer->entry) {
 619		struct binder_buffer *prev = binder_buffer_prev(buffer);
 620
 621		if (prev->free) {
 622			binder_delete_free_buffer(alloc, buffer);
 623			rb_erase(&prev->rb_node, &alloc->free_buffers);
 624			buffer = prev;
 625		}
 626	}
 627	binder_insert_free_buffer(alloc, buffer);
 628}
 629
 630/**
 631 * binder_alloc_free_buf() - free a binder buffer
 632 * @alloc:	binder_alloc for this proc
 633 * @buffer:	kernel pointer to buffer
 634 *
  635 * Free the buffer allocated via binder_alloc_new_buf()
 636 */
 637void binder_alloc_free_buf(struct binder_alloc *alloc,
 638			    struct binder_buffer *buffer)
 639{
 640	mutex_lock(&alloc->mutex);
 641	binder_free_buf_locked(alloc, buffer);
 642	mutex_unlock(&alloc->mutex);
 643}
 644
 645/**
 646 * binder_alloc_mmap_handler() - map virtual address space for proc
 647 * @alloc:	alloc structure for this proc
 648 * @vma:	vma passed to mmap()
 649 *
 650 * Called by binder_mmap() to initialize the space specified in
 651 * vma for allocating binder buffers
 652 *
 653 * Return:
 654 *      0 = success
 655 *      -EBUSY = address space already mapped
 656 *      -ENOMEM = failed to map memory to given address space
 657 */
 658int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 659			      struct vm_area_struct *vma)
 660{
 661	int ret;
 662	struct vm_struct *area;
 663	const char *failure_string;
 664	struct binder_buffer *buffer;
 665
 666	mutex_lock(&binder_alloc_mmap_lock);
 667	if (alloc->buffer) {
 668		ret = -EBUSY;
 669		failure_string = "already mapped";
 670		goto err_already_mapped;
 671	}
 672
 673	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
 674	if (area == NULL) {
 675		ret = -ENOMEM;
 676		failure_string = "get_vm_area";
 677		goto err_get_vm_area_failed;
 678	}
 679	alloc->buffer = area->addr;
 680	alloc->user_buffer_offset =
 681		vma->vm_start - (uintptr_t)alloc->buffer;
 682	mutex_unlock(&binder_alloc_mmap_lock);
 683
 684#ifdef CONFIG_CPU_CACHE_VIPT
 685	if (cache_is_vipt_aliasing()) {
 686		while (CACHE_COLOUR(
 687				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
 688			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
 689				__func__, alloc->pid, vma->vm_start,
 690				vma->vm_end, alloc->buffer);
 691			vma->vm_start += PAGE_SIZE;
 692		}
 693	}
 694#endif
 695	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
 696				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
 697			       GFP_KERNEL);
 698	if (alloc->pages == NULL) {
 699		ret = -ENOMEM;
 700		failure_string = "alloc page array";
 701		goto err_alloc_pages_failed;
 702	}
 703	alloc->buffer_size = vma->vm_end - vma->vm_start;
 704
 705	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 706	if (!buffer) {
 707		ret = -ENOMEM;
 708		failure_string = "alloc buffer struct";
 709		goto err_alloc_buf_struct_failed;
 710	}
 711
 712	buffer->data = alloc->buffer;
 713	list_add(&buffer->entry, &alloc->buffers);
 714	buffer->free = 1;
 715	binder_insert_free_buffer(alloc, buffer);
 716	alloc->free_async_space = alloc->buffer_size / 2;
 717	barrier();
 718	alloc->vma = vma;
 719	alloc->vma_vm_mm = vma->vm_mm;
 720	mmgrab(alloc->vma_vm_mm);
 721
 722	return 0;
 723
 724err_alloc_buf_struct_failed:
 725	kfree(alloc->pages);
 726	alloc->pages = NULL;
 727err_alloc_pages_failed:
 728	mutex_lock(&binder_alloc_mmap_lock);
 729	vfree(alloc->buffer);
 730	alloc->buffer = NULL;
 731err_get_vm_area_failed:
 732err_already_mapped:
 733	mutex_unlock(&binder_alloc_mmap_lock);
 734	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
 735	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
 736	return ret;
 737}
 738
 739
 740void binder_alloc_deferred_release(struct binder_alloc *alloc)
 741{
 742	struct rb_node *n;
 743	int buffers, page_count;
 744	struct binder_buffer *buffer;
 745
 746	BUG_ON(alloc->vma);
 747
 748	buffers = 0;
 749	mutex_lock(&alloc->mutex);
 750	while ((n = rb_first(&alloc->allocated_buffers))) {
 751		buffer = rb_entry(n, struct binder_buffer, rb_node);
 752
 753		/* Transaction should already have been freed */
 754		BUG_ON(buffer->transaction);
 755
 756		binder_free_buf_locked(alloc, buffer);
 757		buffers++;
 758	}
 759
 760	while (!list_empty(&alloc->buffers)) {
 761		buffer = list_first_entry(&alloc->buffers,
 762					  struct binder_buffer, entry);
 763		WARN_ON(!buffer->free);
 764
 765		list_del(&buffer->entry);
 766		WARN_ON_ONCE(!list_empty(&alloc->buffers));
 767		kfree(buffer);
 768	}
 769
 770	page_count = 0;
 771	if (alloc->pages) {
 772		int i;
 773
 774		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 775			void *page_addr;
 776			bool on_lru;
 777
 778			if (!alloc->pages[i].page_ptr)
 779				continue;
 780
 781			on_lru = list_lru_del(&binder_alloc_lru,
 782					      &alloc->pages[i].lru);
 783			page_addr = alloc->buffer + i * PAGE_SIZE;
 784			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 785				     "%s: %d: page %d at %pK %s\n",
 786				     __func__, alloc->pid, i, page_addr,
 787				     on_lru ? "on lru" : "active");
 788			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 789			__free_page(alloc->pages[i].page_ptr);
 790			page_count++;
 791		}
 792		kfree(alloc->pages);
 793		vfree(alloc->buffer);
 794	}
 795	mutex_unlock(&alloc->mutex);
 796	if (alloc->vma_vm_mm)
 797		mmdrop(alloc->vma_vm_mm);
 798
 799	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
 800		     "%s: %d buffers %d, pages %d\n",
 801		     __func__, alloc->pid, buffers, page_count);
 802}
 803
 804static void print_binder_buffer(struct seq_file *m, const char *prefix,
 805				struct binder_buffer *buffer)
 806{
 807	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
 808		   prefix, buffer->debug_id, buffer->data,
 809		   buffer->data_size, buffer->offsets_size,
 810		   buffer->extra_buffers_size,
 811		   buffer->transaction ? "active" : "delivered");
 812}
 813
 814/**
 815 * binder_alloc_print_allocated() - print buffer info
 816 * @m:     seq_file for output via seq_printf()
 817 * @alloc: binder_alloc for this proc
 818 *
 819 * Prints information about every buffer associated with
 820 * the binder_alloc state to the given seq_file
 821 */
 822void binder_alloc_print_allocated(struct seq_file *m,
 823				  struct binder_alloc *alloc)
 824{
 825	struct rb_node *n;
 826
 827	mutex_lock(&alloc->mutex);
 828	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
 829		print_binder_buffer(m, "  buffer",
 830				    rb_entry(n, struct binder_buffer, rb_node));
 831	mutex_unlock(&alloc->mutex);
 832}
 833
 834/**
 835 * binder_alloc_print_pages() - print page usage
 836 * @m:     seq_file for output via seq_printf()
 837 * @alloc: binder_alloc for this proc
 838 */
 839void binder_alloc_print_pages(struct seq_file *m,
 840			      struct binder_alloc *alloc)
 841{
 842	struct binder_lru_page *page;
 843	int i;
 844	int active = 0;
 845	int lru = 0;
 846	int free = 0;
 847
 848	mutex_lock(&alloc->mutex);
 849	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 850		page = &alloc->pages[i];
 851		if (!page->page_ptr)
 852			free++;
 853		else if (list_empty(&page->lru))
 854			active++;
 855		else
 856			lru++;
 857	}
 858	mutex_unlock(&alloc->mutex);
 859	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 860	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 861}
 862
 863/**
 864 * binder_alloc_get_allocated_count() - return count of buffers
 865 * @alloc: binder_alloc for this proc
 866 *
 867 * Return: count of allocated buffers
 868 */
 869int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 870{
 871	struct rb_node *n;
 872	int count = 0;
 873
 874	mutex_lock(&alloc->mutex);
 875	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
 876		count++;
 877	mutex_unlock(&alloc->mutex);
 878	return count;
 879}
 880
 881
 882/**
 883 * binder_alloc_vma_close() - invalidate address space
 884 * @alloc: binder_alloc for this proc
 885 *
 886 * Called from binder_vma_close() when releasing address space.
 887 * Clears alloc->vma to prevent new incoming transactions from
 888 * allocating more buffers.
 889 */
 890void binder_alloc_vma_close(struct binder_alloc *alloc)
 891{
 892	WRITE_ONCE(alloc->vma, NULL);
 893}
 894
 895/**
 896 * binder_alloc_free_page() - shrinker callback to free pages
  897 * @item:   item to free
      * @lru:    list_lru instance for @item
 898 * @lock:   lock protecting the item
 899 * @cb_arg: callback argument
 900 *
 901 * Called from list_lru_walk() in binder_shrink_scan() to free
 902 * up pages when the system is under memory pressure.
 903 */
 904enum lru_status binder_alloc_free_page(struct list_head *item,
 905				       struct list_lru_one *lru,
 906				       spinlock_t *lock,
 907				       void *cb_arg)
 908{
 909	struct mm_struct *mm = NULL;
 910	struct binder_lru_page *page = container_of(item,
 911						    struct binder_lru_page,
 912						    lru);
 913	struct binder_alloc *alloc;
 914	uintptr_t page_addr;
 915	size_t index;
 916	struct vm_area_struct *vma;
 917
 918	alloc = page->alloc;
 919	if (!mutex_trylock(&alloc->mutex))
 920		goto err_get_alloc_mutex_failed;
 921
 922	if (!page->page_ptr)
 923		goto err_page_already_freed;
 924
 925	index = page - alloc->pages;
 926	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
 927	vma = alloc->vma;
 928	if (vma) {
 929		if (!mmget_not_zero(alloc->vma_vm_mm))
 930			goto err_mmget;
 931		mm = alloc->vma_vm_mm;
 932		if (!down_write_trylock(&mm->mmap_sem))
 933			goto err_down_write_mmap_sem_failed;
 934	}
 935
 936	list_lru_isolate(lru, item);
 937	spin_unlock(lock);
 938
 939	if (vma) {
 940		trace_binder_unmap_user_start(alloc, index);
 941
 942		zap_page_range(vma,
 943			       page_addr + alloc->user_buffer_offset,
 944			       PAGE_SIZE);
 945
 946		trace_binder_unmap_user_end(alloc, index);
 947
 948		up_write(&mm->mmap_sem);
 949		mmput(mm);
 950	}
 951
 952	trace_binder_unmap_kernel_start(alloc, index);
 953
 954	unmap_kernel_range(page_addr, PAGE_SIZE);
 955	__free_page(page->page_ptr);
 956	page->page_ptr = NULL;
 957
 958	trace_binder_unmap_kernel_end(alloc, index);
 959
 960	spin_lock(lock);
 961	mutex_unlock(&alloc->mutex);
 962	return LRU_REMOVED_RETRY;
 963
 964err_down_write_mmap_sem_failed:
 965	mmput_async(mm);
 966err_mmget:
 967err_page_already_freed:
 968	mutex_unlock(&alloc->mutex);
 969err_get_alloc_mutex_failed:
 970	return LRU_SKIP;
 971}
 972
 973static unsigned long
 974binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 975{
 976	unsigned long ret = list_lru_count(&binder_alloc_lru);
 977	return ret;
 978}
 979
 980static unsigned long
 981binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 982{
 983	unsigned long ret;
 984
 985	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
 986			    NULL, sc->nr_to_scan);
 987	return ret;
 988}
 989
 990static struct shrinker binder_shrinker = {
 991	.count_objects = binder_shrink_count,
 992	.scan_objects = binder_shrink_scan,
 993	.seeks = DEFAULT_SEEKS,
 994};
 995
 996/**
 997 * binder_alloc_init() - called by binder_open() for per-proc initialization
 998 * @alloc: binder_alloc for this proc
 999 *
1000 * Called from binder_open() to initialize binder_alloc fields for
1001 * new binder proc
1002 */
1003void binder_alloc_init(struct binder_alloc *alloc)
1004{
1005	alloc->pid = current->group_leader->pid;
1006	mutex_init(&alloc->mutex);
1007	INIT_LIST_HEAD(&alloc->buffers);
1008}
1009
1010int binder_alloc_shrinker_init(void)
1011{
1012	int ret = list_lru_init(&binder_alloc_lru);
1013
1014	if (ret == 0) {
1015		ret = register_shrinker(&binder_shrinker);
1016		if (ret)
1017			list_lru_destroy(&binder_alloc_lru);
1018	}
1019	return ret;
1020}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* binder_alloc.c
   3 *
   4 * Android IPC Subsystem
   5 *
   6 * Copyright (C) 2007-2017 Google, Inc.
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/list.h>
  12#include <linux/sched/mm.h>
  13#include <linux/module.h>
  14#include <linux/rtmutex.h>
  15#include <linux/rbtree.h>
  16#include <linux/seq_file.h>
  17#include <linux/vmalloc.h>
  18#include <linux/slab.h>
  19#include <linux/sched.h>
  20#include <linux/list_lru.h>
  21#include <linux/ratelimit.h>
  22#include <asm/cacheflush.h>
  23#include <linux/uaccess.h>
  24#include <linux/highmem.h>
  25#include <linux/sizes.h>
  26#include "binder_alloc.h"
  27#include "binder_trace.h"
  28
  29struct list_lru binder_freelist;
  30
  31static DEFINE_MUTEX(binder_alloc_mmap_lock);
  32
  33enum {
  34	BINDER_DEBUG_USER_ERROR             = 1U << 0,
  35	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
  36	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
  37	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
  38};
  39static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
  40
  41module_param_named(debug_mask, binder_alloc_debug_mask,
  42		   uint, 0644);
  43
  44#define binder_alloc_debug(mask, x...) \
  45	do { \
  46		if (binder_alloc_debug_mask & mask) \
  47			pr_info_ratelimited(x); \
  48	} while (0)
  49
  50static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
  51{
  52	return list_entry(buffer->entry.next, struct binder_buffer, entry);
  53}
  54
  55static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
  56{
  57	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
  58}
  59
  60static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
  61				       struct binder_buffer *buffer)
  62{
  63	if (list_is_last(&buffer->entry, &alloc->buffers))
  64		return alloc->buffer + alloc->buffer_size - buffer->user_data;
  65	return binder_buffer_next(buffer)->user_data - buffer->user_data;
  66}
  67
  68static void binder_insert_free_buffer(struct binder_alloc *alloc,
  69				      struct binder_buffer *new_buffer)
  70{
  71	struct rb_node **p = &alloc->free_buffers.rb_node;
  72	struct rb_node *parent = NULL;
  73	struct binder_buffer *buffer;
  74	size_t buffer_size;
  75	size_t new_buffer_size;
  76
  77	BUG_ON(!new_buffer->free);
  78
  79	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
  80
  81	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
  82		     "%d: add free buffer, size %zd, at %pK\n",
  83		      alloc->pid, new_buffer_size, new_buffer);
  84
  85	while (*p) {
  86		parent = *p;
  87		buffer = rb_entry(parent, struct binder_buffer, rb_node);
  88		BUG_ON(!buffer->free);
  89
  90		buffer_size = binder_alloc_buffer_size(alloc, buffer);
  91
  92		if (new_buffer_size < buffer_size)
  93			p = &parent->rb_left;
  94		else
  95			p = &parent->rb_right;
  96	}
  97	rb_link_node(&new_buffer->rb_node, parent, p);
  98	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
  99}
 100
 101static void binder_insert_allocated_buffer_locked(
 102		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
 103{
 104	struct rb_node **p = &alloc->allocated_buffers.rb_node;
 105	struct rb_node *parent = NULL;
 106	struct binder_buffer *buffer;
 107
 108	BUG_ON(new_buffer->free);
 109
 110	while (*p) {
 111		parent = *p;
 112		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 113		BUG_ON(buffer->free);
 114
 115		if (new_buffer->user_data < buffer->user_data)
 116			p = &parent->rb_left;
 117		else if (new_buffer->user_data > buffer->user_data)
 118			p = &parent->rb_right;
 119		else
 120			BUG();
 121	}
 122	rb_link_node(&new_buffer->rb_node, parent, p);
 123	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
 124}
 125
 126static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 127		struct binder_alloc *alloc,
 128		unsigned long user_ptr)
 129{
 130	struct rb_node *n = alloc->allocated_buffers.rb_node;
 131	struct binder_buffer *buffer;
 132
 133	while (n) {
 134		buffer = rb_entry(n, struct binder_buffer, rb_node);
 135		BUG_ON(buffer->free);
 136
 137		if (user_ptr < buffer->user_data) {
 138			n = n->rb_left;
 139		} else if (user_ptr > buffer->user_data) {
 140			n = n->rb_right;
 141		} else {
 142			/*
 143			 * Guard against user threads attempting to
 144			 * free the buffer when in use by kernel or
 145			 * after it's already been freed.
 146			 */
 147			if (!buffer->allow_user_free)
 148				return ERR_PTR(-EPERM);
 149			buffer->allow_user_free = 0;
 150			return buffer;
 151		}
 152	}
 153	return NULL;
 154}
 155
 156/**
 157 * binder_alloc_prepare_to_free() - get buffer given user ptr
 158 * @alloc:	binder_alloc for this proc
 159 * @user_ptr:	User pointer to buffer data
 160 *
 161 * Validate userspace pointer to buffer data and return buffer corresponding to
 162 * that user pointer. Search the rb tree for buffer that matches user data
 163 * pointer.
 164 *
 165 * Return:	Pointer to buffer or NULL
 166 */
 167struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 168						   unsigned long user_ptr)
 169{
 170	struct binder_buffer *buffer;
 171
 172	spin_lock(&alloc->lock);
 173	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
 174	spin_unlock(&alloc->lock);
 175	return buffer;
 176}
 177
 178static inline void
 179binder_set_installed_page(struct binder_lru_page *lru_page,
 180			  struct page *page)
 181{
 182	/* Pairs with acquire in binder_get_installed_page() */
 183	smp_store_release(&lru_page->page_ptr, page);
 184}
 185
 186static inline struct page *
 187binder_get_installed_page(struct binder_lru_page *lru_page)
 188{
 189	/* Pairs with release in binder_set_installed_page() */
 190	return smp_load_acquire(&lru_page->page_ptr);
 191}
 192
 193static void binder_lru_freelist_add(struct binder_alloc *alloc,
 194				    unsigned long start, unsigned long end)
 195{
 196	struct binder_lru_page *page;
 197	unsigned long page_addr;
 198
 199	trace_binder_update_page_range(alloc, false, start, end);
 200
 201	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 202		size_t index;
 203		int ret;
 204
 205		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 206		page = &alloc->pages[index];
 207
 208		if (!binder_get_installed_page(page))
 209			continue;
 210
 211		trace_binder_free_lru_start(alloc, index);
 212
 213		ret = list_lru_add_obj(&binder_freelist, &page->lru);
 214		WARN_ON(!ret);
 215
 216		trace_binder_free_lru_end(alloc, index);
 217	}
 218}
 219
 220static int binder_install_single_page(struct binder_alloc *alloc,
 221				      struct binder_lru_page *lru_page,
 222				      unsigned long addr)
 223{
 224	struct page *page;
 225	int ret = 0;
 226
 227	if (!mmget_not_zero(alloc->mm))
 228		return -ESRCH;
 229
 230	/*
 231	 * Protected with mmap_sem in write mode as multiple tasks
 232	 * might race to install the same page.
 233	 */
 234	mmap_write_lock(alloc->mm);
 235	if (binder_get_installed_page(lru_page))
 236		goto out;
 237
 238	if (!alloc->vma) {
 239		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
 240		ret = -ESRCH;
 241		goto out;
 242	}
 243
 244	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 245	if (!page) {
 246		pr_err("%d: failed to allocate page\n", alloc->pid);
 247		ret = -ENOMEM;
 248		goto out;
 249	}
 250
 251	ret = vm_insert_page(alloc->vma, addr, page);
 252	if (ret) {
 253		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
 254		       alloc->pid, __func__, addr - alloc->buffer, ret);
 255		__free_page(page);
 256		ret = -ENOMEM;
 257		goto out;
 258	}
 259
 260	/* Mark page installation complete and safe to use */
 261	binder_set_installed_page(lru_page, page);
 262out:
 263	mmap_write_unlock(alloc->mm);
 264	mmput_async(alloc->mm);
 265	return ret;
 266}
 267
 268static int binder_install_buffer_pages(struct binder_alloc *alloc,
 269				       struct binder_buffer *buffer,
 270				       size_t size)
 271{
 272	struct binder_lru_page *page;
 273	unsigned long start, final;
 274	unsigned long page_addr;
 275
 276	start = buffer->user_data & PAGE_MASK;
 277	final = PAGE_ALIGN(buffer->user_data + size);
 278
 279	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
 280		unsigned long index;
 281		int ret;
 282
 283		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 284		page = &alloc->pages[index];
 285
 286		if (binder_get_installed_page(page))
 287			continue;
 288
 289		trace_binder_alloc_page_start(alloc, index);
 290
 291		ret = binder_install_single_page(alloc, page, page_addr);
 292		if (ret)
 293			return ret;
 294
 295		trace_binder_alloc_page_end(alloc, index);
 296	}
 297
 298	return 0;
 299}
 300
 301/* The range of pages should exclude those shared with other buffers */
 302static void binder_lru_freelist_del(struct binder_alloc *alloc,
 303				    unsigned long start, unsigned long end)
 304{
 305	struct binder_lru_page *page;
 306	unsigned long page_addr;
 307
 308	trace_binder_update_page_range(alloc, true, start, end);
 309
 310	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 311		unsigned long index;
 312		bool on_lru;
 313
 314		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 315		page = &alloc->pages[index];
 316
 317		if (page->page_ptr) {
 318			trace_binder_alloc_lru_start(alloc, index);
 319
 320			on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
 321			WARN_ON(!on_lru);
 322
 323			trace_binder_alloc_lru_end(alloc, index);
 324			continue;
 325		}
 326
 327		if (index + 1 > alloc->pages_high)
 328			alloc->pages_high = index + 1;
 329	}
 330}
 331
 332static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
 333		struct vm_area_struct *vma)
 334{
 335	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
 336	smp_store_release(&alloc->vma, vma);
 337}
 338
 339static inline struct vm_area_struct *binder_alloc_get_vma(
 340		struct binder_alloc *alloc)
 341{
 342	/* pairs with smp_store_release in binder_alloc_set_vma() */
 343	return smp_load_acquire(&alloc->vma);
 344}
 345
 346static void debug_no_space_locked(struct binder_alloc *alloc)
 347{
 348	size_t largest_alloc_size = 0;
 349	struct binder_buffer *buffer;
 350	size_t allocated_buffers = 0;
 351	size_t largest_free_size = 0;
 352	size_t total_alloc_size = 0;
 353	size_t total_free_size = 0;
 354	size_t free_buffers = 0;
 355	size_t buffer_size;
 356	struct rb_node *n;
 357
 358	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
 359		buffer = rb_entry(n, struct binder_buffer, rb_node);
 360		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 361		allocated_buffers++;
 362		total_alloc_size += buffer_size;
 363		if (buffer_size > largest_alloc_size)
 364			largest_alloc_size = buffer_size;
 365	}
 366
 367	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
 368		buffer = rb_entry(n, struct binder_buffer, rb_node);
 369		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 370		free_buffers++;
 371		total_free_size += buffer_size;
 372		if (buffer_size > largest_free_size)
 373			largest_free_size = buffer_size;
 374	}
 375
 376	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 377			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
 378			   total_alloc_size, allocated_buffers,
 379			   largest_alloc_size, total_free_size,
 380			   free_buffers, largest_free_size);
 381}
 382
 383static bool debug_low_async_space_locked(struct binder_alloc *alloc)
 384{
 385	/*
 386	 * Find the amount and size of buffers allocated by the current caller;
 387	 * The idea is that once we cross the threshold, whoever is responsible
 388	 * for the low async space is likely to try to send another async txn,
 389	 * and at some point we'll catch them in the act. This is more efficient
 390	 * than keeping a map per pid.
 391	 */
 392	struct binder_buffer *buffer;
 393	size_t total_alloc_size = 0;
 394	int pid = current->tgid;
 395	size_t num_buffers = 0;
 396	struct rb_node *n;
 397
 398	/*
 399	 * Only start detecting spammers once we have less than 20% of async
 400	 * space left (which is less than 10% of total buffer size).
 401	 */
 402	if (alloc->free_async_space >= alloc->buffer_size / 10) {
 403		alloc->oneway_spam_detected = false;
 404		return false;
 405	}
 406
 407	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
 408		 n = rb_next(n)) {
 409		buffer = rb_entry(n, struct binder_buffer, rb_node);
 410		if (buffer->pid != pid)
 411			continue;
 412		if (!buffer->async_transaction)
 413			continue;
 414		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
 415		num_buffers++;
 416	}
 417
 418	/*
 419	 * Warn if this pid has more than 50 transactions, or more than 50% of
 420	 * async space (which is 25% of total buffer size). Oneway spam is only
 421	 * detected when the threshold is exceeded.
 422	 */
 423	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
 424		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 425			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
 426			      alloc->pid, pid, num_buffers, total_alloc_size);
 427		if (!alloc->oneway_spam_detected) {
 428			alloc->oneway_spam_detected = true;
 429			return true;
 430		}
 431	}
 432	return false;
 433}
 434
 435/* Callers preallocate @new_buffer, it is freed by this function if unused */
 436static struct binder_buffer *binder_alloc_new_buf_locked(
 437				struct binder_alloc *alloc,
 438				struct binder_buffer *new_buffer,
 439				size_t size,
 440				int is_async)
 441{
 442	struct rb_node *n = alloc->free_buffers.rb_node;
 443	struct rb_node *best_fit = NULL;
 444	struct binder_buffer *buffer;
 445	unsigned long next_used_page;
 446	unsigned long curr_last_page;
 447	size_t buffer_size;
 448
 449	if (is_async && alloc->free_async_space < size) {
 450		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 451			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
 452			      alloc->pid, size);
 453		buffer = ERR_PTR(-ENOSPC);
 454		goto out;
 455	}
 456
 457	while (n) {
 458		buffer = rb_entry(n, struct binder_buffer, rb_node);
 459		BUG_ON(!buffer->free);
 460		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 461
 462		if (size < buffer_size) {
 463			best_fit = n;
 464			n = n->rb_left;
 465		} else if (size > buffer_size) {
 466			n = n->rb_right;
 467		} else {
 468			best_fit = n;
 469			break;
 470		}
 471	}
 472
 473	if (unlikely(!best_fit)) {
 474		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 475				   "%d: binder_alloc_buf size %zd failed, no address space\n",
 476				   alloc->pid, size);
 477		debug_no_space_locked(alloc);
 478		buffer = ERR_PTR(-ENOSPC);
 479		goto out;
 480	}
 481
 482	if (buffer_size != size) {
 483		/* Found an oversized buffer and needs to be split */
 484		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
 485		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 486
 487		WARN_ON(n || buffer_size == size);
 488		new_buffer->user_data = buffer->user_data + size;
 489		list_add(&new_buffer->entry, &buffer->entry);
 490		new_buffer->free = 1;
 491		binder_insert_free_buffer(alloc, new_buffer);
 492		new_buffer = NULL;
 493	}
 494
 495	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 496		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 497		      alloc->pid, size, buffer, buffer_size);
 498
 499	/*
 500	 * Now we remove the pages from the freelist. A clever calculation
 501	 * with buffer_size determines if the last page is shared with an
 502	 * adjacent in-use buffer. In such case, the page has been already
 503	 * removed from the freelist so we trim our range short.
 504	 */
 505	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
 506	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
 507	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
 508				min(next_used_page, curr_last_page));
 509
 510	rb_erase(&buffer->rb_node, &alloc->free_buffers);
 511	buffer->free = 0;
 512	buffer->allow_user_free = 0;
 513	binder_insert_allocated_buffer_locked(alloc, buffer);
 514	buffer->async_transaction = is_async;
 515	buffer->oneway_spam_suspect = false;
 516	if (is_async) {
 517		alloc->free_async_space -= size;
 518		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 519			     "%d: binder_alloc_buf size %zd async free %zd\n",
 520			      alloc->pid, size, alloc->free_async_space);
 521		if (debug_low_async_space_locked(alloc))
 522			buffer->oneway_spam_suspect = true;
 523	}
 524
 525out:
 526	/* Discard possibly unused new_buffer */
 527	kfree(new_buffer);
 528	return buffer;
 529}
 530
 531/* Calculate the sanitized total size, returns 0 for invalid request */
 532static inline size_t sanitized_size(size_t data_size,
 533				    size_t offsets_size,
 534				    size_t extra_buffers_size)
 535{
 536	size_t total, tmp;
 537
 538	/* Align to pointer size and check for overflows */
 539	tmp = ALIGN(data_size, sizeof(void *)) +
 540		ALIGN(offsets_size, sizeof(void *));
 541	if (tmp < data_size || tmp < offsets_size)
 542		return 0;
 543	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
 544	if (total < tmp || total < extra_buffers_size)
 545		return 0;
 546
 547	/* Pad 0-sized buffers so they get a unique address */
 548	total = max(total, sizeof(void *));
 549
 550	return total;
 551}
 552
 553/**
 554 * binder_alloc_new_buf() - Allocate a new binder buffer
 555 * @alloc:              binder_alloc for this proc
 556 * @data_size:          size of user data buffer
 557 * @offsets_size:       user specified buffer offset
 558 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 559 * @is_async:           buffer for async transaction
 560 *
 561 * Allocate a new buffer given the requested sizes. Returns
 562 * the kernel version of the buffer pointer. The size allocated
 563 * is the sum of the three given sizes (each rounded up to
 564 * pointer-sized boundary)
 565 *
 566 * Return:	The allocated buffer or %ERR_PTR(-errno) if error
 567 */
 568struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 569					   size_t data_size,
 570					   size_t offsets_size,
 571					   size_t extra_buffers_size,
 572					   int is_async)
 573{
 574	struct binder_buffer *buffer, *next;
 575	size_t size;
 576	int ret;
 577
 578	/* Check binder_alloc is fully initialized */
 579	if (!binder_alloc_get_vma(alloc)) {
 580		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 581				   "%d: binder_alloc_buf, no vma\n",
 582				   alloc->pid);
 583		return ERR_PTR(-ESRCH);
 584	}
 585
 586	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
 587	if (unlikely(!size)) {
 588		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 589				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
 590				   alloc->pid, data_size, offsets_size,
 591				   extra_buffers_size);
 592		return ERR_PTR(-EINVAL);
 593	}
 594
 595	/* Preallocate the next buffer */
 596	next = kzalloc(sizeof(*next), GFP_KERNEL);
 597	if (!next)
 598		return ERR_PTR(-ENOMEM);
 599
 600	spin_lock(&alloc->lock);
 601	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
 602	if (IS_ERR(buffer)) {
 603		spin_unlock(&alloc->lock);
 604		goto out;
 605	}
 606
 607	buffer->data_size = data_size;
 608	buffer->offsets_size = offsets_size;
 609	buffer->extra_buffers_size = extra_buffers_size;
 610	buffer->pid = current->tgid;
 611	spin_unlock(&alloc->lock);
 612
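     	/*
     	 * Pages are installed after dropping alloc->lock: the install path
     	 * takes mmap_lock and allocates with GFP_KERNEL, both of which may
     	 * sleep and therefore cannot run under the spinlock.
     	 */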
 613	ret = binder_install_buffer_pages(alloc, buffer, size);
 614	if (ret) {
 615		binder_alloc_free_buf(alloc, buffer);
 616		buffer = ERR_PTR(ret);
 617	}
 618out:
 619	return buffer;
 620}
 621
 622static unsigned long buffer_start_page(struct binder_buffer *buffer)
 623{
 624	return buffer->user_data & PAGE_MASK;
 625}
 626
 627static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
 628{
 629	return (buffer->user_data - 1) & PAGE_MASK;
 630}
 631
 632static void binder_delete_free_buffer(struct binder_alloc *alloc,
 633				      struct binder_buffer *buffer)
 634{
 635	struct binder_buffer *prev, *next;
 636
 637	if (PAGE_ALIGNED(buffer->user_data))
 638		goto skip_freelist;
 639
 640	BUG_ON(alloc->buffers.next == &buffer->entry);
 641	prev = binder_buffer_prev(buffer);
 642	BUG_ON(!prev->free);
 643	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
 644		goto skip_freelist;
 645
 646	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 647		next = binder_buffer_next(buffer);
 648		if (buffer_start_page(next) == buffer_start_page(buffer))
 649			goto skip_freelist;
 650	}
 651
 652	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
 653				buffer_start_page(buffer) + PAGE_SIZE);
 654skip_freelist:
 655	list_del(&buffer->entry);
 656	kfree(buffer);
 657}
 658
 659static void binder_free_buf_locked(struct binder_alloc *alloc,
 660				   struct binder_buffer *buffer)
 661{
 662	size_t size, buffer_size;
 663
 664	buffer_size = binder_alloc_buffer_size(alloc, buffer);
 665
 666	size = ALIGN(buffer->data_size, sizeof(void *)) +
 667		ALIGN(buffer->offsets_size, sizeof(void *)) +
 668		ALIGN(buffer->extra_buffers_size, sizeof(void *));
 669
 670	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 671		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
 672		      alloc->pid, buffer, size, buffer_size);
 673
 674	BUG_ON(buffer->free);
 675	BUG_ON(size > buffer_size);
 676	BUG_ON(buffer->transaction != NULL);
 677	BUG_ON(buffer->user_data < alloc->buffer);
 678	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 679
 680	if (buffer->async_transaction) {
 681		alloc->free_async_space += buffer_size;
 682		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 683			     "%d: binder_free_buf size %zd async free %zd\n",
 684			      alloc->pid, size, alloc->free_async_space);
 685	}
 686
 687	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
 688				(buffer->user_data + buffer_size) & PAGE_MASK);
 689
 690	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 691	buffer->free = 1;
 692	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 693		struct binder_buffer *next = binder_buffer_next(buffer);
 694
 695		if (next->free) {
 696			rb_erase(&next->rb_node, &alloc->free_buffers);
 697			binder_delete_free_buffer(alloc, next);
 698		}
 699	}
 700	if (alloc->buffers.next != &buffer->entry) {
 701		struct binder_buffer *prev = binder_buffer_prev(buffer);
 702
 703		if (prev->free) {
 704			binder_delete_free_buffer(alloc, buffer);
 705			rb_erase(&prev->rb_node, &alloc->free_buffers);
 706			buffer = prev;
 707		}
 708	}
 709	binder_insert_free_buffer(alloc, buffer);
 710}
 711
 712/**
 713 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 714 * @alloc: binder_alloc for this proc
 715 * @buffer: binder buffer to be accessed
 716 * @buffer_offset: offset into @buffer data
 717 * @pgoffp: address to copy final page offset to
 718 *
 719 * Lookup the struct page corresponding to the address
 720 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 721 * NULL, the byte-offset into the page is written there.
 722 *
 723 * The caller is responsible to ensure that the offset points
 724 * to a valid address within the @buffer and that @buffer is
 725 * not freeable by the user. Since it can't be freed, we are
 726 * guaranteed that the corresponding elements of @alloc->pages[]
 727 * cannot change.
 728 *
 729 * Return: struct page
 730 */
 731static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
 732					  struct binder_buffer *buffer,
 733					  binder_size_t buffer_offset,
 734					  pgoff_t *pgoffp)
 735{
 736	binder_size_t buffer_space_offset = buffer_offset +
 737		(buffer->user_data - alloc->buffer);
 738	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
 739	size_t index = buffer_space_offset >> PAGE_SHIFT;
 740	struct binder_lru_page *lru_page;
 741
 742	lru_page = &alloc->pages[index];
 743	*pgoffp = pgoff;
 744	return lru_page->page_ptr;
 745}
 746
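/*
 * Illustrative sketch (not built): how the page index and in-page offset
 * above are derived, using hypothetical addresses and 4 KiB pages.
 */
#if 0
static void binder_get_page_sketch(void)
{
	unsigned long buffer_start = 0x7f0000000000;	/* alloc->buffer (hypothetical) */
	unsigned long user_data = buffer_start + 0x1830;	/* buffer->user_data */
	binder_size_t buffer_offset = 0x900;		/* offset into the buffer */

	binder_size_t space_offset = buffer_offset + (user_data - buffer_start);
	/* space_offset == 0x2130 */
	pgoff_t pgoff = space_offset & ~PAGE_MASK;	/* 0x130 */
	size_t index = space_offset >> PAGE_SHIFT;	/* 2: third entry of alloc->pages[] */
}
#endif
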
 747/**
 748 * binder_alloc_clear_buf() - zero out buffer
 749 * @alloc: binder_alloc for this proc
 750 * @buffer: binder buffer to be cleared
 751 *
 752 * memset the given buffer to 0
 753 */
 754static void binder_alloc_clear_buf(struct binder_alloc *alloc,
 755				   struct binder_buffer *buffer)
 756{
 757	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
 758	binder_size_t buffer_offset = 0;
 759
 760	while (bytes) {
 761		unsigned long size;
 762		struct page *page;
 763		pgoff_t pgoff;
 764
 765		page = binder_alloc_get_page(alloc, buffer,
 766					     buffer_offset, &pgoff);
 767		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
 768		memset_page(page, pgoff, 0, size);
 769		bytes -= size;
 770		buffer_offset += size;
 771	}
 772}
 773
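/*
 * Illustrative sketch (not built): the clearing loop above walks the
 * buffer one page at a time, so the first and last chunks may be partial.
 * For a hypothetical 0x2200-byte buffer whose first byte sits at in-page
 * offset 0x130 (4 KiB pages), the chunk sizes work out as follows.
 */
#if 0
static void binder_clear_chunks_sketch(void)
{
	size_t bytes = 0x2200;
	pgoff_t pgoff = 0x130;
	size_t chunk;

	chunk = min_t(size_t, bytes, PAGE_SIZE - pgoff);	/* 0x0ed0 */
	bytes -= chunk;						/* 0x1330 left */
	chunk = min_t(size_t, bytes, PAGE_SIZE);		/* 0x1000 */
	bytes -= chunk;						/* 0x0330 left */
	chunk = min_t(size_t, bytes, PAGE_SIZE);		/* 0x0330 */
	bytes -= chunk;						/* done */
}
#endif
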
 774/**
 775 * binder_alloc_free_buf() - free a binder buffer
 776 * @alloc:	binder_alloc for this proc
 777 * @buffer:	kernel pointer to buffer
 778 *
 779 * Free the buffer allocated via binder_alloc_new_buf()
 780 */
 781void binder_alloc_free_buf(struct binder_alloc *alloc,
 782			    struct binder_buffer *buffer)
 783{
 784	/*
 785	 * We could eliminate the call to binder_alloc_clear_buf()
 786	 * from binder_alloc_deferred_release() by moving this to
 787	 * binder_free_buf_locked(). However, that could
 788	 * increase contention for the alloc->lock if clear_on_free
 789	 * is used frequently for large buffers. This lock is not
 790	 * needed for correctness here.
 791	 */
 792	if (buffer->clear_on_free) {
 793		binder_alloc_clear_buf(alloc, buffer);
 794		buffer->clear_on_free = false;
 795	}
 796	spin_lock(&alloc->lock);
 797	binder_free_buf_locked(alloc, buffer);
 798	spin_unlock(&alloc->lock);
 799}
 800
 801/**
 802 * binder_alloc_mmap_handler() - map virtual address space for proc
 803 * @alloc:	alloc structure for this proc
 804 * @vma:	vma passed to mmap()
 805 *
 806 * Called by binder_mmap() to initialize the space specified in
 807 * vma for allocating binder buffers
 808 *
 809 * Return:
 810 *      0 = success
 811 *      -EBUSY = address space already mapped
 812 *      -ENOMEM = failed to map memory to given address space
 813 */
 814int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 815			      struct vm_area_struct *vma)
 816{
 817	struct binder_buffer *buffer;
 818	const char *failure_string;
 819	int ret, i;
 820
 821	if (unlikely(vma->vm_mm != alloc->mm)) {
 822		ret = -EINVAL;
 823		failure_string = "invalid vma->vm_mm";
 824		goto err_invalid_mm;
 825	}
 826
 827	mutex_lock(&binder_alloc_mmap_lock);
 828	if (alloc->buffer_size) {
 829		ret = -EBUSY;
 830		failure_string = "already mapped";
 831		goto err_already_mapped;
 832	}
 833	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
 834				   SZ_4M);
 835	mutex_unlock(&binder_alloc_mmap_lock);
 836
 837	alloc->buffer = vma->vm_start;
 838
 839	alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
 840				sizeof(alloc->pages[0]),
 841				GFP_KERNEL);
 842	if (alloc->pages == NULL) {
 843		ret = -ENOMEM;
 844		failure_string = "alloc page array";
 845		goto err_alloc_pages_failed;
 846	}
 847
 848	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 849		alloc->pages[i].alloc = alloc;
 850		INIT_LIST_HEAD(&alloc->pages[i].lru);
 851	}
 852
 853	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 854	if (!buffer) {
 855		ret = -ENOMEM;
 856		failure_string = "alloc buffer struct";
 857		goto err_alloc_buf_struct_failed;
 858	}
 859
 860	buffer->user_data = alloc->buffer;
 861	list_add(&buffer->entry, &alloc->buffers);
 862	buffer->free = 1;
 863	binder_insert_free_buffer(alloc, buffer);
 864	alloc->free_async_space = alloc->buffer_size / 2;
 865
 866	/* Signal binder_alloc is fully initialized */
 867	binder_alloc_set_vma(alloc, vma);
 868
 869	return 0;
 870
 871err_alloc_buf_struct_failed:
 872	kvfree(alloc->pages);
 873	alloc->pages = NULL;
 874err_alloc_pages_failed:
 875	alloc->buffer = 0;
 876	mutex_lock(&binder_alloc_mmap_lock);
 877	alloc->buffer_size = 0;
 878err_already_mapped:
 879	mutex_unlock(&binder_alloc_mmap_lock);
 880err_invalid_mm:
 881	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 882			   "%s: %d %lx-%lx %s failed %d\n", __func__,
 883			   alloc->pid, vma->vm_start, vma->vm_end,
 884			   failure_string, ret);
 885	return ret;
 886}
 887
 888
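/*
 * Illustrative sketch (not built): a hypothetical user-space counterpart
 * of the mapping set up by binder_alloc_mmap_handler(). The mapping must
 * not be writable, a request larger than SZ_4M is clamped, and half of
 * the resulting buffer space is accounted as the async transaction
 * budget (free_async_space).
 */
#if 0
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	void *area;

	if (fd < 0)
		return 1;

	/* read-only, 1 MiB buffer area; the kernel fills it during binder reads */
	area = mmap(NULL, 1024 * 1024, PROT_READ,
		    MAP_PRIVATE | MAP_NORESERVE, fd, 0);
	if (area == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* ... issue binder ioctls here ... */

	munmap(area, 1024 * 1024);
	close(fd);
	return 0;
}
#endif
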
 889void binder_alloc_deferred_release(struct binder_alloc *alloc)
 890{
 891	struct rb_node *n;
 892	int buffers, page_count;
 893	struct binder_buffer *buffer;
 894
 895	buffers = 0;
 896	spin_lock(&alloc->lock);
 897	BUG_ON(alloc->vma);
 898
 899	while ((n = rb_first(&alloc->allocated_buffers))) {
 900		buffer = rb_entry(n, struct binder_buffer, rb_node);
 901
 902		/* Transaction should already have been freed */
 903		BUG_ON(buffer->transaction);
 904
 905		if (buffer->clear_on_free) {
 906			binder_alloc_clear_buf(alloc, buffer);
 907			buffer->clear_on_free = false;
 908		}
 909		binder_free_buf_locked(alloc, buffer);
 910		buffers++;
 911	}
 912
 913	while (!list_empty(&alloc->buffers)) {
 914		buffer = list_first_entry(&alloc->buffers,
 915					  struct binder_buffer, entry);
 916		WARN_ON(!buffer->free);
 917
 918		list_del(&buffer->entry);
 919		WARN_ON_ONCE(!list_empty(&alloc->buffers));
 920		kfree(buffer);
 921	}
 922
 923	page_count = 0;
 924	if (alloc->pages) {
 925		int i;
 926
 927		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 928			bool on_lru;
 929
 930			if (!alloc->pages[i].page_ptr)
 931				continue;
 932
 933			on_lru = list_lru_del_obj(&binder_freelist,
 934						  &alloc->pages[i].lru);
 935			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 936				     "%s: %d: page %d %s\n",
 937				     __func__, alloc->pid, i,
 938				     on_lru ? "on lru" : "active");
 939			__free_page(alloc->pages[i].page_ptr);
 940			page_count++;
 941		}
 942	}
 943	spin_unlock(&alloc->lock);
 944	kvfree(alloc->pages);
 945	if (alloc->mm)
 946		mmdrop(alloc->mm);
 947
 948	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
 949		     "%s: %d buffers %d, pages %d\n",
 950		     __func__, alloc->pid, buffers, page_count);
 951}
 952
 953/**
 954 * binder_alloc_print_allocated() - print buffer info
 955 * @m:     seq_file for output via seq_printf()
 956 * @alloc: binder_alloc for this proc
 957 *
 958 * Prints information about every buffer associated with
 959 * the binder_alloc state to the given seq_file
 960 */
 961void binder_alloc_print_allocated(struct seq_file *m,
 962				  struct binder_alloc *alloc)
 963{
 964	struct binder_buffer *buffer;
 965	struct rb_node *n;
 966
 967	spin_lock(&alloc->lock);
 968	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
 969		buffer = rb_entry(n, struct binder_buffer, rb_node);
 970		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
 971			   buffer->debug_id,
 972			   buffer->user_data - alloc->buffer,
 973			   buffer->data_size, buffer->offsets_size,
 974			   buffer->extra_buffers_size,
 975			   buffer->transaction ? "active" : "delivered");
 976	}
 977	spin_unlock(&alloc->lock);
 978}
 979
 980/**
 981 * binder_alloc_print_pages() - print page usage
 982 * @m:     seq_file for output via seq_printf()
 983 * @alloc: binder_alloc for this proc
 984 */
 985void binder_alloc_print_pages(struct seq_file *m,
 986			      struct binder_alloc *alloc)
 987{
 988	struct binder_lru_page *page;
 989	int i;
 990	int active = 0;
 991	int lru = 0;
 992	int free = 0;
 993
 994	spin_lock(&alloc->lock);
 995	/*
 996	 * Make sure the binder_alloc is fully initialized, otherwise we might
 997	 * read inconsistent state.
 998	 */
 999	if (binder_alloc_get_vma(alloc) != NULL) {
1000		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
1001			page = &alloc->pages[i];
1002			if (!page->page_ptr)
1003				free++;
1004			else if (list_empty(&page->lru))
1005				active++;
1006			else
1007				lru++;
1008		}
1009	}
1010	spin_unlock(&alloc->lock);
1011	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
1012	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
1013}
1014
1015/**
1016 * binder_alloc_get_allocated_count() - return count of buffers
1017 * @alloc: binder_alloc for this proc
1018 *
1019 * Return: count of allocated buffers
1020 */
1021int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
1022{
1023	struct rb_node *n;
1024	int count = 0;
1025
1026	spin_lock(&alloc->lock);
1027	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
1028		count++;
1029	spin_unlock(&alloc->lock);
1030	return count;
1031}
1032
1033
1034/**
1035 * binder_alloc_vma_close() - invalidate address space
1036 * @alloc: binder_alloc for this proc
1037 *
1038 * Called from binder_vma_close() when releasing address space.
1039 * Clears alloc->vma to prevent new incoming transactions from
1040 * allocating more buffers.
1041 */
1042void binder_alloc_vma_close(struct binder_alloc *alloc)
1043{
1044	binder_alloc_set_vma(alloc, NULL);
1045}
1046
1047/**
1048 * binder_alloc_free_page() - shrinker callback to free pages
1049 * @item:   item to free
1050 * @lru:    list_lru instance of the item
1051 * @cb_arg: callback argument
1052 *
1053 * Called from list_lru_walk() in binder_shrink_scan() to free
1054 * up pages when the system is under memory pressure.
1055 */
1056enum lru_status binder_alloc_free_page(struct list_head *item,
1057				       struct list_lru_one *lru,
1058				       void *cb_arg)
1059	__must_hold(&lru->lock)
1060{
1061	struct binder_lru_page *page = container_of(item, typeof(*page), lru);
1062	struct binder_alloc *alloc = page->alloc;
1063	struct mm_struct *mm = alloc->mm;
1064	struct vm_area_struct *vma;
1065	struct page *page_to_free;
1066	unsigned long page_addr;
1067	size_t index;
1068
1069	if (!mmget_not_zero(mm))
1070		goto err_mmget;
1071	if (!mmap_read_trylock(mm))
1072		goto err_mmap_read_lock_failed;
1073	if (!spin_trylock(&alloc->lock))
1074		goto err_get_alloc_lock_failed;
1075	if (!page->page_ptr)
1076		goto err_page_already_freed;
1077
1078	index = page - alloc->pages;
1079	page_addr = alloc->buffer + index * PAGE_SIZE;
1080
1081	vma = vma_lookup(mm, page_addr);
1082	if (vma && vma != binder_alloc_get_vma(alloc))
1083		goto err_invalid_vma;
1084
1085	trace_binder_unmap_kernel_start(alloc, index);
1086
1087	page_to_free = page->page_ptr;
1088	page->page_ptr = NULL;
1089
1090	trace_binder_unmap_kernel_end(alloc, index);
1091
1092	list_lru_isolate(lru, item);
1093	spin_unlock(&alloc->lock);
1094	spin_unlock(&lru->lock);
1095
1096	if (vma) {
1097		trace_binder_unmap_user_start(alloc, index);
1098
1099		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
1100
1101		trace_binder_unmap_user_end(alloc, index);
1102	}
1103
1104	mmap_read_unlock(mm);
1105	mmput_async(mm);
1106	__free_page(page_to_free);
1107
1108	return LRU_REMOVED_RETRY;
1109
1110err_invalid_vma:
1111err_page_already_freed:
1112	spin_unlock(&alloc->lock);
1113err_get_alloc_lock_failed:
1114	mmap_read_unlock(mm);
1115err_mmap_read_lock_failed:
1116	mmput_async(mm);
1117err_mmget:
1118	return LRU_SKIP;
1119}
1120
1121static unsigned long
1122binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1123{
1124	return list_lru_count(&binder_freelist);
1125}
1126
1127static unsigned long
1128binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1129{
1130	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
1131			    NULL, sc->nr_to_scan);
1132}
1133
1134static struct shrinker *binder_shrinker;
1135
1136/**
1137 * binder_alloc_init() - called by binder_open() for per-proc initialization
1138 * @alloc: binder_alloc for this proc
1139 *
1140 * Called from binder_open() to initialize binder_alloc fields for
1141 * new binder proc
1142 */
1143void binder_alloc_init(struct binder_alloc *alloc)
1144{
1145	alloc->pid = current->group_leader->pid;
1146	alloc->mm = current->mm;
1147	mmgrab(alloc->mm);
1148	spin_lock_init(&alloc->lock);
1149	INIT_LIST_HEAD(&alloc->buffers);
1150}
1151
1152int binder_alloc_shrinker_init(void)
1153{
1154	int ret;
1155
1156	ret = list_lru_init(&binder_freelist);
1157	if (ret)
1158		return ret;
1159
1160	binder_shrinker = shrinker_alloc(0, "android-binder");
1161	if (!binder_shrinker) {
1162		list_lru_destroy(&binder_freelist);
1163		return -ENOMEM;
1164	}
1165
1166	binder_shrinker->count_objects = binder_shrink_count;
1167	binder_shrinker->scan_objects = binder_shrink_scan;
1168
1169	shrinker_register(binder_shrinker);
1170
1171	return 0;
1172}
1173
1174void binder_alloc_shrinker_exit(void)
1175{
1176	shrinker_free(binder_shrinker);
1177	list_lru_destroy(&binder_freelist);
1178}
1179
1180/**
1181 * check_buffer() - verify that buffer/offset is safe to access
1182 * @alloc: binder_alloc for this proc
1183 * @buffer: binder buffer to be accessed
1184 * @offset: offset into @buffer data
1185 * @bytes: bytes to access from offset
1186 *
1187 * Check that the @offset/@bytes are within the size of the given
1188 * @buffer and that the buffer is currently active and not freeable.
1189 * Offsets must also be multiples of sizeof(u32). The kernel is
1190 * allowed to touch the buffer in two cases:
1191 *
1192 * 1) when the buffer is being created:
1193 *     (buffer->free == 0 && buffer->allow_user_free == 0)
1194 * 2) when the buffer is being torn down:
1195 *     (buffer->free == 0 && buffer->transaction == NULL).
1196 *
1197 * Return: true if the buffer is safe to access
1198 */
1199static inline bool check_buffer(struct binder_alloc *alloc,
1200				struct binder_buffer *buffer,
1201				binder_size_t offset, size_t bytes)
1202{
1203	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1204
1205	return buffer_size >= bytes &&
1206		offset <= buffer_size - bytes &&
1207		IS_ALIGNED(offset, sizeof(u32)) &&
1208		!buffer->free &&
1209		(!buffer->allow_user_free || !buffer->transaction);
1210}
1211
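/*
 * Illustrative sketch (not built): why the bounds check above is written
 * as "buffer_size >= bytes && offset <= buffer_size - bytes" instead of
 * the naive "offset + bytes <= buffer_size". With the hypothetical values
 * below (64-bit build), the naive form wraps around and wrongly passes,
 * while the form used above cannot overflow.
 */
#if 0
static void binder_check_buffer_sketch(void)
{
	size_t buffer_size = 128;
	binder_size_t offset = 8;
	size_t bytes = SIZE_MAX;

	bool naive = (offset + bytes <= buffer_size);	/* wraps to 7: true  */
	bool safe = (buffer_size >= bytes &&
		     offset <= buffer_size - bytes);	/* false, as desired */
}
#endif
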
1212/**
1213 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1214 * @alloc: binder_alloc for this proc
1215 * @buffer: binder buffer to be accessed
1216 * @buffer_offset: offset into @buffer data
1217 * @from: userspace pointer to source buffer
1218 * @bytes: bytes to copy
1219 *
1220 * Copy bytes from source userspace to target buffer.
1221 *
1222 * Return: bytes remaining to be copied
1223 */
1224unsigned long
1225binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1226				 struct binder_buffer *buffer,
1227				 binder_size_t buffer_offset,
1228				 const void __user *from,
1229				 size_t bytes)
1230{
1231	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1232		return bytes;
1233
1234	while (bytes) {
1235		unsigned long size;
1236		unsigned long ret;
1237		struct page *page;
1238		pgoff_t pgoff;
1239		void *kptr;
1240
1241		page = binder_alloc_get_page(alloc, buffer,
1242					     buffer_offset, &pgoff);
1243		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1244		kptr = kmap_local_page(page) + pgoff;
1245		ret = copy_from_user(kptr, from, size);
1246		kunmap_local(kptr);
1247		if (ret)
1248			return bytes - size + ret;
1249		bytes -= size;
1250		from += size;
1251		buffer_offset += size;
1252	}
1253	return 0;
1254}
1255
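/*
 * Illustrative sketch (not built): a hypothetical caller of
 * binder_alloc_copy_user_to_buffer(). data_offset, user_ptr and length
 * are placeholder names; a non-zero return means part of the user range
 * could not be copied, which the caller treats as a faulted copy.
 */
#if 0
	if (binder_alloc_copy_user_to_buffer(alloc, buffer, data_offset,
					     user_ptr, length)) {
		/* some bytes were left uncopied: fail the transaction */
		return -EFAULT;
	}
#endif
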
1256static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1257				       bool to_buffer,
1258				       struct binder_buffer *buffer,
1259				       binder_size_t buffer_offset,
1260				       void *ptr,
1261				       size_t bytes)
1262{
1263	/* All copies must be 32-bit aligned and 32-bit size */
1264	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1265		return -EINVAL;
1266
1267	while (bytes) {
1268		unsigned long size;
1269		struct page *page;
1270		pgoff_t pgoff;
1271
1272		page = binder_alloc_get_page(alloc, buffer,
1273					     buffer_offset, &pgoff);
1274		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1275		if (to_buffer)
1276			memcpy_to_page(page, pgoff, ptr, size);
1277		else
1278			memcpy_from_page(ptr, page, pgoff, size);
1279		bytes -= size;
1280		pgoff = 0;
1281		ptr = ptr + size;
1282		buffer_offset += size;
1283	}
1284	return 0;
1285}
1286
1287int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1288				struct binder_buffer *buffer,
1289				binder_size_t buffer_offset,
1290				void *src,
1291				size_t bytes)
1292{
1293	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1294					   src, bytes);
1295}
1296
1297int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1298				  void *dest,
1299				  struct binder_buffer *buffer,
1300				  binder_size_t buffer_offset,
1301				  size_t bytes)
1302{
1303	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1304					   dest, bytes);
1305}