// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright © 2015 Broadcom
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM DMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the DMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
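
/* In addition to the kernel BO cache described above, this file manages a
 * userspace-controlled "purgeable" pool: BOs marked VC4_MADV_DONTNEED are
 * kept on vc4->purgeable.list, and their backing memory may be reclaimed
 * when a DMA allocation fails (see vc4_bo_userspace_cache_purge()).
 */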

#include <linux/dma-buf.h>

#include <drm/drm_fourcc.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const struct drm_gem_object_funcs vc4_gem_object_funcs;

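/* Indices into this array correspond to enum vc4_kernel_bo_type; user
 * labels are stored at indices >= VC4_BO_TYPE_COUNT (see is_user_label()
 * and vc4_get_user_label()).
 */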
static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

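/* BO sizes are page-aligned (see the roundup() in vc4_bo_create()), so the
 * BO cache's size_list is indexed by page count minus one: e.g. a
 * three-page BO lands in size_list[2].
 */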
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	mutex_destroy(&bo->madv_lock);
	drm_gem_dma_free(&bo->base);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return;

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return;

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the DMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return ERR_PTR(-ENODEV);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);

	mutex_init(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	bo->base.base.funcs = &vc4_gem_object_funcs;

	return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_dma_object *dma_obj;
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	dma_obj = drm_gem_dma_create(dev, size);
	if (IS_ERR(dma_obj)) {
		/*
		 * If we've run out of DMA memory, kill the cache of
		 * DMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		dma_obj = drm_gem_dma_create(dev, size);
	}

	if (IS_ERR(dma_obj)) {
		/*
		 * Still not enough DMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO
		 * to restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if DMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		dma_obj = drm_gem_dma_create(dev, size);
	}

	if (IS_ERR(dma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->base.dev);
		drm_err(dev, "Failed to allocate from GEM DMA helper:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&dma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&dma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_bo_dumb_create(struct drm_file *file_priv,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	ret = vc4_dumb_fixup_args(args);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

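/* Reap cached BOs that have been sitting unused for more than a second,
 * oldest first.  When a younger entry is reached, re-arm the timer so the
 * remaining entries are reaped once they age out.
 */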
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
static void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but DMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = &vc4->base;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
	int ret;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return;

	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}

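/* Timer callbacks run in atomic context and so can't take vc4->bo_lock
 * (a mutex); the timer therefore just kicks a work item, and the actual
 * reaping happens in vc4_bo_cache_time_work() above.
 */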
static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		drm_err(obj->dev, "Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

static vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * the BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	return drm_gem_dma_mmap(&bo->base, vma);
}

static const struct vm_operations_struct vc4_vm_ops = {
	.fault = vc4_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
	.free = vc4_free_object,
	.export = vc4_prime_export,
	.get_sg_table = drm_gem_dma_object_get_sg_table,
	.vmap = drm_gem_dma_object_vmap,
	.mmap = vc4_gem_object_mmap,
	.vm_ops = &vc4_vm_ops,
};

static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
	if (!vc4->v3d)
		return -ENODEV;

	if (vc4file->bin_bo_used)
		return 0;

	return vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put(gem_obj);
	return 0;
}
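
/* Userspace mapping flow (illustrative sketch, not part of this driver):
 * look up the fake mmap offset with the ioctl above, then mmap() the DRM
 * fd at that offset.  Assumes libdrm's drmIoctl().
 *
 *	struct drm_vc4_mmap_bo map = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 */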

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory, which may hold stale data if the
	 * BO was allocated from the BO cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users do things like mmap the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put(gem_obj);

	return 0;
}

int vc4_bo_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *drm = minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(drm);

	if (!vc4->v3d)
		return -ENODEV;

	drm_debugfs_add_file(drm, "bo_stats", vc4_bo_stats_debugfs, NULL);

	return 0;
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;
	int i;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	/* Create the initial set of BO labels that the kernel will
	 * use.  This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	ret = drmm_mutex_init(dev, &vc4->bo_lock);
	if (ret) {
		kfree(vc4->bo_labels);
		return ret;
	}

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			drm_err(dev, "Destroying BO cache with %d %s "
				"BOs still allocated\n",
				vc4->bo_labels[i].num_allocated,
				vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		drm_err(dev, "Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put(gem_obj);

	return ret;
}
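
/* Labeling sketch from userspace (illustrative, assumes libdrm's
 * drmIoctl()):
 *
 *	const char *label = "my scanout buffer";
 *	struct drm_vc4_label_bo args = {
 *		.handle = handle,
 *		.len = strlen(label),
 *		.name = (uintptr_t)label,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &args);
 *
 * The new label then shows up in the per-label counts printed by the
 * bo_stats debugfs file.
 */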
v4.17
 
   1/*
   2 *  Copyright © 2015 Broadcom
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 */
   8
   9/**
  10 * DOC: VC4 GEM BO management support
  11 *
  12 * The VC4 GPU architecture (both scanout and rendering) has direct
  13 * access to system memory with no MMU in between.  To support it, we
  14 * use the GEM CMA helper functions to allocate contiguous ranges of
  15 * physical memory for our BOs.
  16 *
  17 * Since the CMA allocator is very slow, we keep a cache of recently
  18 * freed BOs around so that the kernel's allocation of objects for 3D
  19 * rendering can return quickly.
  20 */
  21
  22#include <linux/dma-buf.h>
  23
 
 
  24#include "vc4_drv.h"
  25#include "uapi/drm/vc4_drm.h"
  26
 
 
  27static const char * const bo_type_names[] = {
  28	"kernel",
  29	"V3D",
  30	"V3D shader",
  31	"dumb",
  32	"binner",
  33	"RCL",
  34	"BCL",
  35	"kernel BO cache",
  36};
  37
  38static bool is_user_label(int label)
  39{
  40	return label >= VC4_BO_TYPE_COUNT;
  41}
  42
  43static void vc4_bo_stats_dump(struct vc4_dev *vc4)
  44{
  45	int i;
  46
  47	for (i = 0; i < vc4->num_labels; i++) {
  48		if (!vc4->bo_labels[i].num_allocated)
  49			continue;
  50
  51		DRM_INFO("%30s: %6dkb BOs (%d)\n",
  52			 vc4->bo_labels[i].name,
  53			 vc4->bo_labels[i].size_allocated / 1024,
  54			 vc4->bo_labels[i].num_allocated);
  55	}
  56
  57	mutex_lock(&vc4->purgeable.lock);
  58	if (vc4->purgeable.num)
  59		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
  60			 vc4->purgeable.size / 1024, vc4->purgeable.num);
  61
  62	if (vc4->purgeable.purged_num)
  63		DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
  64			 vc4->purgeable.purged_size / 1024,
  65			 vc4->purgeable.purged_num);
  66	mutex_unlock(&vc4->purgeable.lock);
  67}
  68
  69#ifdef CONFIG_DEBUG_FS
  70int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
  71{
  72	struct drm_info_node *node = (struct drm_info_node *)m->private;
  73	struct drm_device *dev = node->minor->dev;
  74	struct vc4_dev *vc4 = to_vc4_dev(dev);
  75	int i;
  76
  77	mutex_lock(&vc4->bo_lock);
  78	for (i = 0; i < vc4->num_labels; i++) {
  79		if (!vc4->bo_labels[i].num_allocated)
  80			continue;
  81
  82		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
  83			   vc4->bo_labels[i].name,
  84			   vc4->bo_labels[i].size_allocated / 1024,
  85			   vc4->bo_labels[i].num_allocated);
  86	}
  87	mutex_unlock(&vc4->bo_lock);
  88
  89	mutex_lock(&vc4->purgeable.lock);
  90	if (vc4->purgeable.num)
  91		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
  92			   vc4->purgeable.size / 1024, vc4->purgeable.num);
  93
  94	if (vc4->purgeable.purged_num)
  95		seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
  96			   vc4->purgeable.purged_size / 1024,
  97			   vc4->purgeable.purged_num);
  98	mutex_unlock(&vc4->purgeable.lock);
 
 
 
 
 
 
 
 
 
 
  99
 100	return 0;
 101}
 102#endif
 103
 104/* Takes ownership of *name and returns the appropriate slot for it in
 105 * the bo_labels[] array, extending it as necessary.
 106 *
 107 * This is inefficient and could use a hash table instead of walking
 108 * an array and strcmp()ing.  However, the assumption is that user
 109 * labeling will be infrequent (scanout buffers and other long-lived
 110 * objects, or debug driver builds), so we can live with it for now.
 111 */
 112static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
 113{
 114	int i;
 115	int free_slot = -1;
 116
 117	for (i = 0; i < vc4->num_labels; i++) {
 118		if (!vc4->bo_labels[i].name) {
 119			free_slot = i;
 120		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
 121			kfree(name);
 122			return i;
 123		}
 124	}
 125
 126	if (free_slot != -1) {
 127		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
 128		vc4->bo_labels[free_slot].name = name;
 129		return free_slot;
 130	} else {
 131		u32 new_label_count = vc4->num_labels + 1;
 132		struct vc4_label *new_labels =
 133			krealloc(vc4->bo_labels,
 134				 new_label_count * sizeof(*new_labels),
 135				 GFP_KERNEL);
 136
 137		if (!new_labels) {
 138			kfree(name);
 139			return -1;
 140		}
 141
 142		free_slot = vc4->num_labels;
 143		vc4->bo_labels = new_labels;
 144		vc4->num_labels = new_label_count;
 145
 146		vc4->bo_labels[free_slot].name = name;
 147		vc4->bo_labels[free_slot].num_allocated = 0;
 148		vc4->bo_labels[free_slot].size_allocated = 0;
 149
 150		return free_slot;
 151	}
 152}
 153
 154static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
 155{
 156	struct vc4_bo *bo = to_vc4_bo(gem_obj);
 157	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);
 158
 159	lockdep_assert_held(&vc4->bo_lock);
 160
 161	if (label != -1) {
 162		vc4->bo_labels[label].num_allocated++;
 163		vc4->bo_labels[label].size_allocated += gem_obj->size;
 164	}
 165
 166	vc4->bo_labels[bo->label].num_allocated--;
 167	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;
 168
 169	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
 170	    is_user_label(bo->label)) {
 171		/* Free user BO label slots on last unreference.
 172		 * Slots are just where we track the stats for a given
 173		 * name, and once a name is unused we can reuse that
 174		 * slot.
 175		 */
 176		kfree(vc4->bo_labels[bo->label].name);
 177		vc4->bo_labels[bo->label].name = NULL;
 178	}
 179
 180	bo->label = label;
 181}
 182
 183static uint32_t bo_page_index(size_t size)
 184{
 185	return (size / PAGE_SIZE) - 1;
 186}
 187
 188static void vc4_bo_destroy(struct vc4_bo *bo)
 189{
 190	struct drm_gem_object *obj = &bo->base.base;
 191	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
 192
 193	lockdep_assert_held(&vc4->bo_lock);
 194
 195	vc4_bo_set_label(obj, -1);
 196
 197	if (bo->validated_shader) {
 198		kfree(bo->validated_shader->uniform_addr_offsets);
 199		kfree(bo->validated_shader->texture_samples);
 200		kfree(bo->validated_shader);
 201		bo->validated_shader = NULL;
 202	}
 203
 204	reservation_object_fini(&bo->_resv);
 205
 206	drm_gem_cma_free_object(obj);
 207}
 208
 209static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
 210{
 211	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 212
 213	lockdep_assert_held(&vc4->bo_lock);
 214	list_del(&bo->unref_head);
 215	list_del(&bo->size_head);
 216}
 217
 218static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
 219						     size_t size)
 220{
 221	struct vc4_dev *vc4 = to_vc4_dev(dev);
 222	uint32_t page_index = bo_page_index(size);
 223
 224	if (vc4->bo_cache.size_list_size <= page_index) {
 225		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
 226					page_index + 1);
 227		struct list_head *new_list;
 228		uint32_t i;
 229
 230		new_list = kmalloc_array(new_size, sizeof(struct list_head),
 231					 GFP_KERNEL);
 232		if (!new_list)
 233			return NULL;
 234
 235		/* Rebase the old cached BO lists to their new list
 236		 * head locations.
 237		 */
 238		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
 239			struct list_head *old_list =
 240				&vc4->bo_cache.size_list[i];
 241
 242			if (list_empty(old_list))
 243				INIT_LIST_HEAD(&new_list[i]);
 244			else
 245				list_replace(old_list, &new_list[i]);
 246		}
 247		/* And initialize the brand new BO list heads. */
 248		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
 249			INIT_LIST_HEAD(&new_list[i]);
 250
 251		kfree(vc4->bo_cache.size_list);
 252		vc4->bo_cache.size_list = new_list;
 253		vc4->bo_cache.size_list_size = new_size;
 254	}
 255
 256	return &vc4->bo_cache.size_list[page_index];
 257}
 258
 259static void vc4_bo_cache_purge(struct drm_device *dev)
 260{
 261	struct vc4_dev *vc4 = to_vc4_dev(dev);
 262
 263	mutex_lock(&vc4->bo_lock);
 264	while (!list_empty(&vc4->bo_cache.time_list)) {
 265		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
 266						    struct vc4_bo, unref_head);
 267		vc4_bo_remove_from_cache(bo);
 268		vc4_bo_destroy(bo);
 269	}
 270	mutex_unlock(&vc4->bo_lock);
 271}
 272
 273void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
 274{
 275	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 276
 
 
 
 277	mutex_lock(&vc4->purgeable.lock);
 278	list_add_tail(&bo->size_head, &vc4->purgeable.list);
 279	vc4->purgeable.num++;
 280	vc4->purgeable.size += bo->base.base.size;
 281	mutex_unlock(&vc4->purgeable.lock);
 282}
 283
 284static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
 285{
 286	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 287
 
 
 
 288	/* list_del_init() is used here because the caller might release
 289	 * the purgeable lock in order to acquire the madv one and update the
 290	 * madv status.
 291	 * During this short period of time a user might decide to mark
 292	 * the BO as unpurgeable, and if bo->madv is set to
 293	 * VC4_MADV_DONTNEED it will try to remove the BO from the
 294	 * purgeable list which will fail if the ->next/prev fields
 295	 * are set to LIST_POISON1/LIST_POISON2 (which is what
 296	 * list_del() does).
 297	 * Re-initializing the list element guarantees that list_del()
 298	 * will work correctly even if it's a NOP.
 299	 */
 300	list_del_init(&bo->size_head);
 301	vc4->purgeable.num--;
 302	vc4->purgeable.size -= bo->base.base.size;
 303}
 304
 305void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
 306{
 307	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
 308
 309	mutex_lock(&vc4->purgeable.lock);
 310	vc4_bo_remove_from_purgeable_pool_locked(bo);
 311	mutex_unlock(&vc4->purgeable.lock);
 312}
 313
 314static void vc4_bo_purge(struct drm_gem_object *obj)
 315{
 316	struct vc4_bo *bo = to_vc4_bo(obj);
 317	struct drm_device *dev = obj->dev;
 318
 319	WARN_ON(!mutex_is_locked(&bo->madv_lock));
 320	WARN_ON(bo->madv != VC4_MADV_DONTNEED);
 321
 322	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 323
 324	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
 325	bo->base.vaddr = NULL;
 326	bo->madv = __VC4_MADV_PURGED;
 327}
 328
 329static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
 330{
 331	struct vc4_dev *vc4 = to_vc4_dev(dev);
 332
 333	mutex_lock(&vc4->purgeable.lock);
 334	while (!list_empty(&vc4->purgeable.list)) {
 335		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
 336						     struct vc4_bo, size_head);
 337		struct drm_gem_object *obj = &bo->base.base;
 338		size_t purged_size = 0;
 339
 340		vc4_bo_remove_from_purgeable_pool_locked(bo);
 341
 342		/* Release the purgeable lock while we're purging the BO so
 343		 * that other people can continue inserting things in the
 344		 * purgeable pool without having to wait for all BOs to be
 345		 * purged.
 346		 */
 347		mutex_unlock(&vc4->purgeable.lock);
 348		mutex_lock(&bo->madv_lock);
 349
 350		/* Since we released the purgeable pool lock before acquiring
 351		 * the BO madv one, the user may have marked the BO as WILLNEED
 352		 * and re-used it in the meantime.
 353		 * Before purging the BO we need to make sure
 354		 * - it is still marked as DONTNEED
 355		 * - it has not been re-inserted in the purgeable list
 356		 * - it is not used by HW blocks
 357		 * If one of these conditions is not met, just skip the entry.
 358		 */
 359		if (bo->madv == VC4_MADV_DONTNEED &&
 360		    list_empty(&bo->size_head) &&
 361		    !refcount_read(&bo->usecnt)) {
 362			purged_size = bo->base.base.size;
 363			vc4_bo_purge(obj);
 364		}
 365		mutex_unlock(&bo->madv_lock);
 366		mutex_lock(&vc4->purgeable.lock);
 367
 368		if (purged_size) {
 369			vc4->purgeable.purged_size += purged_size;
 370			vc4->purgeable.purged_num++;
 371		}
 372	}
 373	mutex_unlock(&vc4->purgeable.lock);
 374}
 375
 376static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
 377					    uint32_t size,
 378					    enum vc4_kernel_bo_type type)
 379{
 380	struct vc4_dev *vc4 = to_vc4_dev(dev);
 381	uint32_t page_index = bo_page_index(size);
 382	struct vc4_bo *bo = NULL;
 383
 384	size = roundup(size, PAGE_SIZE);
 385
 386	mutex_lock(&vc4->bo_lock);
 387	if (page_index >= vc4->bo_cache.size_list_size)
 388		goto out;
 389
 390	if (list_empty(&vc4->bo_cache.size_list[page_index]))
 391		goto out;
 392
 393	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
 394			      struct vc4_bo, size_head);
 395	vc4_bo_remove_from_cache(bo);
 396	kref_init(&bo->base.base.refcount);
 397
 398out:
 399	if (bo)
 400		vc4_bo_set_label(&bo->base.base, type);
 401	mutex_unlock(&vc4->bo_lock);
 402	return bo;
 403}
 404
 405/**
 406 * vc4_gem_create_object - Implementation of driver->gem_create_object.
 407 * @dev: DRM device
 408 * @size: Size in bytes of the memory the object will reference
 409 *
 410 * This lets the CMA helpers allocate object structs for us, and keep
 411 * our BO stats correct.
 412 */
 413struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
 414{
 415	struct vc4_dev *vc4 = to_vc4_dev(dev);
 416	struct vc4_bo *bo;
 417
 
 
 
 418	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 419	if (!bo)
 420		return ERR_PTR(-ENOMEM);
 421
 422	bo->madv = VC4_MADV_WILLNEED;
 423	refcount_set(&bo->usecnt, 0);
 
 424	mutex_init(&bo->madv_lock);
 
 425	mutex_lock(&vc4->bo_lock);
 426	bo->label = VC4_BO_TYPE_KERNEL;
 427	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
 428	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
 429	mutex_unlock(&vc4->bo_lock);
 430	bo->resv = &bo->_resv;
 431	reservation_object_init(bo->resv);
 432
 433	return &bo->base.base;
 434}
 435
 436struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
 437			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
 438{
 439	size_t size = roundup(unaligned_size, PAGE_SIZE);
 440	struct vc4_dev *vc4 = to_vc4_dev(dev);
 441	struct drm_gem_cma_object *cma_obj;
 442	struct vc4_bo *bo;
 443
 
 
 
 444	if (size == 0)
 445		return ERR_PTR(-EINVAL);
 446
 447	/* First, try to get a vc4_bo from the kernel BO cache. */
 448	bo = vc4_bo_get_from_cache(dev, size, type);
 449	if (bo) {
 450		if (!allow_unzeroed)
 451			memset(bo->base.vaddr, 0, bo->base.base.size);
 452		return bo;
 453	}
 454
 455	cma_obj = drm_gem_cma_create(dev, size);
 456	if (IS_ERR(cma_obj)) {
 457		/*
 458		 * If we've run out of CMA memory, kill the cache of
 459		 * CMA allocations we've got laying around and try again.
 460		 */
 461		vc4_bo_cache_purge(dev);
 462		cma_obj = drm_gem_cma_create(dev, size);
 463	}
 464
 465	if (IS_ERR(cma_obj)) {
 466		/*
 467		 * Still not enough CMA memory, purge the userspace BO
 468		 * cache and retry.
 469		 * This is sub-optimal since we purge the whole userspace
 470		 * BO cache which forces user that want to re-use the BO to
 471		 * restore its initial content.
 472		 * Ideally, we should purge entries one by one and retry
 473		 * after each to see if CMA allocation succeeds. Or even
 474		 * better, try to find an entry with at least the same
 475		 * size.
 476		 */
 477		vc4_bo_userspace_cache_purge(dev);
 478		cma_obj = drm_gem_cma_create(dev, size);
 479	}
 480
 481	if (IS_ERR(cma_obj)) {
 482		DRM_ERROR("Failed to allocate from CMA:\n");
 483		vc4_bo_stats_dump(vc4);
 
 484		return ERR_PTR(-ENOMEM);
 485	}
 486	bo = to_vc4_bo(&cma_obj->base);
 487
 488	/* By default, BOs do not support the MADV ioctl. This will be enabled
 489	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
 490	 * BOs).
 491	 */
 492	bo->madv = __VC4_MADV_NOTSUPP;
 493
 494	mutex_lock(&vc4->bo_lock);
 495	vc4_bo_set_label(&cma_obj->base, type);
 496	mutex_unlock(&vc4->bo_lock);
 497
 498	return bo;
 499}
 500
 501int vc4_dumb_create(struct drm_file *file_priv,
 502		    struct drm_device *dev,
 503		    struct drm_mode_create_dumb *args)
 504{
 505	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 506	struct vc4_bo *bo = NULL;
 507	int ret;
 508
 509	if (args->pitch < min_pitch)
 510		args->pitch = min_pitch;
 511
 512	if (args->size < args->pitch * args->height)
 513		args->size = args->pitch * args->height;
 
 514
 515	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
 516	if (IS_ERR(bo))
 517		return PTR_ERR(bo);
 518
 519	bo->madv = VC4_MADV_WILLNEED;
 520
 521	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 522	drm_gem_object_put_unlocked(&bo->base.base);
 523
 524	return ret;
 525}
 526
 527static void vc4_bo_cache_free_old(struct drm_device *dev)
 528{
 529	struct vc4_dev *vc4 = to_vc4_dev(dev);
 530	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
 531
 532	lockdep_assert_held(&vc4->bo_lock);
 533
 534	while (!list_empty(&vc4->bo_cache.time_list)) {
 535		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
 536						    struct vc4_bo, unref_head);
 537		if (time_before(expire_time, bo->free_time)) {
 538			mod_timer(&vc4->bo_cache.time_timer,
 539				  round_jiffies_up(jiffies +
 540						   msecs_to_jiffies(1000)));
 541			return;
 542		}
 543
 544		vc4_bo_remove_from_cache(bo);
 545		vc4_bo_destroy(bo);
 546	}
 547}
 548
 549/* Called on the last userspace/kernel unreference of the BO.  Returns
 550 * it to the BO cache if possible, otherwise frees it.
 551 */
 552void vc4_free_object(struct drm_gem_object *gem_bo)
 553{
 554	struct drm_device *dev = gem_bo->dev;
 555	struct vc4_dev *vc4 = to_vc4_dev(dev);
 556	struct vc4_bo *bo = to_vc4_bo(gem_bo);
 557	struct list_head *cache_list;
 558
 559	/* Remove the BO from the purgeable list. */
 560	mutex_lock(&bo->madv_lock);
 561	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
 562		vc4_bo_remove_from_purgeable_pool(bo);
 563	mutex_unlock(&bo->madv_lock);
 564
 565	mutex_lock(&vc4->bo_lock);
 566	/* If the object references someone else's memory, we can't cache it.
 567	 */
 568	if (gem_bo->import_attach) {
 569		vc4_bo_destroy(bo);
 570		goto out;
 571	}
 572
 573	/* Don't cache if it was publicly named. */
 574	if (gem_bo->name) {
 575		vc4_bo_destroy(bo);
 576		goto out;
 577	}
 578
 579	/* If this object was partially constructed but CMA allocation
 580	 * had failed, just free it. Can also happen when the BO has been
 581	 * purged.
 582	 */
 583	if (!bo->base.vaddr) {
 584		vc4_bo_destroy(bo);
 585		goto out;
 586	}
 587
 588	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
 589	if (!cache_list) {
 590		vc4_bo_destroy(bo);
 591		goto out;
 592	}
 593
 594	if (bo->validated_shader) {
 595		kfree(bo->validated_shader->uniform_addr_offsets);
 596		kfree(bo->validated_shader->texture_samples);
 597		kfree(bo->validated_shader);
 598		bo->validated_shader = NULL;
 599	}
 600
 601	/* Reset madv and usecnt before adding the BO to the cache. */
 602	bo->madv = __VC4_MADV_NOTSUPP;
 603	refcount_set(&bo->usecnt, 0);
 604
 605	bo->t_format = false;
 606	bo->free_time = jiffies;
 607	list_add(&bo->size_head, cache_list);
 608	list_add(&bo->unref_head, &vc4->bo_cache.time_list);
 609
 610	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);
 611
 612	vc4_bo_cache_free_old(dev);
 613
 614out:
 615	mutex_unlock(&vc4->bo_lock);
 616}
 617
 618static void vc4_bo_cache_time_work(struct work_struct *work)
 619{
 620	struct vc4_dev *vc4 =
 621		container_of(work, struct vc4_dev, bo_cache.time_work);
 622	struct drm_device *dev = vc4->dev;
 623
 624	mutex_lock(&vc4->bo_lock);
 625	vc4_bo_cache_free_old(dev);
 626	mutex_unlock(&vc4->bo_lock);
 627}
 628
 629int vc4_bo_inc_usecnt(struct vc4_bo *bo)
 630{
 
 631	int ret;
 632
 
 
 
 633	/* Fast path: if the BO is already retained by someone, no need to
 634	 * check the madv status.
 635	 */
 636	if (refcount_inc_not_zero(&bo->usecnt))
 637		return 0;
 638
 639	mutex_lock(&bo->madv_lock);
 640	switch (bo->madv) {
 641	case VC4_MADV_WILLNEED:
 642		if (!refcount_inc_not_zero(&bo->usecnt))
 643			refcount_set(&bo->usecnt, 1);
 644		ret = 0;
 645		break;
 646	case VC4_MADV_DONTNEED:
 647		/* We shouldn't use a BO marked as purgeable if at least
 648		 * someone else retained its content by incrementing usecnt.
 649		 * Luckily the BO hasn't been purged yet, but something wrong
 650		 * is happening here. Just throw an error instead of
 651		 * authorizing this use case.
 652		 */
 653	case __VC4_MADV_PURGED:
 654		/* We can't use a purged BO. */
 655	default:
 656		/* Invalid madv value. */
 657		ret = -EINVAL;
 658		break;
 659	}
 660	mutex_unlock(&bo->madv_lock);
 661
 662	return ret;
 663}
 664
 665void vc4_bo_dec_usecnt(struct vc4_bo *bo)
 666{
 
 
 
 
 
 667	/* Fast path: if the BO is still retained by someone, no need to test
 668	 * the madv value.
 669	 */
 670	if (refcount_dec_not_one(&bo->usecnt))
 671		return;
 672
 673	mutex_lock(&bo->madv_lock);
 674	if (refcount_dec_and_test(&bo->usecnt) &&
 675	    bo->madv == VC4_MADV_DONTNEED)
 676		vc4_bo_add_to_purgeable_pool(bo);
 677	mutex_unlock(&bo->madv_lock);
 678}
 679
 680static void vc4_bo_cache_time_timer(struct timer_list *t)
 681{
 682	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
 683
 684	schedule_work(&vc4->bo_cache.time_work);
 685}
 686
 687struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
 688{
 689	struct vc4_bo *bo = to_vc4_bo(obj);
 690
 691	return bo->resv;
 692}
 693
 694struct dma_buf *
 695vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
 696{
 697	struct vc4_bo *bo = to_vc4_bo(obj);
 698	struct dma_buf *dmabuf;
 699	int ret;
 700
 701	if (bo->validated_shader) {
 702		DRM_DEBUG("Attempting to export shader BO\n");
 703		return ERR_PTR(-EINVAL);
 704	}
 705
 706	/* Note: as soon as the BO is exported it becomes unpurgeable, because
 707	 * noone ever decrements the usecnt even if the reference held by the
 708	 * exported BO is released. This shouldn't be a problem since we don't
 709	 * expect exported BOs to be marked as purgeable.
 710	 */
 711	ret = vc4_bo_inc_usecnt(bo);
 712	if (ret) {
 713		DRM_ERROR("Failed to increment BO usecnt\n");
 714		return ERR_PTR(ret);
 715	}
 716
 717	dmabuf = drm_gem_prime_export(dev, obj, flags);
 718	if (IS_ERR(dmabuf))
 719		vc4_bo_dec_usecnt(bo);
 720
 721	return dmabuf;
 722}
 723
 724int vc4_fault(struct vm_fault *vmf)
 725{
 726	struct vm_area_struct *vma = vmf->vma;
 727	struct drm_gem_object *obj = vma->vm_private_data;
 728	struct vc4_bo *bo = to_vc4_bo(obj);
 729
 730	/* The only reason we would end up here is when user-space accesses
 731	 * BO's memory after it's been purged.
 732	 */
 733	mutex_lock(&bo->madv_lock);
 734	WARN_ON(bo->madv != __VC4_MADV_PURGED);
 735	mutex_unlock(&bo->madv_lock);
 736
 737	return VM_FAULT_SIGBUS;
 738}
 739
 740int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
 741{
 742	struct drm_gem_object *gem_obj;
 743	unsigned long vm_pgoff;
 744	struct vc4_bo *bo;
 745	int ret;
 746
 747	ret = drm_gem_mmap(filp, vma);
 748	if (ret)
 749		return ret;
 750
 751	gem_obj = vma->vm_private_data;
 752	bo = to_vc4_bo(gem_obj);
 753
 754	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
 755		DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
 756		return -EINVAL;
 757	}
 758
 759	if (bo->madv != VC4_MADV_WILLNEED) {
 760		DRM_DEBUG("mmaping of %s BO not allowed\n",
 761			  bo->madv == VC4_MADV_DONTNEED ?
 762			  "purgeable" : "purged");
 763		return -EINVAL;
 764	}
 765
 766	/*
 767	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
 768	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
 769	 * the whole buffer.
 770	 */
 771	vma->vm_flags &= ~VM_PFNMAP;
 772
 773	/* This ->vm_pgoff dance is needed to make all parties happy:
 774	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
 775	 *   mem-region, hence the need to set it to zero (the value set by
 776	 *   the DRM core is a virtual offset encoding the GEM object-id)
 777	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
 778	 *   initial value before returning from this function because it
 779	 *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
 780	 *   and this information will be used when we invalidate userspace
 781	 *   mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
 782	 */
 783	vm_pgoff = vma->vm_pgoff;
 784	vma->vm_pgoff = 0;
 785	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
 786			  bo->base.paddr, vma->vm_end - vma->vm_start);
 787	vma->vm_pgoff = vm_pgoff;
 788
 789	if (ret)
 790		drm_gem_vm_close(vma);
 791
 792	return ret;
 793}
 794
 795int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 796{
 797	struct vc4_bo *bo = to_vc4_bo(obj);
 798
 799	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
 800		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
 801		return -EINVAL;
 802	}
 803
 804	return drm_gem_cma_prime_mmap(obj, vma);
 805}
 806
 807void *vc4_prime_vmap(struct drm_gem_object *obj)
 808{
 809	struct vc4_bo *bo = to_vc4_bo(obj);
 810
 811	if (bo->validated_shader) {
 812		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
 813		return ERR_PTR(-EINVAL);
 814	}
 815
 816	return drm_gem_cma_prime_vmap(obj);
 817}
 818
 819struct drm_gem_object *
 820vc4_prime_import_sg_table(struct drm_device *dev,
 821			  struct dma_buf_attachment *attach,
 822			  struct sg_table *sgt)
 823{
 824	struct drm_gem_object *obj;
 825	struct vc4_bo *bo;
 826
 827	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
 828	if (IS_ERR(obj))
 829		return obj;
 830
 831	bo = to_vc4_bo(obj);
 832	bo->resv = attach->dmabuf->resv;
 833
 834	return obj;
 835}
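/* Note: pointing bo->resv at the dma-buf's reservation object makes the
 * importer and the exporter serialize on the same set of fences, rather
 * than each side tracking implicit synchronization separately.
 */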
 836
 837int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 838			struct drm_file *file_priv)
 839{
 840	struct drm_vc4_create_bo *args = data;
 841	struct vc4_bo *bo = NULL;
 842	int ret;
 843
 844	/*
 845	 * We can't allocate from the BO cache, because the BOs don't
 846	 * get zeroed, and that might leak data between users.
 847	 */
 848	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
 849	if (IS_ERR(bo))
 850		return PTR_ERR(bo);
 851
 852	bo->madv = VC4_MADV_WILLNEED;
 853
 854	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 855	drm_gem_object_put_unlocked(&bo->base.base);
 856
 857	return ret;
 858}
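/* The userspace half, roughly (a sketch; error handling omitted):
 *
 *	struct drm_vc4_create_bo create = {
 *		.size = 64 * 1024,
 *	};
 *	ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *	(create.handle now names the BO on this fd)
 */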
 859
 860int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
 861		      struct drm_file *file_priv)
 862{
 863	struct drm_vc4_mmap_bo *args = data;
 864	struct drm_gem_object *gem_obj;
 865
 866	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 867	if (!gem_obj) {
 868		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 869		return -EINVAL;
 870	}
 871
 872	/* The mmap offset was set up at BO allocation time. */
 873	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 874
 875	drm_gem_object_put_unlocked(gem_obj);
 876	return 0;
 877}
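/* Typical pairing with vc4_create_bo_ioctl() (a sketch; error handling
 * omitted):
 *
 *	struct drm_vc4_mmap_bo map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */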
 878
 879int
 880vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 881			   struct drm_file *file_priv)
 882{
 883	struct drm_vc4_create_shader_bo *args = data;
 884	struct vc4_bo *bo = NULL;
 885	int ret;
 886
 887	if (args->size == 0)
 888		return -EINVAL;
 889
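	/* QPU instructions are 64 bits wide, so a valid shader program is
	 * always a whole number of u64s.
	 */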
 890	if (args->size % sizeof(u64) != 0)
 891		return -EINVAL;
 892
 893	if (args->flags != 0) {
 894		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
 895		return -EINVAL;
 896	}
 897
 898	if (args->pad != 0) {
 899		DRM_INFO("Pad set: 0x%08x\n", args->pad);
 900		return -EINVAL;
 901	}
 902
 903	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
 904	if (IS_ERR(bo))
 905		return PTR_ERR(bo);
 906
 907	bo->madv = VC4_MADV_WILLNEED;
 908
 909	if (copy_from_user(bo->base.vaddr,
 910			     (void __user *)(uintptr_t)args->data,
 911			     args->size)) {
 912		ret = -EFAULT;
 913		goto fail;
 914	}
 915	/* Clear the rest of the memory: BOs allocated from the BO cache
 916	 * are not zeroed, so the tail would otherwise leak stale data.
 917	 */
 918	memset(bo->base.vaddr + args->size, 0,
 919	       bo->base.base.size - args->size);
 920
 921	bo->validated_shader = vc4_validate_shader(&bo->base);
 922	if (!bo->validated_shader) {
 923		ret = -EINVAL;
 924		goto fail;
 925	}
 926
 927	/* We have to create the handle after validation, to avoid
 928	 * races with users doing things like mmapping the shader BO.
 929	 */
 930	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 931
 932 fail:
 933	drm_gem_object_put_unlocked(&bo->base.base);
 934
 935	return ret;
 936}
 937
 938/**
 939 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 940 * @dev: DRM device
 941 * @data: ioctl argument
 942 * @file_priv: DRM file for this fd
 943 *
 944 * The tiling state of the BO decides the default modifier of an fb if
 945 * no specific modifier was set by userspace, and the return value of
 946 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 947 * received from dmabuf as the same tiling format as the producer
 948 * used).
 949 */
 950int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
 951			 struct drm_file *file_priv)
 952{
 953	struct drm_vc4_set_tiling *args = data;
 954	struct drm_gem_object *gem_obj;
 955	struct vc4_bo *bo;
 956	bool t_format;
 957
 958	if (args->flags != 0)
 959		return -EINVAL;
 960
 961	switch (args->modifier) {
 962	case DRM_FORMAT_MOD_NONE:
 963		t_format = false;
 964		break;
 965	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
 966		t_format = true;
 967		break;
 968	default:
 969		return -EINVAL;
 970	}
 971
 972	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 973	if (!gem_obj) {
 974		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 975		return -ENOENT;
 976	}
 977	bo = to_vc4_bo(gem_obj);
 978	bo->t_format = t_format;
 979
 980	drm_gem_object_put_unlocked(gem_obj);
 981
 982	return 0;
 983}
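/* Example call (a sketch; error handling omitted): mark a BO as T-format
 * tiled before attaching it to a framebuffer:
 *
 *	struct drm_vc4_set_tiling tiling = {
 *		.handle = handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *	ioctl(fd, DRM_IOCTL_VC4_SET_TILING, &tiling);
 */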
 984
 985/**
 986 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 987 * @dev: DRM device
 988 * @data: ioctl argument
 989 * @file_priv: DRM file for this fd
 990 *
 991 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 992 */
 993int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
 994			 struct drm_file *file_priv)
 995{
 996	struct drm_vc4_get_tiling *args = data;
 997	struct drm_gem_object *gem_obj;
 998	struct vc4_bo *bo;
 999
1000	if (args->flags != 0 || args->modifier != 0)
1001		return -EINVAL;
1002
1003	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
1004	if (!gem_obj) {
1005		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
1006		return -ENOENT;
1007	}
1008	bo = to_vc4_bo(gem_obj);
1009
1010	if (bo->t_format)
1011		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
1012	else
1013		args->modifier = DRM_FORMAT_MOD_NONE;
1014
1015	drm_gem_object_put_unlocked(gem_obj);
1016
1017	return 0;
1018}
1019
1020int vc4_bo_cache_init(struct drm_device *dev)
1021{
1022	struct vc4_dev *vc4 = to_vc4_dev(dev);
1023	int i;
1024
1025	/* Create the initial set of BO labels that the kernel will
1026	 * use.  This lets us avoid a bunch of string reallocation in
1027	 * the kernel's draw and BO allocation paths.
1028	 */
1029	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
1030				 GFP_KERNEL);
1031	if (!vc4->bo_labels)
1032		return -ENOMEM;
1033	vc4->num_labels = VC4_BO_TYPE_COUNT;
1034
1035	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
1036	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
1037		vc4->bo_labels[i].name = bo_type_names[i];
1038
1039	mutex_init(&vc4->bo_lock);
1040
1041	INIT_LIST_HEAD(&vc4->bo_cache.time_list);
1042
1043	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
1044	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
1045
1046	return 0;
1047}
1048
1049void vc4_bo_cache_destroy(struct drm_device *dev)
1050{
1051	struct vc4_dev *vc4 = to_vc4_dev(dev);
1052	int i;
1053
1054	del_timer(&vc4->bo_cache.time_timer);
1055	cancel_work_sync(&vc4->bo_cache.time_work);
1056
1057	vc4_bo_cache_purge(dev);
1058
1059	for (i = 0; i < vc4->num_labels; i++) {
1060		if (vc4->bo_labels[i].num_allocated) {
1061			DRM_ERROR("Destroying BO cache with %d %s "
1062				  "BOs still allocated\n",
1063				  vc4->bo_labels[i].num_allocated,
1064				  vc4->bo_labels[i].name);
1065		}
1066
1067		if (is_user_label(i))
1068			kfree(vc4->bo_labels[i].name);
1069	}
1070	kfree(vc4->bo_labels);
1071}
1072
1073int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
1074		       struct drm_file *file_priv)
1075{
1076	struct vc4_dev *vc4 = to_vc4_dev(dev);
1077	struct drm_vc4_label_bo *args = data;
1078	char *name;
1079	struct drm_gem_object *gem_obj;
1080	int ret = 0, label;
1081
1082	if (!args->len)
1083		return -EINVAL;
1084
1085	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
1086	if (IS_ERR(name))
1087		return PTR_ERR(name);
1088
1089	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
1090	if (!gem_obj) {
1091		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
1092		kfree(name);
1093		return -ENOENT;
1094	}
1095
1096	mutex_lock(&vc4->bo_lock);
1097	label = vc4_get_user_label(vc4, name);
1098	if (label != -1)
1099		vc4_bo_set_label(gem_obj, label);
1100	else
1101		ret = -ENOMEM;
1102	mutex_unlock(&vc4->bo_lock);
1103
1104	drm_gem_object_put_unlocked(gem_obj);
1105
1106	return ret;
1107}
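/* Debugging aid, roughly (a sketch; error handling omitted): a labeled BO
 * shows up under its own name in the debugfs BO stats:
 *
 *	struct drm_vc4_label_bo label = {
 *		.handle = handle,
 *		.len = strlen("my scanout buffer"),
 *		.name = (uintptr_t)"my scanout buffer",
 *	};
 *	ioctl(fd, DRM_IOCTL_VC4_LABEL_BO, &label);
 */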