/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put_unlocked(state->bo[i]);

	kfree(state);
}

int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}
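
/* Userspace consumes the ioctl above with a two-pass pattern implied
 * by the bo_count check: call once to learn how many BO entries are
 * needed, allocate the array, then call again to fetch the full dump.
 * A rough sketch of the userspace side (hypothetical code, assuming
 * libdrm's drmIoctl() wrapper; error handling omitted):
 *
 *	struct drm_vc4_get_hang_state get = { 0 };
 *	struct drm_vc4_get_hang_state_bo *bos;
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 *	bos = calloc(get.bo_count, sizeof(*bos));
 *	get.bo = (uintptr_t)bos;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 *
 * After the second call the register snapshot in "get" and the
 * handle/paddr/size entries in bos[] describe the hung job.
 */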

static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the BOs from both the binner and renderer into the hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone has a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}

static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = vc4->dev;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		reservation_object_add_shared_fence(bo->resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		reservation_object_add_excl_fence(bo->resv, exec->fence);
	}
}
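
/* Note on the fence bookkeeping above: every BO referenced by the job
 * gets exec->fence attached as a shared (read) fence, while the BOs
 * the render CL writes to also get it as the exclusive (write) fence.
 * This is what gives other users of the same buffers (a later job, a
 * page flip, or a dma-buf importer) implicit synchronization against
 * this job's rendering.
 */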

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct vc4_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_vc4_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_vc4_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}
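
/* The retry loop above is the standard ww_mutex acquisition dance:
 * every reservation lock is taken under a single ww_acquire_ctx, and
 * if any of them returns -EDEADLK (meaning lock ordering requires us
 * to be the one that backs off), we drop all the locks we already
 * hold, take the contended one with the _slow variant that sleeps
 * until it becomes available, and then retry the rest of the set.
 * That is what keeps two jobs that reference the same BOs in a
 * different order from deadlocking against each other.
 */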

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off.  Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i--; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied-in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr mapping is uncached.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}
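
/* For reference, the layout vc4_get_bcl() builds is roughly:
 *
 *   temp (kvmalloc'd, CPU-cached, freed on return):
 *     [bin_offset .. )         copy of the user's binner CL
 *     [shader_rec_offset .. )  copy of the user's shader records
 *     [uniforms_offset .. )    copy of the user's uniforms
 *     [exec_size .. )          vc4_shader_state[] validation scratch
 *
 *   exec_bo (V3D-visible BO kept on exec->unref_list):
 *     [bin_offset .. )         validated binner CL (exec->ct0ca)
 *     [shader_rec_offset .. )  validated/relocated shader records
 *     [uniforms_offset .. )    validated/relocated uniforms
 *
 * The user data is staged in temp first because validation has to read
 * it back, which would be slow through the (likely uncached) BO
 * mapping.
 */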

static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}

void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}
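
/* A minimal sketch of how the rest of the driver consumes this helper
 * (the embedding struct and callback below are hypothetical): embed a
 * struct vc4_seqno_cb in your own state, and the callback is run from
 * the workqueue once vc4->finished_seqno reaches the requested seqno,
 * or is scheduled immediately if it already has.
 *
 *	struct my_flip_state {
 *		struct vc4_seqno_cb cb;
 *	};
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_flip_state *state =
 *			container_of(cb, struct my_flip_state, cb);
 *
 *		complete_the_flip(state);
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &state->cb, bo->seqno, my_flip_done);
 */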

/* Scheduled when any job has been completed, this walks the list of
 * jobs that have completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}
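
/* From userspace, waiting for rendering into a buffer is a single
 * ioctl on the BO handle.  A rough sketch of hypothetical userspace
 * code (assumes libdrm's drmIoctl(); error handling omitted):
 *
 *	struct drm_vc4_wait_bo wait = {
 *		.handle = bo_handle,
 *		.timeout_ns = 1000000000ULL,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
 *
 * If the wait is interrupted by a signal, the helper above has already
 * subtracted the time spent sleeping from timeout_ns before the
 * argument struct is copied back, so retrying the ioctl with the same
 * struct doesn't extend the total timeout.
 */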

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("->pad2 must be set to zero\n");
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0) {
			mutex_unlock(&vc4->power_lock);
			vc4->power_refcount--;
			kfree(exec);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}
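
/* A very rough sketch of the userspace side of this ioctl (in practice
 * this lives in Mesa's vc4 driver; the buffer names and counts below
 * are purely illustrative).  After also filling in the render CL
 * description (width/height, tile bounds, clear values, and the
 * color/zs surfaces), a frame is submitted and its seqno recorded:
 *
 *	uint32_t handles[3] = { vbo_handle, tex_handle, fb_handle };
 *	struct drm_vc4_submit_cl submit = { 0 };
 *
 *	submit.bin_cl = (uintptr_t)bin_cl;
 *	submit.bin_cl_size = bin_cl_size;
 *	submit.shader_rec = (uintptr_t)shader_recs;
 *	submit.shader_rec_size = shader_rec_size;
 *	submit.shader_rec_count = shader_rec_count;
 *	submit.uniforms = (uintptr_t)uniforms;
 *	submit.uniforms_size = uniforms_size;
 *	submit.bo_handles = (uintptr_t)handles;
 *	submit.bo_handle_count = 3;
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *
 * On success submit.seqno identifies the job and can be handed to the
 * wait ioctls above.
 */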

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}

int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}
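
/* Typical userspace flow for the madvise ioctl above (a sketch only,
 * assuming libdrm's drmIoctl(); error handling omitted): mark an idle
 * cached BO purgeable when it goes back on the application's free
 * list, and mark it needed again before reuse:
 *
 *	struct drm_vc4_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = VC4_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *
 *	madv.madv = VC4_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *
 * If madv.retained comes back as 0 on the second call, the kernel
 * purged the BO's contents in the meantime and the buffer has to be
 * reinitialized before it is used again.
 */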