// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

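/*
 * vmw_fence_obj_init() below hands &fman->lock to dma_fence_init() as
 * the fence lock, which is what lets fman_from_fence() recover the
 * owning manager from the lock pointer alone via container_of().
 */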
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

static u32 vmw_fence_goal_read(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
	else
		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
}

static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
	else
		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that IRQ
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
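
/*
 * A minimal usage sketch of the above (an illustration, not code from
 * this driver): a kernel-side caller holding a referenced
 * struct vmw_fence_obj *fence would typically do
 *
 *	if (!vmw_fence_obj_signaled(fence))
 *		ret = vmw_fence_obj_wait(fence, false, true,
 *					 VMW_FENCE_WAIT_TIMEOUT);
 *
 * The wait path calls vmw_seqno_waiter_add(), switching on the
 * ANY_FENCE irq, so the fence gets signaled even if no new fences are
 * submitted afterwards.
 */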

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (!list_empty(&fence->head)) {
		spin_lock(&fman->lock);
		list_del_init(&fence->head);
		spin_unlock(&fman->lock);
	}
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fence_read(dev_priv);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	return true;
}
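
/*
 * Worked example of the wrap-safe check above, with illustrative
 * values: for a fence seqno of 0xffffff00 and a device seqno that has
 * wrapped around to 0x00000010, the u32 difference
 * 0x00000010 - 0xffffff00 == 0x110 is < VMW_FENCE_WRAP, so the fence
 * is correctly treated as passed despite the counter wrap.
 */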

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence, *next_fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fence_goal_write(fman->dev_priv,
					     fence->base.seqno);
			break;
		}
	}

	return true;
}
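
/*
 * Illustrative reading of the wrap check above: if the device has
 * passed seqno 100 while the goal is 200, the u32 difference
 * 100 - 200 wraps to a value >= VMW_FENCE_WRAP, so the goal is still
 * ahead and nothing needs updating. Only once the passed seqno catches
 * up with the goal does the difference drop below VMW_FENCE_WRAP and
 * a new goal get picked from the pending fence list.
 */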

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fence_read(fman->dev_priv);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fence_read(fman->dev_priv);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}
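
/*
 * Timeline of the race the rerun above closes (illustrative):
 *
 * 1. We read seqno S and signal everything up to S.
 * 2. vmw_fence_goal_new_locked() writes a new goal G > S.
 * 3. The device passes G before the new goal was in place, so no
 *    FENCE_GOAL irq fires for it.
 *
 * Re-reading the seqno and rescanning the list catches that case.
 */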

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}
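
/*
 * A short usage sketch (illustrative, with placeholder error handling):
 *
 *	int ret = vmw_fence_obj_wait(fence, false, true,
 *				     VMW_FENCE_WAIT_TIMEOUT);
 *	if (ret == -EBUSY)
 *		...		timed out, fence still unsignaled
 *	else if (ret == -ERESTARTSYS)
 *		...		interrupted by a signal
 *
 * A return of 0 means the fence signaled in time; the remaining-jiffies
 * value from dma_fence_wait_timeout() is deliberately folded away.
 */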

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);

	ttm_base_object_kfree(ufence, base);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	int ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);

	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	return ret;
}

/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not available on some 32-bit systems, so do
	 * an approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);
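	/*
	 * Accuracy note (illustrative arithmetic): 2^-20 + 2^-24 - 2^-26
	 * is roughly 9.984e-7, so the shifts above approximate a divide
	 * by 10^6 to within about 0.2%, which is plenty for a jiffies
	 * timeout.
	 */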

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}
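
/*
 * Ownership note for the function above: on success the queued action
 * owns @event and delivers it from vmw_event_fence_action_seq_passed();
 * on failure the caller still owns it, which is why
 * vmw_event_fence_action_create() below cleans up with
 * drm_event_cancel_free() on its error path.
 */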

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(event->event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <linux/sched/signal.h>
  29
  30#include "vmwgfx_drv.h"
  31
  32#define VMW_FENCE_WRAP (1 << 31)
  33
  34struct vmw_fence_manager {
  35	int num_fence_objects;
  36	struct vmw_private *dev_priv;
  37	spinlock_t lock;
  38	struct list_head fence_list;
  39	struct work_struct work;
  40	bool fifo_down;
  41	struct list_head cleanup_list;
  42	uint32_t pending_actions[VMW_ACTION_MAX];
  43	struct mutex goal_irq_mutex;
  44	bool goal_irq_on; /* Protected by @goal_irq_mutex */
  45	bool seqno_valid; /* Protected by @lock, and may not be set to true
  46			     without the @goal_irq_mutex held. */
  47	u64 ctx;
  48};
  49
  50struct vmw_user_fence {
  51	struct ttm_base_object base;
  52	struct vmw_fence_obj fence;
  53};
  54
  55/**
  56 * struct vmw_event_fence_action - fence action that delivers a drm event.
  57 *
  58 * @action: A struct vmw_fence_action to hook up to a fence.
  59 * @event: A pointer to the pending event.
  60 * @fence: A referenced pointer to the fence to keep it alive while @action
  61 * hangs on it.
  62 * @dev: Pointer to a struct drm_device so we can access the event stuff.
  63 * @tv_sec: If non-null, the variable pointed to will be assigned
  64 * current time tv_sec val when the fence signals.
  65 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
  66 * be assigned the current time tv_usec val when the fence signals.
  67 */
  68struct vmw_event_fence_action {
  69	struct vmw_fence_action action;
  70
  71	struct drm_pending_event *event;
  72	struct vmw_fence_obj *fence;
  73	struct drm_device *dev;
  74
  75	uint32_t *tv_sec;
  76	uint32_t *tv_usec;
  77};
  78
  79static struct vmw_fence_manager *
  80fman_from_fence(struct vmw_fence_obj *fence)
  81{
  82	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
  83}
  84
  85static u32 vmw_fence_goal_read(struct vmw_private *vmw)
  86{
  87	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
  88		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
  89	else
  90		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
  91}
  92
  93static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
  94{
  95	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
  96		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
  97	else
  98		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
  99}
 100
 101/*
 102 * Note on fencing subsystem usage of irqs:
 103 * Typically the vmw_fences_update function is called
 104 *
 105 * a) When a new fence seqno has been submitted by the fifo code.
 106 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 107 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 108 * irq is received. When the last fence waiter is gone, that IRQ is masked
 109 * away.
 110 *
 111 * In situations where there are no waiters and we don't submit any new fences,
 112 * fence objects may not be signaled. This is perfectly OK, since there are
 113 * no consumers of the signaled data, but that is NOT ok when there are fence
 114 * actions attached to a fence. The fencing subsystem then makes use of the
 115 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 116 * which has an action attached, and each time vmw_fences_update is called,
 117 * the subsystem makes sure the fence goal seqno is updated.
 118 *
 119 * The fence goal seqno irq is on as long as there are unsignaled fence
 120 * objects with actions attached to them.
 121 */
 122
 123static void vmw_fence_obj_destroy(struct dma_fence *f)
 124{
 125	struct vmw_fence_obj *fence =
 126		container_of(f, struct vmw_fence_obj, base);
 127
 128	struct vmw_fence_manager *fman = fman_from_fence(fence);
 129
 130	spin_lock(&fman->lock);
 131	list_del_init(&fence->head);
 132	--fman->num_fence_objects;
 133	spin_unlock(&fman->lock);
 
 134	fence->destroy(fence);
 135}
 136
 137static const char *vmw_fence_get_driver_name(struct dma_fence *f)
 138{
 139	return "vmwgfx";
 140}
 141
 142static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
 143{
 144	return "svga";
 145}
 146
 147static bool vmw_fence_enable_signaling(struct dma_fence *f)
 148{
 149	struct vmw_fence_obj *fence =
 150		container_of(f, struct vmw_fence_obj, base);
 151
 152	struct vmw_fence_manager *fman = fman_from_fence(fence);
 153	struct vmw_private *dev_priv = fman->dev_priv;
 154
 155	u32 seqno = vmw_fence_read(dev_priv);
 156	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 157		return false;
 158
 159	return true;
 160}
 161
 162struct vmwgfx_wait_cb {
 163	struct dma_fence_cb base;
 164	struct task_struct *task;
 165};
 166
 167static void
 168vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 169{
 170	struct vmwgfx_wait_cb *wait =
 171		container_of(cb, struct vmwgfx_wait_cb, base);
 172
 173	wake_up_process(wait->task);
 174}
 175
 176static void __vmw_fences_update(struct vmw_fence_manager *fman);
 177
 178static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
 179{
 180	struct vmw_fence_obj *fence =
 181		container_of(f, struct vmw_fence_obj, base);
 182
 183	struct vmw_fence_manager *fman = fman_from_fence(fence);
 184	struct vmw_private *dev_priv = fman->dev_priv;
 185	struct vmwgfx_wait_cb cb;
 186	long ret = timeout;
 187
 188	if (likely(vmw_fence_obj_signaled(fence)))
 189		return timeout;
 190
 191	vmw_seqno_waiter_add(dev_priv);
 192
 193	spin_lock(f->lock);
 194
 195	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
 196		goto out;
 197
 198	if (intr && signal_pending(current)) {
 199		ret = -ERESTARTSYS;
 200		goto out;
 201	}
 202
 203	cb.base.func = vmwgfx_wait_cb;
 204	cb.task = current;
 205	list_add(&cb.base.node, &f->cb_list);
 206
 207	for (;;) {
 208		__vmw_fences_update(fman);
 209
 210		/*
 211		 * We can use the barrier free __set_current_state() since
 212		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
 213		 * fence spinlock.
 214		 */
 215		if (intr)
 216			__set_current_state(TASK_INTERRUPTIBLE);
 217		else
 218			__set_current_state(TASK_UNINTERRUPTIBLE);
 219
 220		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
 221			if (ret == 0 && timeout > 0)
 222				ret = 1;
 223			break;
 224		}
 225
 226		if (intr && signal_pending(current)) {
 227			ret = -ERESTARTSYS;
 228			break;
 229		}
 230
 231		if (ret == 0)
 232			break;
 233
 234		spin_unlock(f->lock);
 235
 236		ret = schedule_timeout(ret);
 237
 238		spin_lock(f->lock);
 239	}
 240	__set_current_state(TASK_RUNNING);
 241	if (!list_empty(&cb.base.node))
 242		list_del(&cb.base.node);
 243
 244out:
 245	spin_unlock(f->lock);
 246
 247	vmw_seqno_waiter_remove(dev_priv);
 248
 249	return ret;
 250}
 251
 252static const struct dma_fence_ops vmw_fence_ops = {
 253	.get_driver_name = vmw_fence_get_driver_name,
 254	.get_timeline_name = vmw_fence_get_timeline_name,
 255	.enable_signaling = vmw_fence_enable_signaling,
 256	.wait = vmw_fence_wait,
 257	.release = vmw_fence_obj_destroy,
 258};
 259
 260
 261/*
 262 * Execute signal actions on fences recently signaled.
 263 * This is done from a workqueue so we don't have to execute
 264 * signal actions from atomic context.
 265 */
 266
 267static void vmw_fence_work_func(struct work_struct *work)
 268{
 269	struct vmw_fence_manager *fman =
 270		container_of(work, struct vmw_fence_manager, work);
 271	struct list_head list;
 272	struct vmw_fence_action *action, *next_action;
 273	bool seqno_valid;
 274
 275	do {
 276		INIT_LIST_HEAD(&list);
 277		mutex_lock(&fman->goal_irq_mutex);
 278
 279		spin_lock(&fman->lock);
 280		list_splice_init(&fman->cleanup_list, &list);
 281		seqno_valid = fman->seqno_valid;
 282		spin_unlock(&fman->lock);
 283
 284		if (!seqno_valid && fman->goal_irq_on) {
 285			fman->goal_irq_on = false;
 286			vmw_goal_waiter_remove(fman->dev_priv);
 287		}
 288		mutex_unlock(&fman->goal_irq_mutex);
 289
 290		if (list_empty(&list))
 291			return;
 292
 293		/*
 294		 * At this point, only we should be able to manipulate the
 295		 * list heads of the actions we have on the private list.
 296		 * hence fman::lock not held.
 297		 */
 298
 299		list_for_each_entry_safe(action, next_action, &list, head) {
 300			list_del_init(&action->head);
 301			if (action->cleanup)
 302				action->cleanup(action);
 303		}
 304	} while (1);
 305}
 306
 307struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 308{
 309	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
 310
 311	if (unlikely(!fman))
 312		return NULL;
 313
 314	fman->dev_priv = dev_priv;
 315	spin_lock_init(&fman->lock);
 316	INIT_LIST_HEAD(&fman->fence_list);
 317	INIT_LIST_HEAD(&fman->cleanup_list);
 318	INIT_WORK(&fman->work, &vmw_fence_work_func);
 319	fman->fifo_down = true;
 320	mutex_init(&fman->goal_irq_mutex);
 321	fman->ctx = dma_fence_context_alloc(1);
 322
 323	return fman;
 324}
 325
 326void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
 327{
 328	bool lists_empty;
 329
 330	(void) cancel_work_sync(&fman->work);
 331
 332	spin_lock(&fman->lock);
 333	lists_empty = list_empty(&fman->fence_list) &&
 334		list_empty(&fman->cleanup_list);
 335	spin_unlock(&fman->lock);
 336
 337	BUG_ON(!lists_empty);
 338	kfree(fman);
 339}
 340
 341static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
 342			      struct vmw_fence_obj *fence, u32 seqno,
 343			      void (*destroy) (struct vmw_fence_obj *fence))
 344{
 345	int ret = 0;
 346
 347	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
 348		       fman->ctx, seqno);
 349	INIT_LIST_HEAD(&fence->seq_passed_actions);
 350	fence->destroy = destroy;
 351
 352	spin_lock(&fman->lock);
 353	if (unlikely(fman->fifo_down)) {
 354		ret = -EBUSY;
 355		goto out_unlock;
 356	}
 357	list_add_tail(&fence->head, &fman->fence_list);
 358	++fman->num_fence_objects;
 359
 360out_unlock:
 361	spin_unlock(&fman->lock);
 362	return ret;
 363
 364}
 365
 366static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
 367				struct list_head *list)
 368{
 369	struct vmw_fence_action *action, *next_action;
 370
 371	list_for_each_entry_safe(action, next_action, list, head) {
 372		list_del_init(&action->head);
 373		fman->pending_actions[action->type]--;
 374		if (action->seq_passed != NULL)
 375			action->seq_passed(action);
 376
 377		/*
 378		 * Add the cleanup action to the cleanup list so that
 379		 * it will be performed by a worker task.
 380		 */
 381
 382		list_add_tail(&action->head, &fman->cleanup_list);
 383	}
 384}
 385
 386/**
 387 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 388 * seqno if needed.
 389 *
 390 * @fman: Pointer to a fence manager.
 391 * @passed_seqno: The seqno the device currently signals as passed.
 392 *
 393 * This function should be called with the fence manager lock held.
 394 * It is typically called when we have a new passed_seqno, and
 395 * we might need to update the fence goal. It checks to see whether
 396 * the current fence goal has already passed, and, in that case,
 397 * scans through all unsignaled fences to get the next fence object with an
 398 * action attached, and sets the seqno of that fence as a new fence goal.
 399 *
 400 * returns true if the device goal seqno was updated. False otherwise.
 401 */
 402static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 403				      u32 passed_seqno)
 404{
 405	u32 goal_seqno;
 406	struct vmw_fence_obj *fence;
 407
 408	if (likely(!fman->seqno_valid))
 409		return false;
 410
 411	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
 412	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
 413		return false;
 414
 415	fman->seqno_valid = false;
 416	list_for_each_entry(fence, &fman->fence_list, head) {
 417		if (!list_empty(&fence->seq_passed_actions)) {
 418			fman->seqno_valid = true;
 419			vmw_fence_goal_write(fman->dev_priv,
 420					     fence->base.seqno);
 421			break;
 422		}
 423	}
 424
 425	return true;
 426}
 427
 428
 429/**
 430 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 431 * needed.
 432 *
 433 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 434 * considered as a device fence goal.
 435 *
 436 * This function should be called with the fence manager lock held.
 437 * It is typically called when an action has been attached to a fence to
 438 * check whether the seqno of that fence should be used for a fence
 439 * goal interrupt. This is typically needed if the current fence goal is
 440 * invalid, or has a higher seqno than that of the current fence object.
 441 *
 442 * returns true if the device goal seqno was updated. False otherwise.
 443 */
 444static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 445{
 446	struct vmw_fence_manager *fman = fman_from_fence(fence);
 447	u32 goal_seqno;
 448
 449	if (dma_fence_is_signaled_locked(&fence->base))
 450		return false;
 451
 452	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
 453	if (likely(fman->seqno_valid &&
 454		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
 455		return false;
 456
 457	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
 458	fman->seqno_valid = true;
 459
 460	return true;
 461}
 462
 463static void __vmw_fences_update(struct vmw_fence_manager *fman)
 464{
 465	struct vmw_fence_obj *fence, *next_fence;
 466	struct list_head action_list;
 467	bool needs_rerun;
 468	uint32_t seqno, new_seqno;
 469
 470	seqno = vmw_fence_read(fman->dev_priv);
 471rerun:
 472	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
 473		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
 474			list_del_init(&fence->head);
 475			dma_fence_signal_locked(&fence->base);
 476			INIT_LIST_HEAD(&action_list);
 477			list_splice_init(&fence->seq_passed_actions,
 478					 &action_list);
 479			vmw_fences_perform_actions(fman, &action_list);
 480		} else
 481			break;
 482	}
 483
 484	/*
 485	 * Rerun if the fence goal seqno was updated, and the
 486	 * hardware might have raced with that update, so that
 487	 * we missed a fence_goal irq.
 488	 */
 489
 490	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
 491	if (unlikely(needs_rerun)) {
 492		new_seqno = vmw_fence_read(fman->dev_priv);
 493		if (new_seqno != seqno) {
 494			seqno = new_seqno;
 495			goto rerun;
 496		}
 497	}
 498
 499	if (!list_empty(&fman->cleanup_list))
 500		(void) schedule_work(&fman->work);
 501}
 502
 503void vmw_fences_update(struct vmw_fence_manager *fman)
 504{
 505	spin_lock(&fman->lock);
 506	__vmw_fences_update(fman);
 507	spin_unlock(&fman->lock);
 508}
 509
 510bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
 511{
 512	struct vmw_fence_manager *fman = fman_from_fence(fence);
 513
 514	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 515		return true;
 516
 517	vmw_fences_update(fman);
 518
 519	return dma_fence_is_signaled(&fence->base);
 520}
 521
 522int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
 523		       bool interruptible, unsigned long timeout)
 524{
 525	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
 526
 527	if (likely(ret > 0))
 528		return 0;
 529	else if (ret == 0)
 530		return -EBUSY;
 531	else
 532		return ret;
 533}
 534
 535static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 536{
 537	dma_fence_free(&fence->base);
 538}
 539
 540int vmw_fence_create(struct vmw_fence_manager *fman,
 541		     uint32_t seqno,
 542		     struct vmw_fence_obj **p_fence)
 543{
 544	struct vmw_fence_obj *fence;
 545 	int ret;
 546
 547	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 548	if (unlikely(!fence))
 549		return -ENOMEM;
 550
 551	ret = vmw_fence_obj_init(fman, fence, seqno,
 552				 vmw_fence_destroy);
 553	if (unlikely(ret != 0))
 554		goto out_err_init;
 555
 556	*p_fence = fence;
 557	return 0;
 558
 559out_err_init:
 560	kfree(fence);
 561	return ret;
 562}
 563
 564
 565static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
 566{
 567	struct vmw_user_fence *ufence =
 568		container_of(fence, struct vmw_user_fence, fence);
 569
 570	ttm_base_object_kfree(ufence, base);
 571}
 572
 573static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
 574{
 575	struct ttm_base_object *base = *p_base;
 576	struct vmw_user_fence *ufence =
 577		container_of(base, struct vmw_user_fence, base);
 578	struct vmw_fence_obj *fence = &ufence->fence;
 579
 580	*p_base = NULL;
 581	vmw_fence_obj_unreference(&fence);
 582}
 583
 584int vmw_user_fence_create(struct drm_file *file_priv,
 585			  struct vmw_fence_manager *fman,
 586			  uint32_t seqno,
 587			  struct vmw_fence_obj **p_fence,
 588			  uint32_t *p_handle)
 589{
 590	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 591	struct vmw_user_fence *ufence;
 592	struct vmw_fence_obj *tmp;
 593	int ret;
 594
 595	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
 596	if (unlikely(!ufence)) {
 597		ret = -ENOMEM;
 598		goto out_no_object;
 599	}
 600
 601	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
 602				 vmw_user_fence_destroy);
 603	if (unlikely(ret != 0)) {
 604		kfree(ufence);
 605		goto out_no_object;
 606	}
 607
 608	/*
 609	 * The base object holds a reference which is freed in
 610	 * vmw_user_fence_base_release.
 611	 */
 612	tmp = vmw_fence_obj_reference(&ufence->fence);
 613
 614	ret = ttm_base_object_init(tfile, &ufence->base, false,
 615				   VMW_RES_FENCE,
 616				   &vmw_user_fence_base_release);
 617
 618
 619	if (unlikely(ret != 0)) {
 620		/*
 621		 * Free the base object's reference
 622		 */
 623		vmw_fence_obj_unreference(&tmp);
 624		goto out_err;
 625	}
 626
 627	*p_fence = &ufence->fence;
 628	*p_handle = ufence->base.handle;
 629
 630	return 0;
 631out_err:
 632	tmp = &ufence->fence;
 633	vmw_fence_obj_unreference(&tmp);
 634out_no_object:
 635	return ret;
 636}
 637
 638/*
 639 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 640 */
 641
 642void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 643{
 644	struct list_head action_list;
 645	int ret;
 646
 647	/*
 648	 * The list may be altered while we traverse it, so always
 649	 * restart when we've released the fman->lock.
 650	 */
 651
 652	spin_lock(&fman->lock);
 653	fman->fifo_down = true;
 654	while (!list_empty(&fman->fence_list)) {
 655		struct vmw_fence_obj *fence =
 656			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
 657				   head);
 658		dma_fence_get(&fence->base);
 659		spin_unlock(&fman->lock);
 660
 661		ret = vmw_fence_obj_wait(fence, false, false,
 662					 VMW_FENCE_WAIT_TIMEOUT);
 663
 664		if (unlikely(ret != 0)) {
 665			list_del_init(&fence->head);
 666			dma_fence_signal(&fence->base);
 667			INIT_LIST_HEAD(&action_list);
 668			list_splice_init(&fence->seq_passed_actions,
 669					 &action_list);
 670			vmw_fences_perform_actions(fman, &action_list);
 671		}
 672
 673		BUG_ON(!list_empty(&fence->head));
 674		dma_fence_put(&fence->base);
 675		spin_lock(&fman->lock);
 676	}
 677	spin_unlock(&fman->lock);
 678}
 679
 680void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 681{
 682	spin_lock(&fman->lock);
 683	fman->fifo_down = false;
 684	spin_unlock(&fman->lock);
 685}
 686
 687
 688/**
 689 * vmw_fence_obj_lookup - Look up a user-space fence object
 690 *
 691 * @tfile: A struct ttm_object_file identifying the caller.
 692 * @handle: A handle identifying the fence object.
 693 * @return: A struct vmw_user_fence base ttm object on success or
 694 * an error pointer on failure.
 695 *
 696 * The fence object is looked up and type-checked. The caller needs
 697 * to have opened the fence object first, but since that happens on
 698 * creation and fence objects aren't shareable, that's not an
 699 * issue currently.
 700 */
 701static struct ttm_base_object *
 702vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
 703{
 704	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
 705
 706	if (!base) {
 707		pr_err("Invalid fence object handle 0x%08lx.\n",
 708		       (unsigned long)handle);
 709		return ERR_PTR(-EINVAL);
 710	}
 711
 712	if (base->refcount_release != vmw_user_fence_base_release) {
 713		pr_err("Invalid fence object handle 0x%08lx.\n",
 714		       (unsigned long)handle);
 715		ttm_base_object_unref(&base);
 716		return ERR_PTR(-EINVAL);
 717	}
 718
 719	return base;
 720}
 721
 722
 723int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
 724			     struct drm_file *file_priv)
 725{
 726	struct drm_vmw_fence_wait_arg *arg =
 727	    (struct drm_vmw_fence_wait_arg *)data;
 728	unsigned long timeout;
 729	struct ttm_base_object *base;
 730	struct vmw_fence_obj *fence;
 731	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 732	int ret;
 733	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
 734
 735	/*
 736	 * 64-bit division not present on 32-bit systems, so do an
 737	 * approximation. (Divide by 1000000).
 738	 */
 739
 740	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
 741	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = vmw_fence_obj_signaled(fence) ? 0 : -EBUSY;
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}

/**
 * vmw_event_fence_action_seq_passed - Deliver the pending drm event.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence to which @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup - Destroy the fence action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}
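
/*
 * Illustrative sketch (added, not in the original file): wiring up a
 * custom action the way vmw_event_fence_action_queue() below does it.
 * The my_* callback names are hypothetical.
 *
 *	action->seq_passed = my_seq_passed;  (may run in atomic context)
 *	action->cleanup = my_cleanup;        (typically run from a workqueue)
 *	action->type = VMW_ACTION_EVENT;
 *	vmw_fence_obj_add_action(fence, action);
 */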

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should have been allocated
 * using k[mz]alloc() and must be fully initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}
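
/*
 * Typical call pattern (a sketch; vmw_event_fence_action_create() below
 * is the real in-tree caller):
 *
 *	ret = vmw_event_fence_action_queue(file_priv, fence, &event->base,
 *					   NULL, NULL, true);
 *	if (ret != 0)
 *		drm_event_cancel_free(dev, &event->base);
 */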

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle = 0;	/* zero-init: otherwise read uninitialized below */
	int ret;

	/*
	 * Look up an existing fence object, and if user-space wants a
	 * new reference, add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}