v3.1
  1/**************************************************************************
  2 *
  3 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28
 29#include "vmwgfx_drv.h"
 30
 31struct vmw_fence {
 32	struct list_head head;
 33	uint32_t sequence;
 34	struct timespec submitted;
 35};
 36
 37void vmw_fence_queue_init(struct vmw_fence_queue *queue)
 38{
 39	INIT_LIST_HEAD(&queue->head);
 40	queue->lag = ns_to_timespec(0);
 41	getrawmonotonic(&queue->lag_time);
 42	spin_lock_init(&queue->lock);
 43}
 44
 45void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
 46{
 47	struct vmw_fence *fence, *next;
 48
 49	spin_lock(&queue->lock);
 50	list_for_each_entry_safe(fence, next, &queue->head, head) {
 51		kfree(fence);
 52	}
 53	spin_unlock(&queue->lock);
 54}
 55
 56int vmw_fence_push(struct vmw_fence_queue *queue,
 57		   uint32_t sequence)
 58{
 59	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
 60
 61	if (unlikely(!fence))
 62		return -ENOMEM;
 63
 64	fence->sequence = sequence;
 65	getrawmonotonic(&fence->submitted);
 66	spin_lock(&queue->lock);
 67	list_add_tail(&fence->head, &queue->head);
 68	spin_unlock(&queue->lock);
 69
 70	return 0;
 71}
 72
 73int vmw_fence_pull(struct vmw_fence_queue *queue,
 74		   uint32_t signaled_sequence)
 75{
 76	struct vmw_fence *fence, *next;
 77	struct timespec now;
 78	bool updated = false;
 79
 80	spin_lock(&queue->lock);
 81	getrawmonotonic(&now);
 82
 83	if (list_empty(&queue->head)) {
 84		queue->lag = ns_to_timespec(0);
 85		queue->lag_time = now;
 86		updated = true;
 87		goto out_unlock;
 88	}
 89
 90	list_for_each_entry_safe(fence, next, &queue->head, head) {
 91		if (signaled_sequence - fence->sequence > (1 << 30))
 92			continue;
 93
 94		queue->lag = timespec_sub(now, fence->submitted);
 95		queue->lag_time = now;
 96		updated = true;
 97		list_del(&fence->head);
 98		kfree(fence);
 99	}
100
101out_unlock:
102	spin_unlock(&queue->lock);
103
104	return (updated) ? 0 : -EBUSY;
105}
106
107static struct timespec vmw_timespec_add(struct timespec t1,
108					struct timespec t2)
109{
110	t1.tv_sec += t2.tv_sec;
111	t1.tv_nsec += t2.tv_nsec;
112	if (t1.tv_nsec >= 1000000000L) {
113		t1.tv_sec += 1;
114		t1.tv_nsec -= 1000000000L;
115	}
116
117	return t1;
118}
119
120static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
121{
122	struct timespec now;
123
124	spin_lock(&queue->lock);
125	getrawmonotonic(&now);
126	queue->lag = vmw_timespec_add(queue->lag,
127				      timespec_sub(now, queue->lag_time));
128	queue->lag_time = now;
129	spin_unlock(&queue->lock);
130	return queue->lag;
131}
132
133
134static bool vmw_lag_lt(struct vmw_fence_queue *queue,
135		       uint32_t us)
136{
137	struct timespec lag, cond;
138
139	cond = ns_to_timespec((s64) us * 1000);
140	lag = vmw_fifo_lag(queue);
141	return (timespec_compare(&lag, &cond) < 1);
142}
143
144int vmw_wait_lag(struct vmw_private *dev_priv,
145		 struct vmw_fence_queue *queue, uint32_t us)
146{
147	struct vmw_fence *fence;
148	uint32_t sequence;
149	int ret;
150
151	while (!vmw_lag_lt(queue, us)) {
152		spin_lock(&queue->lock);
153		if (list_empty(&queue->head))
154			sequence = atomic_read(&dev_priv->fence_seq);
155		else {
156			fence = list_first_entry(&queue->head,
157						 struct vmw_fence, head);
158			sequence = fence->sequence;
159		}
160		spin_unlock(&queue->lock);
161
162		ret = vmw_wait_fence(dev_priv, false, sequence, true,
163				     3*HZ);
164
165		if (unlikely(ret != 0))
166			return ret;
167
168		(void) vmw_fence_pull(queue, sequence);
169	}
170	return 0;
171}
172
173
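Both versions of this file decide whether a fence has been passed with the same wrap-safe trick: the 32-bit sequence numbers are compared by unsigned subtraction, so the test still works after the hardware counter wraps past 0xffffffff. The v3.1 code above skips fences whose (signaled_sequence - fence->sequence) exceeds 1 << 30, while the v4.10.11 code below treats a fence as signaled when (seqno - fence->base.seqno) is below VMW_FENCE_WRAP (1 << 31). The stand-alone sketch below (not driver code; seqno_passed is a made-up helper using the half-range threshold) illustrates why the subtraction form survives wrap-around:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe check: has sequence number 'fence' been passed by 'signaled'? */
static int seqno_passed(uint32_t signaled, uint32_t fence)
{
	/*
	 * Unsigned subtraction is modulo 2^32, so a "small" difference means
	 * the fence sequence is at or behind the last signaled sequence,
	 * even if the counter wrapped in between.
	 */
	return (signaled - fence) < (1u << 31);
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));          /* 1: plainly passed */
	printf("%d\n", seqno_passed(5, 10));          /* 0: not yet signaled */
	printf("%d\n", seqno_passed(3, 0xfffffff0u)); /* 1: passed across the wrap */
	return 0;
}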
v4.10.11
   1/**************************************************************************
   2 *
   3 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/drmP.h>
  29#include "vmwgfx_drv.h"
  30
  31#define VMW_FENCE_WRAP (1 << 31)
  32
  33struct vmw_fence_manager {
  34	int num_fence_objects;
  35	struct vmw_private *dev_priv;
  36	spinlock_t lock;
  37	struct list_head fence_list;
  38	struct work_struct work;
  39	u32 user_fence_size;
  40	u32 fence_size;
  41	u32 event_fence_action_size;
  42	bool fifo_down;
  43	struct list_head cleanup_list;
  44	uint32_t pending_actions[VMW_ACTION_MAX];
  45	struct mutex goal_irq_mutex;
  46	bool goal_irq_on; /* Protected by @goal_irq_mutex */
  47	bool seqno_valid; /* Protected by @lock, and may not be set to true
  48			     without the @goal_irq_mutex held. */
  49	u64 ctx;
  50};
  51
  52struct vmw_user_fence {
  53	struct ttm_base_object base;
  54	struct vmw_fence_obj fence;
  55};
  56
  57/**
  58 * struct vmw_event_fence_action - fence action that delivers a drm event.
  59 *
  60 * @event: A struct drm_pending_event that controls the event delivery.
  61 * @action: A struct vmw_fence_action to hook up to a fence.
  62 * @fence: A referenced pointer to the fence to keep it alive while @action
  63 * hangs on it.
  64 * @dev: Pointer to a struct drm_device so we can access the event stuff.
  65 * @kref: Both @event and @action have destructors, so we need to refcount.
  66 * @size: Size accounted for this object.
  67 * @tv_sec: If non-null, the variable pointed to will be assigned
  68 * current time tv_sec val when the fence signals.
  69 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
  70 * be assigned the current time tv_usec val when the fence signals.
  71 */
  72struct vmw_event_fence_action {
  73	struct vmw_fence_action action;
  74
  75	struct drm_pending_event *event;
  76	struct vmw_fence_obj *fence;
  77	struct drm_device *dev;
  78
  79	uint32_t *tv_sec;
  80	uint32_t *tv_usec;
  81};
  82
  83static struct vmw_fence_manager *
  84fman_from_fence(struct vmw_fence_obj *fence)
  85{
  86	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
  87}
  88
  89/**
  90 * Note on fencing subsystem usage of irqs:
  91 * Typically the vmw_fences_update function is called
  92 *
  93 * a) When a new fence seqno has been submitted by the fifo code.
  94 * b) On-demand when we have waiters. Sleeping waiters will switch on the
  95 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
  96 * irq is received. When the last fence waiter is gone, that IRQ is masked
  97 * away.
  98 *
  99 * In situations where there are no waiters and we don't submit any new fences,
 100 * fence objects may not be signaled. This is perfectly OK, since there are
 101 * no consumers of the signaled data, but that is NOT ok when there are fence
 102 * actions attached to a fence. The fencing subsystem then makes use of the
 103 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 104 * which has an action attached, and each time vmw_fences_update is called,
 105 * the subsystem makes sure the fence goal seqno is updated.
 106 *
 107 * The fence goal seqno irq is on as long as there are unsignaled fence
 108 * objects with actions attached to them.
 109 */
 110
 111static void vmw_fence_obj_destroy(struct dma_fence *f)
 112{
 113	struct vmw_fence_obj *fence =
 114		container_of(f, struct vmw_fence_obj, base);
 115
 116	struct vmw_fence_manager *fman = fman_from_fence(fence);
 117	unsigned long irq_flags;
 118
 119	spin_lock_irqsave(&fman->lock, irq_flags);
 120	list_del_init(&fence->head);
 121	--fman->num_fence_objects;
 122	spin_unlock_irqrestore(&fman->lock, irq_flags);
 123	fence->destroy(fence);
 124}
 125
 126static const char *vmw_fence_get_driver_name(struct dma_fence *f)
 127{
 128	return "vmwgfx";
 129}
 130
 131static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
 132{
 133	return "svga";
 134}
 135
 136static bool vmw_fence_enable_signaling(struct dma_fence *f)
 137{
 138	struct vmw_fence_obj *fence =
 139		container_of(f, struct vmw_fence_obj, base);
 140
 141	struct vmw_fence_manager *fman = fman_from_fence(fence);
 142	struct vmw_private *dev_priv = fman->dev_priv;
 143
 144	u32 *fifo_mem = dev_priv->mmio_virt;
 145	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 146	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 147		return false;
 148
 149	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 150
 151	return true;
 152}
 153
 154struct vmwgfx_wait_cb {
 155	struct dma_fence_cb base;
 156	struct task_struct *task;
 157};
 158
 159static void
 160vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 161{
 162	struct vmwgfx_wait_cb *wait =
 163		container_of(cb, struct vmwgfx_wait_cb, base);
 164
 165	wake_up_process(wait->task);
 166}
 167
 168static void __vmw_fences_update(struct vmw_fence_manager *fman);
 169
 170static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
 171{
 172	struct vmw_fence_obj *fence =
 173		container_of(f, struct vmw_fence_obj, base);
 174
 175	struct vmw_fence_manager *fman = fman_from_fence(fence);
 176	struct vmw_private *dev_priv = fman->dev_priv;
 177	struct vmwgfx_wait_cb cb;
 178	long ret = timeout;
 179	unsigned long irq_flags;
 180
 181	if (likely(vmw_fence_obj_signaled(fence)))
 182		return timeout;
 183
 184	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 185	vmw_seqno_waiter_add(dev_priv);
 186
 187	spin_lock_irqsave(f->lock, irq_flags);
 188
 189	if (intr && signal_pending(current)) {
 190		ret = -ERESTARTSYS;
 191		goto out;
 192	}
 193
 194	cb.base.func = vmwgfx_wait_cb;
 195	cb.task = current;
 196	list_add(&cb.base.node, &f->cb_list);
 197
 198	while (ret > 0) {
 199		__vmw_fences_update(fman);
 200		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
 201			break;
 202
 203		if (intr)
 204			__set_current_state(TASK_INTERRUPTIBLE);
 205		else
 206			__set_current_state(TASK_UNINTERRUPTIBLE);
 207		spin_unlock_irqrestore(f->lock, irq_flags);
 208
 209		ret = schedule_timeout(ret);
 210
 211		spin_lock_irqsave(f->lock, irq_flags);
 212		if (ret > 0 && intr && signal_pending(current))
 213			ret = -ERESTARTSYS;
 214	}
 215
 216	if (!list_empty(&cb.base.node))
 217		list_del(&cb.base.node);
 218	__set_current_state(TASK_RUNNING);
 219
 220out:
 221	spin_unlock_irqrestore(f->lock, irq_flags);
 222
 223	vmw_seqno_waiter_remove(dev_priv);
 224
 225	return ret;
 226}
 227
 228static struct dma_fence_ops vmw_fence_ops = {
 229	.get_driver_name = vmw_fence_get_driver_name,
 230	.get_timeline_name = vmw_fence_get_timeline_name,
 231	.enable_signaling = vmw_fence_enable_signaling,
 232	.wait = vmw_fence_wait,
 233	.release = vmw_fence_obj_destroy,
 234};
 235
 236
 237/**
 238 * Execute signal actions on fences recently signaled.
 239 * This is done from a workqueue so we don't have to execute
 240 * signal actions from atomic context.
 241 */
 242
 243static void vmw_fence_work_func(struct work_struct *work)
 244{
 245	struct vmw_fence_manager *fman =
 246		container_of(work, struct vmw_fence_manager, work);
 247	struct list_head list;
 248	struct vmw_fence_action *action, *next_action;
 249	bool seqno_valid;
 250
 251	do {
 252		INIT_LIST_HEAD(&list);
 253		mutex_lock(&fman->goal_irq_mutex);
 254
 255		spin_lock_irq(&fman->lock);
 256		list_splice_init(&fman->cleanup_list, &list);
 257		seqno_valid = fman->seqno_valid;
 258		spin_unlock_irq(&fman->lock);
 259
 260		if (!seqno_valid && fman->goal_irq_on) {
 261			fman->goal_irq_on = false;
 262			vmw_goal_waiter_remove(fman->dev_priv);
 263		}
 264		mutex_unlock(&fman->goal_irq_mutex);
 265
 266		if (list_empty(&list))
 267			return;
 268
 269		/*
 270		 * At this point, only we should be able to manipulate the
 271		 * list heads of the actions we have on the private list.
 272		 * Hence fman::lock need not be held.
 273		 */
 274
 275		list_for_each_entry_safe(action, next_action, &list, head) {
 276			list_del_init(&action->head);
 277			if (action->cleanup)
 278				action->cleanup(action);
 279		}
 280	} while (1);
 281}
 282
 283struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 284{
 285	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
 286
 287	if (unlikely(fman == NULL))
 288		return NULL;
 289
 290	fman->dev_priv = dev_priv;
 291	spin_lock_init(&fman->lock);
 292	INIT_LIST_HEAD(&fman->fence_list);
 293	INIT_LIST_HEAD(&fman->cleanup_list);
 294	INIT_WORK(&fman->work, &vmw_fence_work_func);
 295	fman->fifo_down = true;
 296	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
 297	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
 298	fman->event_fence_action_size =
 299		ttm_round_pot(sizeof(struct vmw_event_fence_action));
 300	mutex_init(&fman->goal_irq_mutex);
 301	fman->ctx = dma_fence_context_alloc(1);
 302
 303	return fman;
 304}
 305
 306void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
 307{
 308	unsigned long irq_flags;
 309	bool lists_empty;
 310
 311	(void) cancel_work_sync(&fman->work);
 312
 313	spin_lock_irqsave(&fman->lock, irq_flags);
 314	lists_empty = list_empty(&fman->fence_list) &&
 315		list_empty(&fman->cleanup_list);
 316	spin_unlock_irqrestore(&fman->lock, irq_flags);
 317
 318	BUG_ON(!lists_empty);
 319	kfree(fman);
 320}
 321
 322static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
 323			      struct vmw_fence_obj *fence, u32 seqno,
 324			      void (*destroy) (struct vmw_fence_obj *fence))
 325{
 326	unsigned long irq_flags;
 327	int ret = 0;
 328
 329	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
 330		       fman->ctx, seqno);
 331	INIT_LIST_HEAD(&fence->seq_passed_actions);
 332	fence->destroy = destroy;
 333
 334	spin_lock_irqsave(&fman->lock, irq_flags);
 335	if (unlikely(fman->fifo_down)) {
 336		ret = -EBUSY;
 337		goto out_unlock;
 338	}
 339	list_add_tail(&fence->head, &fman->fence_list);
 340	++fman->num_fence_objects;
 341
 342out_unlock:
 343	spin_unlock_irqrestore(&fman->lock, irq_flags);
 344	return ret;
 345
 346}
 347
 348static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
 349				struct list_head *list)
 350{
 351	struct vmw_fence_action *action, *next_action;
 352
 353	list_for_each_entry_safe(action, next_action, list, head) {
 354		list_del_init(&action->head);
 355		fman->pending_actions[action->type]--;
 356		if (action->seq_passed != NULL)
 357			action->seq_passed(action);
 358
 359		/*
 360		 * Add the cleanup action to the cleanup list so that
 361		 * it will be performed by a worker task.
 362		 */
 363
 364		list_add_tail(&action->head, &fman->cleanup_list);
 365	}
 366}
 367
 368/**
 369 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 370 * seqno if needed.
 371 *
 372 * @fman: Pointer to a fence manager.
 373 * @passed_seqno: The seqno the device currently signals as passed.
 374 *
 375 * This function should be called with the fence manager lock held.
 376 * It is typically called when we have a new passed_seqno, and
 377 * we might need to update the fence goal. It checks to see whether
 378 * the current fence goal has already passed, and, in that case,
 379 * scans through all unsignaled fences to get the next fence object with an
 380 * action attached, and sets the seqno of that fence as a new fence goal.
 381 *
 382 * returns true if the device goal seqno was updated. False otherwise.
 383 */
 384static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 385				      u32 passed_seqno)
 386{
 387	u32 goal_seqno;
 388	u32 *fifo_mem;
 389	struct vmw_fence_obj *fence;
 390
 391	if (likely(!fman->seqno_valid))
 392		return false;
 393
 394	fifo_mem = fman->dev_priv->mmio_virt;
 395	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
 396	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
 397		return false;
 398
 399	fman->seqno_valid = false;
 400	list_for_each_entry(fence, &fman->fence_list, head) {
 401		if (!list_empty(&fence->seq_passed_actions)) {
 402			fman->seqno_valid = true;
 403			vmw_mmio_write(fence->base.seqno,
 404				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
 405			break;
 406		}
 407	}
 408
 409	return true;
 410}
 411
 412
 413/**
 414 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 415 * needed.
 416 *
 417 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 418 * considered as a device fence goal.
 419 *
 420 * This function should be called with the fence manager lock held.
 421 * It is typically called when an action has been attached to a fence to
 422 * check whether the seqno of that fence should be used for a fence
 423 * goal interrupt. This is typically needed if the current fence goal is
 424 * invalid, or has a higher seqno than that of the current fence object.
 425 *
 426 * returns true if the device goal seqno was updated. False otherwise.
 427 */
 428static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 429{
 430	struct vmw_fence_manager *fman = fman_from_fence(fence);
 431	u32 goal_seqno;
 432	u32 *fifo_mem;
 433
 434	if (dma_fence_is_signaled_locked(&fence->base))
 435		return false;
 436
 437	fifo_mem = fman->dev_priv->mmio_virt;
 438	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
 439	if (likely(fman->seqno_valid &&
 440		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
 441		return false;
 442
 443	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
 444	fman->seqno_valid = true;
 445
 446	return true;
 447}
 448
 449static void __vmw_fences_update(struct vmw_fence_manager *fman)
 450{
 451	struct vmw_fence_obj *fence, *next_fence;
 452	struct list_head action_list;
 453	bool needs_rerun;
 454	uint32_t seqno, new_seqno;
 455	u32 *fifo_mem = fman->dev_priv->mmio_virt;
 456
 457	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 458rerun:
 459	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
 460		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
 461			list_del_init(&fence->head);
 462			dma_fence_signal_locked(&fence->base);
 463			INIT_LIST_HEAD(&action_list);
 464			list_splice_init(&fence->seq_passed_actions,
 465					 &action_list);
 466			vmw_fences_perform_actions(fman, &action_list);
 467		} else
 468			break;
 469	}
 470
 471	/*
 472	 * Rerun if the fence goal seqno was updated, and the
 473	 * hardware might have raced with that update, so that
 474	 * we missed a fence_goal irq.
 475	 */
 476
 477	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
 478	if (unlikely(needs_rerun)) {
 479		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 480		if (new_seqno != seqno) {
 481			seqno = new_seqno;
 482			goto rerun;
 483		}
 484	}
 485
 486	if (!list_empty(&fman->cleanup_list))
 487		(void) schedule_work(&fman->work);
 488}
 489
 490void vmw_fences_update(struct vmw_fence_manager *fman)
 491{
 492	unsigned long irq_flags;
 493
 494	spin_lock_irqsave(&fman->lock, irq_flags);
 495	__vmw_fences_update(fman);
 496	spin_unlock_irqrestore(&fman->lock, irq_flags);
 497}
 498
 499bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
 500{
 501	struct vmw_fence_manager *fman = fman_from_fence(fence);
 502
 503	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 504		return 1;
 505
 506	vmw_fences_update(fman);
 507
 508	return dma_fence_is_signaled(&fence->base);
 509}
 510
 511int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
 512		       bool interruptible, unsigned long timeout)
 513{
 514	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
 515
 516	if (likely(ret > 0))
 517		return 0;
 518	else if (ret == 0)
 519		return -EBUSY;
 520	else
 521		return ret;
 522}
 523
 524void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 525{
 526	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
 527
 528	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 529}
 530
 531static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 532{
 533	dma_fence_free(&fence->base);
 534}
 535
 536int vmw_fence_create(struct vmw_fence_manager *fman,
 537		     uint32_t seqno,
 538		     struct vmw_fence_obj **p_fence)
 539{
 540	struct vmw_fence_obj *fence;
 541 	int ret;
 542
 543	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 544	if (unlikely(fence == NULL))
 545		return -ENOMEM;
 546
 547	ret = vmw_fence_obj_init(fman, fence, seqno,
 548				 vmw_fence_destroy);
 549	if (unlikely(ret != 0))
 550		goto out_err_init;
 551
 552	*p_fence = fence;
 553	return 0;
 554
 555out_err_init:
 556	kfree(fence);
 557	return ret;
 558}
 559
 560
 561static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
 562{
 563	struct vmw_user_fence *ufence =
 564		container_of(fence, struct vmw_user_fence, fence);
 565	struct vmw_fence_manager *fman = fman_from_fence(fence);
 566
 567	ttm_base_object_kfree(ufence, base);
 568	/*
 569	 * Free kernel space accounting.
 570	 */
 571	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
 572			    fman->user_fence_size);
 573}
 574
 575static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
 576{
 577	struct ttm_base_object *base = *p_base;
 578	struct vmw_user_fence *ufence =
 579		container_of(base, struct vmw_user_fence, base);
 580	struct vmw_fence_obj *fence = &ufence->fence;
 581
 582	*p_base = NULL;
 583	vmw_fence_obj_unreference(&fence);
 584}
 585
 586int vmw_user_fence_create(struct drm_file *file_priv,
 587			  struct vmw_fence_manager *fman,
 588			  uint32_t seqno,
 589			  struct vmw_fence_obj **p_fence,
 590			  uint32_t *p_handle)
 591{
 592	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 593	struct vmw_user_fence *ufence;
 594	struct vmw_fence_obj *tmp;
 595	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
 596	int ret;
 597
 598	/*
 599	 * Kernel memory space accounting, since this object may
 600	 * be created by a user-space request.
 601	 */
 602
 603	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
 604				   false, false);
 605	if (unlikely(ret != 0))
 606		return ret;
 607
 608	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
 609	if (unlikely(ufence == NULL)) {
 610		ret = -ENOMEM;
 611		goto out_no_object;
 612	}
 613
 614	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
 615				 vmw_user_fence_destroy);
 616	if (unlikely(ret != 0)) {
 617		kfree(ufence);
 618		goto out_no_object;
 619	}
 620
 621	/*
 622	 * The base object holds a reference which is freed in
 623	 * vmw_user_fence_base_release.
 624	 */
 625	tmp = vmw_fence_obj_reference(&ufence->fence);
 626	ret = ttm_base_object_init(tfile, &ufence->base, false,
 627				   VMW_RES_FENCE,
 628				   &vmw_user_fence_base_release, NULL);
 629
 630
 631	if (unlikely(ret != 0)) {
 632		/*
 633		 * Free the base object's reference
 634		 */
 635		vmw_fence_obj_unreference(&tmp);
 636		goto out_err;
 637	}
 638
 639	*p_fence = &ufence->fence;
 640	*p_handle = ufence->base.hash.key;
 641
 642	return 0;
 643out_err:
 644	tmp = &ufence->fence;
 645	vmw_fence_obj_unreference(&tmp);
 646out_no_object:
 647	ttm_mem_global_free(mem_glob, fman->user_fence_size);
 648	return ret;
 649}
 650
 651
 652/**
 653 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 654 */
 655
 656void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 657{
 658	struct list_head action_list;
 659	int ret;
 660
 661	/*
 662	 * The list may be altered while we traverse it, so always
 663	 * restart when we've released the fman->lock.
 664	 */
 665
 666	spin_lock_irq(&fman->lock);
 667	fman->fifo_down = true;
 668	while (!list_empty(&fman->fence_list)) {
 669		struct vmw_fence_obj *fence =
 670			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
 671				   head);
 672		dma_fence_get(&fence->base);
 673		spin_unlock_irq(&fman->lock);
 674
 675		ret = vmw_fence_obj_wait(fence, false, false,
 676					 VMW_FENCE_WAIT_TIMEOUT);
 677
 678		if (unlikely(ret != 0)) {
 679			list_del_init(&fence->head);
 680			dma_fence_signal(&fence->base);
 681			INIT_LIST_HEAD(&action_list);
 682			list_splice_init(&fence->seq_passed_actions,
 683					 &action_list);
 684			vmw_fences_perform_actions(fman, &action_list);
 685		}
 686
 687		BUG_ON(!list_empty(&fence->head));
 688		dma_fence_put(&fence->base);
 689		spin_lock_irq(&fman->lock);
 690	}
 691	spin_unlock_irq(&fman->lock);
 692}
 693
 694void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 695{
 696	unsigned long irq_flags;
 697
 698	spin_lock_irqsave(&fman->lock, irq_flags);
 699	fman->fifo_down = false;
 700	spin_unlock_irqrestore(&fman->lock, irq_flags);
 701}
 702
 703
 704/**
 705 * vmw_fence_obj_lookup - Look up a user-space fence object
 706 *
 707 * @tfile: A struct ttm_object_file identifying the caller.
 708 * @handle: A handle identifying the fence object.
 709 * @return: A struct vmw_user_fence base ttm object on success or
 710 * an error pointer on failure.
 711 *
 712 * The fence object is looked up and type-checked. The caller needs
 713 * to have opened the fence object first, but since that happens on
 714 * creation and fence objects aren't shareable, that's not an
 715 * issue currently.
 716 */
 717static struct ttm_base_object *
 718vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
 719{
 720	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
 721
 722	if (!base) {
 723		pr_err("Invalid fence object handle 0x%08lx.\n",
 724		       (unsigned long)handle);
 725		return ERR_PTR(-EINVAL);
 726	}
 727
 728	if (base->refcount_release != vmw_user_fence_base_release) {
 729		pr_err("Invalid fence object handle 0x%08lx.\n",
 730		       (unsigned long)handle);
 731		ttm_base_object_unref(&base);
 732		return ERR_PTR(-EINVAL);
 733	}
 734
 735	return base;
 736}
 737
 738
 739int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
 740			     struct drm_file *file_priv)
 741{
 742	struct drm_vmw_fence_wait_arg *arg =
 743	    (struct drm_vmw_fence_wait_arg *)data;
 744	unsigned long timeout;
 745	struct ttm_base_object *base;
 746	struct vmw_fence_obj *fence;
 747	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 748	int ret;
 749	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
 750
 751	/*
 752	 * 64-bit division not present on 32-bit systems, so do an
 753	 * approximation. (Divide by 1000000).
 754	 */
 755
 756	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
 757	  (wait_timeout >> 26);
 758
 759	if (!arg->cookie_valid) {
 760		arg->cookie_valid = 1;
 761		arg->kernel_cookie = jiffies + wait_timeout;
 762	}
 763
 764	base = vmw_fence_obj_lookup(tfile, arg->handle);
 765	if (IS_ERR(base))
 766		return PTR_ERR(base);
 767
 768	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 769
 770	timeout = jiffies;
 771	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
 772		ret = ((vmw_fence_obj_signaled(fence)) ?
 773		       0 : -EBUSY);
 774		goto out;
 775	}
 776
 777	timeout = (unsigned long)arg->kernel_cookie - timeout;
 778
 779	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
 780
 781out:
 782	ttm_base_object_unref(&base);
 783
 784	/*
 785	 * Optionally unref the fence object.
 786	 */
 787
 788	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
 789		return ttm_ref_object_base_unref(tfile, arg->handle,
 790						 TTM_REF_USAGE);
 791	return ret;
 792}
 793
 794int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
 795				 struct drm_file *file_priv)
 796{
 797	struct drm_vmw_fence_signaled_arg *arg =
 798		(struct drm_vmw_fence_signaled_arg *) data;
 799	struct ttm_base_object *base;
 800	struct vmw_fence_obj *fence;
 801	struct vmw_fence_manager *fman;
 802	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 803	struct vmw_private *dev_priv = vmw_priv(dev);
 804
 805	base = vmw_fence_obj_lookup(tfile, arg->handle);
 806	if (IS_ERR(base))
 807		return PTR_ERR(base);
 808
 809	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 810	fman = fman_from_fence(fence);
 811
 812	arg->signaled = vmw_fence_obj_signaled(fence);
 813
 814	arg->signaled_flags = arg->flags;
 815	spin_lock_irq(&fman->lock);
 816	arg->passed_seqno = dev_priv->last_read_seqno;
 817	spin_unlock_irq(&fman->lock);
 818
 819	ttm_base_object_unref(&base);
 820
 821	return 0;
 822}
 823
 824
 825int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
 826			      struct drm_file *file_priv)
 827{
 828	struct drm_vmw_fence_arg *arg =
 829		(struct drm_vmw_fence_arg *) data;
 830
 831	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 832					 arg->handle,
 833					 TTM_REF_USAGE);
 834}
 835
 836/**
 837 * vmw_event_fence_action_seq_passed
 838 *
 839 * @action: The struct vmw_fence_action embedded in a struct
 840 * vmw_event_fence_action.
 841 *
 842 * This function is called when the seqno of the fence where @action is
 843 * attached has passed. It queues the event on the submitter's event list.
 844 * This function is always called from atomic context, and may be called
 845 * from irq context.
 846 */
 847static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 848{
 849	struct vmw_event_fence_action *eaction =
 850		container_of(action, struct vmw_event_fence_action, action);
 851	struct drm_device *dev = eaction->dev;
 852	struct drm_pending_event *event = eaction->event;
 853	struct drm_file *file_priv;
 854	unsigned long irq_flags;
 855
 856	if (unlikely(event == NULL))
 857		return;
 858
 859	file_priv = event->file_priv;
 860	spin_lock_irqsave(&dev->event_lock, irq_flags);
 861
 862	if (likely(eaction->tv_sec != NULL)) {
 863		struct timeval tv;
 864
 865		do_gettimeofday(&tv);
 866		*eaction->tv_sec = tv.tv_sec;
 867		*eaction->tv_usec = tv.tv_usec;
 868	}
 869
 870	drm_send_event_locked(dev, eaction->event);
 871	eaction->event = NULL;
 872	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
 873}
 874
 875/**
 876 * vmw_event_fence_action_cleanup
 877 *
 878 * @action: The struct vmw_fence_action embedded in a struct
 879 * vmw_event_fence_action.
 880 *
 881 * This function is the struct vmw_fence_action destructor. It's typically
 882 * called from a workqueue.
 883 */
 884static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
 885{
 886	struct vmw_event_fence_action *eaction =
 887		container_of(action, struct vmw_event_fence_action, action);
 888
 889	vmw_fence_obj_unreference(&eaction->fence);
 890	kfree(eaction);
 891}
 892
 893
 894/**
 895 * vmw_fence_obj_add_action - Add an action to a fence object.
 896 *
 897 * @fence - The fence object.
 898 * @action - The action to add.
 899 *
 900 * Note that the action callbacks may be executed before this function
 901 * returns.
 902 */
 903static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 904			      struct vmw_fence_action *action)
 905{
 906	struct vmw_fence_manager *fman = fman_from_fence(fence);
 907	unsigned long irq_flags;
 908	bool run_update = false;
 909
 910	mutex_lock(&fman->goal_irq_mutex);
 911	spin_lock_irqsave(&fman->lock, irq_flags);
 912
 913	fman->pending_actions[action->type]++;
 914	if (dma_fence_is_signaled_locked(&fence->base)) {
 915		struct list_head action_list;
 916
 917		INIT_LIST_HEAD(&action_list);
 918		list_add_tail(&action->head, &action_list);
 919		vmw_fences_perform_actions(fman, &action_list);
 920	} else {
 921		list_add_tail(&action->head, &fence->seq_passed_actions);
 922
 923		/*
 924		 * This function may set fman::seqno_valid, so it must
 925		 * be run with the goal_irq_mutex held.
 926		 */
 927		run_update = vmw_fence_goal_check_locked(fence);
 928	}
 929
 930	spin_unlock_irqrestore(&fman->lock, irq_flags);
 931
 932	if (run_update) {
 933		if (!fman->goal_irq_on) {
 934			fman->goal_irq_on = true;
 935			vmw_goal_waiter_add(fman->dev_priv);
 936		}
 937		vmw_fences_update(fman);
 938	}
 939	mutex_unlock(&fman->goal_irq_mutex);
 940
 941}
 942
 943/**
 944 * vmw_event_fence_action_create - Post an event for sending when a fence
 945 * object seqno has passed.
 946 *
 947 * @file_priv: The file connection on which the event should be posted.
 948 * @fence: The fence object on which to post the event.
 949 * @event: Event to be posted. This event should've been allocated
 950 * using k[mz]alloc, and should've been completely initialized.
 951 * @interruptible: Interruptible waits if possible.
 952 *
 953 * As a side effect, the object pointed to by @event may have been
 954 * freed when this function returns. If this function returns with
 955 * an error code, the caller needs to free that object.
 956 */
 957
 958int vmw_event_fence_action_queue(struct drm_file *file_priv,
 959				 struct vmw_fence_obj *fence,
 960				 struct drm_pending_event *event,
 961				 uint32_t *tv_sec,
 962				 uint32_t *tv_usec,
 963				 bool interruptible)
 964{
 965	struct vmw_event_fence_action *eaction;
 966	struct vmw_fence_manager *fman = fman_from_fence(fence);
 967
 968	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
 969	if (unlikely(eaction == NULL))
 970		return -ENOMEM;
 971
 972	eaction->event = event;
 973
 974	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
 975	eaction->action.cleanup = vmw_event_fence_action_cleanup;
 976	eaction->action.type = VMW_ACTION_EVENT;
 977
 978	eaction->fence = vmw_fence_obj_reference(fence);
 979	eaction->dev = fman->dev_priv->dev;
 980	eaction->tv_sec = tv_sec;
 981	eaction->tv_usec = tv_usec;
 982
 983	vmw_fence_obj_add_action(fence, &eaction->action);
 984
 985	return 0;
 986}
 987
 988struct vmw_event_fence_pending {
 989	struct drm_pending_event base;
 990	struct drm_vmw_event_fence event;
 991};
 992
 993static int vmw_event_fence_action_create(struct drm_file *file_priv,
 994				  struct vmw_fence_obj *fence,
 995				  uint32_t flags,
 996				  uint64_t user_data,
 997				  bool interruptible)
 998{
 999	struct vmw_event_fence_pending *event;
1000	struct vmw_fence_manager *fman = fman_from_fence(fence);
1001	struct drm_device *dev = fman->dev_priv->dev;
1002	int ret;
1003
1004	event = kzalloc(sizeof(*event), GFP_KERNEL);
1005	if (unlikely(event == NULL)) {
1006		DRM_ERROR("Failed to allocate an event.\n");
1007		ret = -ENOMEM;
1008		goto out_no_space;
1009	}
1010
1011	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
1012	event->event.base.length = sizeof(*event);
1013	event->event.user_data = user_data;
1014
1015	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
1016
1017	if (unlikely(ret != 0)) {
1018		DRM_ERROR("Failed to allocate event space for this file.\n");
1019		kfree(event);
1020		goto out_no_space;
1021	}
1022
1023	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
1024		ret = vmw_event_fence_action_queue(file_priv, fence,
1025						   &event->base,
1026						   &event->event.tv_sec,
1027						   &event->event.tv_usec,
1028						   interruptible);
1029	else
1030		ret = vmw_event_fence_action_queue(file_priv, fence,
1031						   &event->base,
1032						   NULL,
1033						   NULL,
1034						   interruptible);
1035	if (ret != 0)
1036		goto out_no_queue;
1037
1038	return 0;
1039
1040out_no_queue:
1041	drm_event_cancel_free(dev, &event->base);
1042out_no_space:
1043	return ret;
1044}
1045
1046int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1047			  struct drm_file *file_priv)
1048{
1049	struct vmw_private *dev_priv = vmw_priv(dev);
1050	struct drm_vmw_fence_event_arg *arg =
1051		(struct drm_vmw_fence_event_arg *) data;
1052	struct vmw_fence_obj *fence = NULL;
1053	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1054	struct ttm_object_file *tfile = vmw_fp->tfile;
1055	struct drm_vmw_fence_rep __user *user_fence_rep =
1056		(struct drm_vmw_fence_rep __user *)(unsigned long)
1057		arg->fence_rep;
1058	uint32_t handle;
1059	int ret;
1060
1061	/*
1062	 * Look up an existing fence object,
1063	 * and if user-space wants a new reference,
1064	 * add one.
1065	 */
1066	if (arg->handle) {
1067		struct ttm_base_object *base =
1068			vmw_fence_obj_lookup(tfile, arg->handle);
1069
1070		if (IS_ERR(base))
1071			return PTR_ERR(base);
1072
1073		fence = &(container_of(base, struct vmw_user_fence,
1074				       base)->fence);
1075		(void) vmw_fence_obj_reference(fence);
1076
1077		if (user_fence_rep != NULL) {
1078			ret = ttm_ref_object_add(vmw_fp->tfile, base,
1079						 TTM_REF_USAGE, NULL, false);
1080			if (unlikely(ret != 0)) {
1081				DRM_ERROR("Failed to reference a fence "
1082					  "object.\n");
1083				goto out_no_ref_obj;
1084			}
1085			handle = base->hash.key;
1086		}
1087		ttm_base_object_unref(&base);
1088	}
1089
1090	/*
1091	 * Create a new fence object.
1092	 */
1093	if (!fence) {
1094		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
1095						 &fence,
1096						 (user_fence_rep) ?
1097						 &handle : NULL);
1098		if (unlikely(ret != 0)) {
1099			DRM_ERROR("Fence event failed to create fence.\n");
1100			return ret;
1101		}
1102	}
1103
1104	BUG_ON(fence == NULL);
1105
1106	ret = vmw_event_fence_action_create(file_priv, fence,
1107					    arg->flags,
1108					    arg->user_data,
1109					    true);
1110	if (unlikely(ret != 0)) {
1111		if (ret != -ERESTARTSYS)
1112			DRM_ERROR("Failed to attach event to fence.\n");
1113		goto out_no_create;
1114	}
1115
1116	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
1117				    handle);
1118	vmw_fence_obj_unreference(&fence);
1119	return 0;
1120out_no_create:
1121	if (user_fence_rep != NULL)
1122		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1123out_no_ref_obj:
1124	vmw_fence_obj_unreference(&fence);
1125	return ret;
1126}
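The comment in vmw_fence_obj_wait_ioctl() above explains that the microsecond timeout is converted without a 64-bit division: x / 1000000 is approximated as (x >> 20) + (x >> 24) - (x >> 26), since 2^-20 + 2^-24 - 2^-26 is roughly 0.998e-6. A small user-space sketch (approx_div_1e6 is a hypothetical name, not driver code) shows the result is only a fraction of a percent low for large inputs, which is good enough for a jiffies timeout:

#include <stdint.h>
#include <stdio.h>

/* Shift-based approximation of x / 1000000, as used for the wait timeout. */
static uint64_t approx_div_1e6(uint64_t x)
{
	return (x >> 20) + (x >> 24) - (x >> 26);
}

int main(void)
{
	const uint64_t samples[] = { 123456789ULL, 3000000000ULL, 1000000000000ULL };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t x = samples[i];

		printf("x=%llu exact=%llu approx=%llu\n",
		       (unsigned long long)x,
		       (unsigned long long)(x / 1000000ULL),
		       (unsigned long long)approx_div_1e6(x));
	}
	return 0;
}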