v6.13.7
/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

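/*
 * Editorial sketch, not driver code: the suballocation arithmetic above.
 * With PAGE_SIZE = 4096, RELEASE_SIZE = 256 yields 16 release slots per
 * BO page and SURFACE_RELEASE_SIZE = 128 yields 32. The hypothetical
 * helper below mirrors how qxl_alloc_release_reserved() turns a slot
 * index into the byte offset it stores in release_offset.
 */
static inline unsigned long example_slot_to_offset(int cur_idx, int slot)
{
	/* e.g. drawable pool (cur_idx 0), slot 3 -> 3 * 256 = 768 */
	return slot * release_size_per_bo[cur_idx];
}
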
static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);

	if (!wait_event_timeout(qdev->release_event,
				(dma_fence_is_signaled(fence) ||
				 (qxl_io_notify_oom(qdev), 0)),
				timeout))
		return 0;

	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}
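
/*
 * Editorial sketch, not driver code: the .wait callback follows the
 * dma_fence contract -- return 0 if the timeout expired, otherwise the
 * number of jiffies left, so a caller can chain waits on the remainder.
 */
static long example_wait_two(struct dma_fence *a, struct dma_fence *b,
			     signed long timeout)
{
	long left = dma_fence_wait_timeout(a, false, timeout);

	if (left <= 0)
		return left;	/* 0 on timeout, negative on error */
	return dma_fence_wait_timeout(b, false, left);
}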

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}
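
/*
 * Editorial sketch, not driver code: the idr_preload()/idr_alloc() pairing
 * above is the stock pattern for allocating an ID under a spinlock --
 * preallocate with GFP_KERNEL while sleeping is still allowed, then do the
 * actual allocation atomically with GFP_NOWAIT inside the lock.
 */
static int example_idr_add(struct idr *idr, spinlock_t *lock, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep, fills the per-cpu cache */
	spin_lock(lock);
	id = idr_alloc(idr, obj, 1, 0, GFP_NOWAIT); /* draws on the cache */
	spin_unlock(lock);
	idr_preload_end();
	return id;			/* >= 1 on success, -errno on failure */
}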

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
	atomic_dec(&qdev->release_count);
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release BOs; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false }; /* interruptible, !no_wait_gpu */
	int ret;

	if (!bo->tbo.pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
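
/*
 * Editorial sketch, not driver code: the reserve/backoff pair above follows
 * the usual execbuf shape -- reserve and validate every BO on the release,
 * then either fence the buffers on success (which also unreserves them, see
 * qxl_release_fence_buffer_objects() below) or back off on failure.
 */
static int example_submit(struct qxl_release *release)
{
	int ret = qxl_release_reserve_list(release, false);

	if (ret)
		return ret;
	/* ... fill command memory and push it to the device ring ... */
	qxl_release_fence_buffer_objects(release); /* fences + unreserves */
	return 0;
}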

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					 QXL_RELEASE_SURFACE_CMD, release, NULL);
}

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
				       int type, struct qxl_release **release,
				       struct qxl_bo **rbo)
{
	struct qxl_bo *bo, *free_bo = NULL;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}
	atomic_inc(&qdev->release_count);

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		free_bo = qdev->current_release_bo[cur_idx];
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			if (free_bo) {
				qxl_bo_unpin(free_bo);
				qxl_bo_unref(&free_bo);
			}
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);
	if (free_bo) {
		qxl_bo_unpin(free_bo);
		qxl_bo_unref(&free_bo);
	}

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}
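
/*
 * Editorial sketch, not driver code: a typical call sequence for the
 * allocator above -- allocate a reserved release, map its info page, fill
 * in the command that shares the mapping, unmap. The function name is
 * hypothetical; error handling is trimmed for brevity.
 */
static int example_alloc_cursor_release(struct qxl_device *qdev,
					struct qxl_release **release)
{
	union qxl_release_info *info;
	int ret;

	ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_cursor_cmd),
					 QXL_RELEASE_CURSOR_CMD, release, NULL);
	if (ret < 0)
		return ret;

	info = qxl_release_map(qdev, *release);
	/* ... fill the struct qxl_cursor_cmd at this mapping ... */
	qxl_release_unmap(qdev, *release, info);
	return 0;
}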

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
						   uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
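
/*
 * Worked example of the masking in the map/unmap pair above (editorial,
 * illustrative values): PAGE_MASK selects the page-aligned base handed to
 * the atomic kmap, ~PAGE_MASK keeps the remainder within that page. With
 * PAGE_SIZE = 4096 and release_offset = 4160 (0x1040):
 *
 *   release_offset &  PAGE_MASK == 0x1000  (page to map)
 *   release_offset & ~PAGE_MASK == 0x0040  (info sits 64 bytes in)
 */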

void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, &release->base,
				   DMA_RESV_USAGE_READ);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	ww_acquire_fini(&release->ticket);
}

v4.10.11
/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/dma_fence.h>

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

static bool qxl_nop_signaling(struct dma_fence *fence)
{
	/* fences are always automatically signaled, so just pretend we did this.. */
	return true;
}

static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (dma_fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (dma_fence_is_signaled(fence))
			goto signaled;
	}

	if (dma_fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
				       "after spincount %d\n",
				       fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.enable_signaling = qxl_nop_signaling,
	.wait = qxl_fence_wait,
};
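
/*
 * Editorial note: unlike the v6.13.7 listing above, this version wires up
 * .enable_signaling with a no-op that returns true. In kernels of this era
 * the callback was mandatory; it later became optional (believed to be
 * around v4.19), which is why the newer qxl_fence_ops omits it along with
 * qxl_nop_signaling().
 */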

static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	/* pin release BOs; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.shared = false;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = to_qxl_bo(entry->tv.bo);

		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					 QXL_RELEASE_SURFACE_CMD, release, NULL);
}

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
				       int type, struct qxl_release **release,
				       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
						   uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_SIZE);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
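
/*
 * Editorial note on the masking here: (release_offset & PAGE_SIZE) keeps
 * only bit 12, whereas the v6.13.7 listing above splits the offset with
 * PAGE_MASK / ~PAGE_MASK. Worked example with PAGE_SIZE = 4096 and
 * release_offset = 4160 (0x1040):
 *
 *   release_offset &  PAGE_MASK == 0x1000  (page-aligned base)
 *   release_offset & ~PAGE_MASK == 0x0040  (in-page remainder)
 *   release_offset &  PAGE_SIZE == 0x1000  (bit 12 only -- not a general mask)
 */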

void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if there is only one object on the release it is the release
	   itself; since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}