/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256-byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
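
/*
 * Worked example, assuming the usual 4 KiB PAGE_SIZE: one release BO holds
 * PAGE_SIZE / RELEASE_SIZE = 4096 / 256 = 16 drawable or cursor releases,
 * and PAGE_SIZE / SURFACE_RELEASE_SIZE = 4096 / 128 = 32 surface-cmd
 * releases.  The three slots in the arrays above are indexed by release
 * type, matching qxl_alloc_release_reserved(): 0 = drawable, 1 = surface
 * cmd, 2 = cursor cmd.
 */
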
static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

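/*
 * Implements the dma_fence_ops.wait contract: return 0 if the timeout
 * expired, otherwise the jiffies remaining.  Besides sleeping on
 * release_event, each wakeup pokes the device with an out-of-memory
 * notify so it flushes pending releases that would signal this fence.
 */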
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);

	/*
	 * The comma expression runs qxl_io_notify_oom() on every wakeup
	 * and then evaluates to 0, so it nudges the device to flush
	 * releases without ever satisfying the wait by itself.
	 */
	if (!wait_event_timeout(qdev->release_event,
				(dma_fence_is_signaled(fence) ||
				 (qxl_io_notify_oom(qdev), 0)),
				timeout))
		return 0;

	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

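/*
 * Allocate a qxl_release and publish it in the device's release IDR so the
 * device can hand the id back through the release ring later.  Returns the
 * positive IDR handle on success or a negative errno.  Note that the
 * embedded fence is only initialised in qxl_release_fence_buffer_objects();
 * until then base.ops stays NULL, which is how qxl_release_free() tells
 * unfenced releases apart.
 */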
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
	atomic_dec(&qdev->release_count);
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release BOs - they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

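/*
 * Track the BO on the release's validation list, taking a reference that
 * qxl_release_free_list() drops again.  Adding a BO that is already on
 * the list is a no-op, so callers do not need to deduplicate.
 */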
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { .interruptible = true,
					 .no_wait_gpu = false };
	int ret;

	if (!bo->tbo.pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}

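/*
 * Reserve every BO on the release and validate it.  An illustrative
 * caller sequence (a sketch of how the rest of the driver uses this
 * file, not code lifted from it):
 *
 *	qxl_alloc_release_reserved(qdev, size, type, &release, &cmd_bo);
 *	qxl_release_reserve_list(release, false);
 *	... write the command into the mapped BO ...
 *	qxl_release_fence_buffer_objects(release);	// success path
 *
 * with qxl_release_backoff_reserve_list() plus qxl_release_free() on the
 * error path instead.
 */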
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object on the release, it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object on the release, it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

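/*
 * Suballocate a release from the current per-type release BO, allocating
 * a fresh page-sized BO whenever releases_per_bo[] slots run out.  The
 * type mapping below is: drawable -> slot 0, priority 0; surface cmd ->
 * slot 1, priority 1; cursor cmd -> slot 2, priority 1.
 */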
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo, *free_bo = NULL;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}
	atomic_inc(&qdev->release_count);

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		free_bo = qdev->current_release_bo[cur_idx];
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			if (free_bo) {
				qxl_bo_unpin(free_bo);
				qxl_bo_unref(&free_bo);
			}
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);
	if (free_bo) {
		qxl_bo_unpin(free_bo);
		qxl_bo_unref(&free_bo);
	}

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

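/*
 * qxl_release_map()/qxl_release_unmap() bracket short accesses to the
 * release's info header through an atomic kmap of the containing page,
 * so the caller must not sleep in between.  The typical pattern, as used
 * above in this file:
 *
 *	info = qxl_release_map(qdev, release);
 *	info->id = release->id;
 *	qxl_release_unmap(qdev, release, info);
 */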
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

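/*
 * Publish the release's fence on every reserved BO and then drop the
 * reservations taken by qxl_release_reserve_list(): each BO gets the
 * fence added to its reservation object, goes back on the LRU and is
 * unlocked, and finally the ww_acquire ticket is released.
 */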
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if only one object on the release, it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, &release->base,
				   DMA_RESV_USAGE_READ);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	ww_acquire_fini(&release->ticket);
}