/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - a drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

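/*
 * Per-type suballocation tables, indexed by the cur_idx computed in
 * qxl_alloc_release_reserved(): 0 = drawable, 1 = surface cmd,
 * 2 = cursor cmd.
 */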
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

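/*
 * QXL has no completion interrupt for releases, so waiting is a polling
 * loop: nudge the device to return memory (qxl_io_notify_oom), run the
 * garbage collector until it finds nothing more to do, then retry with a
 * short back-off.  Drawable releases are retried far longer before giving
 * up, presumably because the device can sit on drawables until it runs
 * out of memory.
 */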
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (dma_fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (dma_fence_is_signaled(fence))
			goto signaled;
	}

	if (dma_fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
				       "after spincount %d\n",
				       fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * The original sync_obj_wait implementation gave up after three
	 * spins when have_drawable_releases was not set, so we do the same.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

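/*
 * Allocate a release and an id for it in the release idr.  The id is what
 * the device later hands back (on the release ring) to identify this
 * release; the fence seqno is assigned under the same lock.
 */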
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

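/*
 * Drop a release: free its surface id, remove the idr entry and release
 * the bo list.  If the fence was ever initialized (base.ops is set in
 * qxl_release_fence_buffer_objects()), signal it and let the fence
 * refcount free the structure; otherwise free it directly.
 */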
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	/* pin release bos, since they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
}

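/*
 * Track a bo on the release's validation list, taking a reference so it
 * stays alive until the release is freed.  Adding the same bo twice is a
 * no-op.
 */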
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

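/*
 * Reserve and validate every bo on the release under a single ww ticket,
 * backing off completely if any of them fails.  A singular list means the
 * only bo is the (pinned) release bo itself, so there is nothing to do.
 */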
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL, true);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

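/*
 * Surface create/destroy commands are paired within one 128 byte slot
 * (see SURFACE_RELEASE_SIZE above): a destroy reuses the second half of
 * the slot holding the matching create command, hence create offset + 64.
 * Anything else gets a fresh surface-cmd release.
 */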
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

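/*
 * Allocate a release and carve its command slot out of the current
 * per-type release bo, moving on to a fresh page-sized bo once the
 * current one is full.  On success the release id has been written into
 * the slot, so the device can hand it back on the release ring.
 */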
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

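/*
 * Map the release's info structure.  Only the page containing the slot is
 * mapped (atomically), with the offset within that page added back, so
 * callers must pair this with qxl_release_unmap() promptly.
 */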
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

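/*
 * Publish the release: initialize its fence (the fake fence context is
 * the release id with the high bits set, see the comment below), attach
 * the fence to every reserved bo, and drop the reservations taken in
 * qxl_release_reserve_list().
 */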
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	glob = bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		dma_resv_add_shared_fence(bo->base.resv, &release->base);
		ttm_bo_add_to_lru(bo);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}