/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

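/*
 * TTM destroy callback: evict the surface from the device, drop the BO
 * from the device's GEM object list and free it. Any vmap still active
 * at this point is a bug, hence the WARN_ON_ONCE().
 */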
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

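/* A TTM BO is a qxl BO iff it uses qxl's destroy callback. */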
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

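/*
 * Build the TTM placement list for a QXL GEM domain. Surface BOs may
 * fall back from surface RAM (TTM_PL_PRIV) to VRAM; an unknown domain
 * falls back to system memory. BOs of at most one page are placed
 * top-down, which helps keep small allocations from fragmenting the
 * managed ranges.
 */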
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = 0;
	}
	qbo->placement.num_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

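/* GEM object vtable; mmap and print_info come from the TTM helpers. */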
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

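/*
 * Create a qxl BO of @size bytes (rounded up to a whole page) in the
 * given GEM @domain. The BO comes back reserved from TTM; it is pinned
 * here if requested and unreserved before being returned in @bo_ptr.
 * If ttm_bo_init_reserved() fails it has already invoked the destroy
 * callback, which frees the object, so only the error is propagated.
 */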
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { !kernel, false };
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	bo->tbo.priority = priority;
	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
				 &bo->placement, 0, &ctx, NULL, NULL,
				 &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	if (pinned)
		ttm_bo_pin(&bo->tbo);
	ttm_bo_unreserve(&bo->tbo);
	*bo_ptr = bo;
	return 0;
}

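/*
 * Map the BO into kernel address space with the reservation lock held.
 * Mappings are reference-counted through map_count, so only the first
 * call actually maps. This function does not pin the BO; pinning is
 * the caller's job (see qxl_bo_pin_and_vmap()).
 */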
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr) {
		bo->map_count++;
		goto out;
	}

	r = ttm_bo_vmap(&bo->tbo, &bo->map);
	if (r)
		return r;
	bo->map_count = 1;

	/* TODO: Remove kptr in favor of map everywhere. */
	if (bo->map.is_iomem)
		bo->kptr = (void *)bo->map.vaddr_iomem;
	else
		bo->kptr = bo->map.vaddr;

out:
	*map = bo->map;
	return 0;
}

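/*
 * Pin the BO and map it into kernel address space in one step, taking
 * the reservation lock around both. On vmap failure the pin is dropped
 * again before the lock is released.
 */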
int qxl_bo_pin_and_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_pin_locked(bo);
	if (r) {
		qxl_bo_unreserve(bo);
		return r;
	}

	r = qxl_bo_vmap_locked(bo, map);
	if (r)
		qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return r;
}

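/*
 * Map a single page of the BO for a short, atomic access. VRAM and
 * surface BOs go through the device's io_mapping; anything else falls
 * back to the (non-atomic) kernel vmap of the whole object.
 */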
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;
	struct iosys_map bo_map;

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.resource->start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_vmap_locked(bo, &bo_map);
	if (ret)
		return NULL;
	rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

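/*
 * Drop one vmap reference with the reservation lock held; the mapping
 * is torn down only when the last reference goes away.
 */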
void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_vunmap(&bo->tbo, &bo->map);
}

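/* Counterpart to qxl_bo_pin_and_vmap(): unmap, unpin, unreserve. */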
int qxl_bo_vunmap_and_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_vunmap_locked(bo);
	qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

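/*
 * Counterpart to qxl_bo_kmap_atomic_page(): end the atomic mapping, or
 * drop the vmap reference taken by the fallback path.
 */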
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.resource->mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_vunmap_locked(bo);
}

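/* Reference counting is delegated to the underlying GEM object. */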
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

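/*
 * Pin with the reservation lock held. An already-pinned BO only gets
 * its pin count bumped; otherwise it is first validated into its
 * preferred placement.
 */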
int qxl_bo_pin_locked(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		ttm_bo_pin(&bo->tbo);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

void qxl_bo_unpin_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserve the BO before pinning it. If the BO is already reserved,
 * use the internal version, qxl_bo_pin_locked(), directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_pin_locked(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning it. If the BO is already reserved,
 * use the internal version, qxl_bo_unpin_locked(), directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

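/*
 * Device-teardown helper: warn about GEM objects userspace still holds
 * and forcibly release them so the device can go away.
 */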
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

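/*
 * Surfaces get their surface id and the matching hardware surface
 * allocated lazily, on first use, rather than at creation time.
 */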
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

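/* Evict everything from the surface RAM and VRAM managers, respectively. */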
int qxl_surf_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}