/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int __qxl_bo_pin(struct qxl_bo *bo);
static void __qxl_bo_unpin(struct qxl_bo *bo);

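/*
 * TTM destroy callback: evict any hardware surface backing the BO, drop
 * the BO from the per-device GEM object list, then release the GEM
 * object and free the wrapper.
 */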
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

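/* A TTM BO belongs to this driver iff it uses our destroy callback. */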
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

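/*
 * Translate a QXL GEM domain into a TTM placement list.  Surface BOs may
 * fall back to VRAM, and any unrecognized domain falls back to system
 * memory.  BOs of at most one page are allocated top-down.
 */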
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = 0;
	}
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

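/* GEM object callbacks; mmap and print_info use the generic GEM/TTM helpers. */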
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

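/*
 * Allocate a BO of @size bytes (rounded up to a whole page) in @domain,
 * optionally pinned.  On success the BO is returned unreserved in
 * @bo_ptr; @kernel selects a kernel-internal BO that cannot be mapped
 * by userspace.
 */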
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { !kernel, false };
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	bo->tbo.priority = priority;
	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
				 &bo->placement, 0, &ctx, NULL, NULL,
				 &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	if (pinned)
		ttm_bo_pin(&bo->tbo);
	ttm_bo_unreserve(&bo->tbo);
	*bo_ptr = bo;
	return 0;
}

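/*
 * Map the whole BO into the kernel address space.  The caller must hold
 * the reservation lock.  The mapping is pinned and reference-counted via
 * map_count, so nested calls just reuse the existing mapping.
 */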
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr) {
		bo->map_count++;
		goto out;
	}

	r = __qxl_bo_pin(bo);
	if (r)
		return r;

	r = ttm_bo_vmap(&bo->tbo, &bo->map);
	if (r) {
		__qxl_bo_unpin(bo);
		return r;
	}
	bo->map_count = 1;

	/* TODO: Remove kptr in favor of map everywhere. */
	if (bo->map.is_iomem)
		bo->kptr = (void *)bo->map.vaddr_iomem;
	else
		bo->kptr = bo->map.vaddr;

out:
	*map = bo->map;
	return 0;
}

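/* Unlocked variant of qxl_bo_vmap_locked(): reserves the BO around the mapping. */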
int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_vmap_locked(bo, map);
	qxl_bo_unreserve(bo);
	return r;
}

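/*
 * Map a single page of the BO with an atomic (non-sleeping) write-combined
 * mapping when the BO lives in VRAM or surface memory; otherwise fall back
 * to the regular vmap of the whole object.
 */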
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;
	struct iosys_map bo_map;

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.resource->start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_vmap_locked(bo, &bo_map);
	if (ret)
		return NULL;
	rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

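/*
 * Drop one vmap reference; the mapping and the pin are released when the
 * last user goes away.  The caller must hold the reservation lock.
 */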
void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_vunmap(&bo->tbo, &bo->map);
	__qxl_bo_unpin(bo);
}

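/* Unlocked variant of qxl_bo_vunmap_locked(). */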
int qxl_bo_vunmap(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_vunmap_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

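/* Undo qxl_bo_kmap_atomic_page(), matching whichever mapping path was taken. */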
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.resource->mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_vunmap_locked(bo);
}

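/* Drop the caller's reference and clear its pointer. */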
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

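/* Take an extra reference on the underlying GEM object. */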
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

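/*
 * Pin helper for callers that already hold the reservation lock.  The
 * first pin validates the BO into its preferred domain; further pins
 * only bump TTM's pin count.
 */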
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		ttm_bo_pin(&bo->tbo);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

static void __qxl_bo_unpin(struct qxl_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserve the BO before pinning the object.  If the BO is already
 * reserved, call the internal helper __qxl_bo_pin() directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO is already
 * reserved, call the internal helper __qxl_bo_unpin() directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	__qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

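/*
 * Driver-teardown helper: force-free any GEM objects that userspace
 * leaked, logging each one before dropping our reference.
 */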
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

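/*
 * Lazily allocate a surface ID and the matching hardware surface for
 * surface-domain BOs on first use.
 */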
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

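/* Evict everything from surface memory (TTM_PL_PRIV). */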
int qxl_surf_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

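/* Evict everything from VRAM. */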
int qxl_vram_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}