v6.13.7
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/iosys-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

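/*
 * TTM destroy callback: evict the surface, drop the BO from the
 * device's GEM object list and free it.
 */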
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

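/*
 * Build the TTM placement list for a QXL GEM domain.  BOs of one page
 * or less are placed top-down (TTM_PL_FLAG_TOPDOWN); surface BOs get a
 * fallback placement in VRAM after the surfaces area (TTM_PL_PRIV).
 */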
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = 0;
	}
	qbo->placement.num_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

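/*
 * Create a qxl_bo: allocate it, initialize the embedded GEM and TTM
 * objects and validate it into @domain.  On success the BO is returned
 * unreserved via @bo_ptr, pinned if @pinned was set.
 */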
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = { !kernel, false };
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	bo->tbo.priority = priority;
	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
				 &bo->placement, 0, &ctx, NULL, NULL,
				 &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	if (pinned)
		ttm_bo_pin(&bo->tbo);
	ttm_bo_unreserve(&bo->tbo);
	*bo_ptr = bo;
	return 0;
}

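/*
 * Map the BO into kernel address space.  The caller must hold the
 * reservation lock; repeated mappings are refcounted via map_count.
 */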
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr) {
		bo->map_count++;
		goto out;
	}

	r = ttm_bo_vmap(&bo->tbo, &bo->map);
	if (r) {
		qxl_bo_unpin_locked(bo);
		return r;
	}
	bo->map_count = 1;

	/* TODO: Remove kptr in favor of map everywhere. */
	if (bo->map.is_iomem)
		bo->kptr = (void *)bo->map.vaddr_iomem;
	else
		bo->kptr = bo->map.vaddr;

out:
	*map = bo->map;
	return 0;
}

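/* Reserve, pin and vmap the BO in one call, unwinding the pin on failure. */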
int qxl_bo_pin_and_vmap(struct qxl_bo *bo, struct iosys_map *map)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_pin_locked(bo);
	if (r) {
		qxl_bo_unreserve(bo);
		return r;
	}

	r = qxl_bo_vmap_locked(bo, map);
	if (r)
		qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return r;
}

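/*
 * Atomically map a single page of a VRAM or surfaces BO through the
 * device's io_mapping; other BOs fall back to a (refcounted) vmap.
 */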
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;
	struct iosys_map bo_map;

	if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.resource->start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_vmap_locked(bo, &bo_map);
	if (ret)
		return NULL;
	rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

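/* Drop one mapping reference; the vmap is torn down when map_count reaches zero. */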
void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_vunmap(&bo->tbo, &bo->map);
}

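/* Counterpart to qxl_bo_pin_and_vmap(): reserve, then vunmap and unpin. */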
int qxl_bo_vunmap_and_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_vunmap_locked(bo);
	qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

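/*
 * Undo qxl_bo_kmap_atomic_page(): unmap the atomic page mapping, or
 * drop the vmap reference taken by the fallback path.
 */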
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.resource->mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_vunmap_locked(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

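/*
 * Pin the BO with its reservation held.  An already-pinned BO only gets
 * its pin count bumped; otherwise it is validated into its domain first.
 */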
int qxl_bo_pin_locked(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		ttm_bo_pin(&bo->tbo);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

void qxl_bo_unpin_locked(struct qxl_bo *bo)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserve the BO before pinning the object.  If the BO was already
 * reserved, use the locked variant, qxl_bo_pin_locked(), directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = qxl_bo_pin_locked(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO was already
 * reserved, use the locked variant, qxl_bo_unpin_locked(), directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	qxl_bo_unpin_locked(bo);
	qxl_bo_unreserve(bo);
	return 0;
}

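/* Report and release any GEM objects userspace has left behind. */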
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

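/*
 * Ensure a surface BO has a surface id and a backing hardware surface,
 * allocating both on first use.
 */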
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

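/* Evict all buffers from the surfaces and VRAM resource managers. */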
int qxl_surf_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}
v4.10.11
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
	unsigned i;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

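/*
 * Map the whole BO via ttm_bo_kmap() and cache the kernel pointer in
 * bo->kptr for reuse by later callers.
 */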
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

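/*
 * Atomically map one page of a VRAM or surfaces BO; the io region is
 * reserved under the memory-type manager lock first.
 */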
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_reference(&bo->gem_base);
	return bo;
}

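/*
 * Pin the BO into @domain and optionally return its GPU offset.  Pins
 * are refcounted; only the first pin validates with TTM_PL_FLAG_NO_EVICT.
 */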
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, domain, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p pin failed\n", bo);
	return r;
}

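/*
 * Drop one pin reference.  On the last unpin, clear TTM_PL_FLAG_NO_EVICT
 * from the placements and revalidate so the BO can be evicted again.
 */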
int qxl_bo_unpin(struct qxl_bo *bo)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}