v3.1
  1/*
  2 * Copyright (C) 2008 Ben Skeggs.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining
  6 * a copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sublicense, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * The above copyright notice and this permission notice (including the
 14 * next paragraph) shall be included in all copies or substantial
 15 * portions of the Software.
 16 *
 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 24 *
 25 */
 26#include "drmP.h"
 27#include "drm.h"
 28
 29#include "nouveau_drv.h"
 30#include "nouveau_drm.h"
 31#include "nouveau_dma.h"
 32
 33#define nouveau_gem_pushbuf_sync(chan) 0
 34
 35int
 36nouveau_gem_object_new(struct drm_gem_object *gem)
 37{
 38	return 0;
 39}
 40
 41void
 42nouveau_gem_object_del(struct drm_gem_object *gem)
 43{
 44	struct nouveau_bo *nvbo = gem->driver_private;
 45	struct ttm_buffer_object *bo = &nvbo->bo;
 46
 47	if (!nvbo)
 48		return;
 49	nvbo->gem = NULL;
 50
 51	if (unlikely(nvbo->pin_refcnt)) {
 52		nvbo->pin_refcnt = 1;
 53		nouveau_bo_unpin(nvbo);
 54	}
 55
 56	ttm_bo_unref(&bo);
 57
 58	drm_gem_object_release(gem);
 59	kfree(gem);
 60}
 61
 62int
 63nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 64{
 65	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
 66	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 67	struct nouveau_vma *vma;
 68	int ret;
 69
 70	if (!fpriv->vm)
 71		return 0;
 72
 73	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
 74	if (ret)
 75		return ret;
 76
 77	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
 78	if (!vma) {
 79		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 80		if (!vma) {
 81			ret = -ENOMEM;
 82			goto out;
 83		}
 84
 85		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
 86		if (ret) {
 87			kfree(vma);
 88			goto out;
 89		}
 90	} else {
 91		vma->refcount++;
 92	}
 93
 94out:
 95	ttm_bo_unreserve(&nvbo->bo);
 96	return ret;
 97}
 98
 99void
100nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
101{
102	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
103	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
104	struct nouveau_vma *vma;
105	int ret;
106
107	if (!fpriv->vm)
108		return;
109
110	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
111	if (ret)
112		return;
113
114	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
115	if (vma) {
116		if (--vma->refcount == 0) {
117			nouveau_bo_vma_del(nvbo, vma);
118			kfree(vma);
119		}
120	}
121	ttm_bo_unreserve(&nvbo->bo);
122}
123
124int
125nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
126		uint32_t tile_mode, uint32_t tile_flags,
127		struct nouveau_bo **pnvbo)
128{
129	struct drm_nouveau_private *dev_priv = dev->dev_private;
130	struct nouveau_bo *nvbo;
131	u32 flags = 0;
132	int ret;
133
134	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
135		flags |= TTM_PL_FLAG_VRAM;
136	if (domain & NOUVEAU_GEM_DOMAIN_GART)
137		flags |= TTM_PL_FLAG_TT;
138	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
139		flags |= TTM_PL_FLAG_SYSTEM;
140
141	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
142			     tile_flags, pnvbo);
143	if (ret)
144		return ret;
145	nvbo = *pnvbo;
146
147	/* we restrict allowed domains on nv50+ to only the types
 148	 * that were requested at creation time.  not possible on
149	 * earlier chips without busting the ABI.
150	 */
151	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
152			      NOUVEAU_GEM_DOMAIN_GART;
153	if (dev_priv->card_type >= NV_50)
154		nvbo->valid_domains &= domain;
155
156	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
157	if (!nvbo->gem) {
158		nouveau_bo_ref(NULL, pnvbo);
159		return -ENOMEM;
160	}
161
162	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
163	nvbo->gem->driver_private = nvbo;
164	return 0;
165}
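
For context, nouveau_gem_new() is reached from userspace through the DRM_NOUVEAU_GEM_NEW ioctl handled by nouveau_gem_ioctl_new() below. A minimal, hypothetical libdrm sketch of that call; the fd setup and the helper name are assumptions, not part of this file:

/* Hypothetical userspace sketch: allocate a 1 MiB VRAM-backed bo via
 * the ioctl that lands in nouveau_gem_ioctl_new(). Assumes an open
 * DRM fd for a nouveau device and libdrm's drmCommandWriteRead().
 */
#include <stdint.h>
#include <xf86drm.h>
#include "nouveau_drm.h"	/* uapi: struct drm_nouveau_gem_new */

static int alloc_vram_bo(int fd, uint32_t *handle)
{
	struct drm_nouveau_gem_new req = {
		.info.size   = 1 << 20,
		.info.domain = NOUVEAU_GEM_DOMAIN_VRAM,
		.align       = 0x1000,
	};
	int ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_NEW,
				      &req, sizeof(req));
	if (ret == 0)
		*handle = req.info.handle;	/* handle created above */
	return ret;
}
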
166
167static int
168nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
169		 struct drm_nouveau_gem_info *rep)
170{
171	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
172	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
173	struct nouveau_vma *vma;
174
175	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
176		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
177	else
178		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
179
180	rep->offset = nvbo->bo.offset;
181	if (fpriv->vm) {
182		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
183		if (!vma)
184			return -EINVAL;
185
186		rep->offset = vma->offset;
187	}
188
189	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
190	rep->map_handle = nvbo->bo.addr_space_offset;
191	rep->tile_mode = nvbo->tile_mode;
192	rep->tile_flags = nvbo->tile_flags;
193	return 0;
194}
195
196int
197nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
198		      struct drm_file *file_priv)
199{
200	struct drm_nouveau_private *dev_priv = dev->dev_private;
201	struct drm_nouveau_gem_new *req = data;
202	struct nouveau_bo *nvbo = NULL;
203	int ret = 0;
204
205	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
206		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
207
208	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
209		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
210		return -EINVAL;
211	}
212
213	ret = nouveau_gem_new(dev, req->info.size, req->align,
214			      req->info.domain, req->info.tile_mode,
215			      req->info.tile_flags, &nvbo);
216	if (ret)
217		return ret;
218
219	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
220	if (ret == 0) {
221		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
222		if (ret)
223			drm_gem_handle_delete(file_priv, req->info.handle);
224	}
225
226	/* drop reference from allocate - handle holds it now */
227	drm_gem_object_unreference_unlocked(nvbo->gem);
228	return ret;
229}
230
231static int
232nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
233		       uint32_t write_domains, uint32_t valid_domains)
234{
235	struct nouveau_bo *nvbo = gem->driver_private;
236	struct ttm_buffer_object *bo = &nvbo->bo;
237	uint32_t domains = valid_domains & nvbo->valid_domains &
238		(write_domains ? write_domains : read_domains);
239	uint32_t pref_flags = 0, valid_flags = 0;
240
241	if (!domains)
242		return -EINVAL;
243
244	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
245		valid_flags |= TTM_PL_FLAG_VRAM;
246
247	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
248		valid_flags |= TTM_PL_FLAG_TT;
249
250	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
251	    bo->mem.mem_type == TTM_PL_VRAM)
252		pref_flags |= TTM_PL_FLAG_VRAM;
253
254	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
255		 bo->mem.mem_type == TTM_PL_TT)
256		pref_flags |= TTM_PL_FLAG_TT;
257
258	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
259		pref_flags |= TTM_PL_FLAG_VRAM;
260
261	else
262		pref_flags |= TTM_PL_FLAG_TT;
263
264	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
265
266	return 0;
267}
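
The preference logic above keeps a buffer where it already resides whenever its current placement is still within the requested set, and only forces a migration otherwise (favouring VRAM). Summarized as a table (illustration only, not driver code):

/* requested domains   current mem_type   preferred placement
 *   VRAM | GART         TTM_PL_VRAM        VRAM  (stays put)
 *   VRAM | GART         TTM_PL_TT          GART  (stays put)
 *   VRAM only           TTM_PL_TT          VRAM  (forces a move)
 *   GART only           TTM_PL_VRAM        GART  (forces a move)
 */
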
268
269struct validate_op {
270	struct list_head vram_list;
271	struct list_head gart_list;
272	struct list_head both_list;
273};
274
275static void
276validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
277{
278	struct list_head *entry, *tmp;
279	struct nouveau_bo *nvbo;
280
281	list_for_each_safe(entry, tmp, list) {
282		nvbo = list_entry(entry, struct nouveau_bo, entry);
283
284		nouveau_bo_fence(nvbo, fence);
285
286		if (unlikely(nvbo->validate_mapped)) {
287			ttm_bo_kunmap(&nvbo->kmap);
288			nvbo->validate_mapped = false;
289		}
290
291		list_del(&nvbo->entry);
292		nvbo->reserved_by = NULL;
293		ttm_bo_unreserve(&nvbo->bo);
294		drm_gem_object_unreference_unlocked(nvbo->gem);
295	}
296}
297
298static void
299validate_fini(struct validate_op *op, struct nouveau_fence* fence)
300{
301	validate_fini_list(&op->vram_list, fence);
302	validate_fini_list(&op->gart_list, fence);
303	validate_fini_list(&op->both_list, fence);
304}
305
306static int
307validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
308	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
309	      int nr_buffers, struct validate_op *op)
310{
311	struct drm_device *dev = chan->dev;
312	struct drm_nouveau_private *dev_priv = dev->dev_private;
313	uint32_t sequence;
314	int trycnt = 0;
315	int ret, i;
316
317	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
318retry:
319	if (++trycnt > 100000) {
320		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
321		return -EINVAL;
322	}
323
324	for (i = 0; i < nr_buffers; i++) {
325		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
326		struct drm_gem_object *gem;
327		struct nouveau_bo *nvbo;
328
329		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
330		if (!gem) {
331			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
332			validate_fini(op, NULL);
333			return -ENOENT;
334		}
335		nvbo = gem->driver_private;
336
337		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
338			NV_ERROR(dev, "multiple instances of buffer %d on "
339				      "validation list\n", b->handle);
340			validate_fini(op, NULL);
341			return -EINVAL;
342		}
343
344		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
345		if (ret) {
346			validate_fini(op, NULL);
347			if (unlikely(ret == -EAGAIN))
348				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
349			drm_gem_object_unreference_unlocked(gem);
350			if (unlikely(ret)) {
351				if (ret != -ERESTARTSYS)
352					NV_ERROR(dev, "fail reserve\n");
353				return ret;
354			}
355			goto retry;
356		}
357
358		b->user_priv = (uint64_t)(unsigned long)nvbo;
359		nvbo->reserved_by = file_priv;
360		nvbo->pbbo_index = i;
361		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
362		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
363			list_add_tail(&nvbo->entry, &op->both_list);
364		else
365		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
366			list_add_tail(&nvbo->entry, &op->vram_list);
367		else
368		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
369			list_add_tail(&nvbo->entry, &op->gart_list);
370		else {
371			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
372				 b->valid_domains);
373			list_add_tail(&nvbo->entry, &op->both_list);
374			validate_fini(op, NULL);
375			return -EINVAL;
376		}
377	}
378
379	return 0;
380}
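
This version predates the kernel's ww_mutex infrastructure: deadlock avoidance between tasks reserving overlapping buffer sets rests on the global ttm.validate_sequence counter. An outline of the protocol implemented above (descriptive only):

/* 1. take a fresh sequence number for this validation attempt;
 * 2. reserve each bo in list order, tagged with that sequence;
 * 3. on -EAGAIN (an older sequence already holds the bo), back off
 *    completely: validate_fini(op, NULL) unreserves everything taken
 *    so far, ttm_bo_wait_unreserved() blocks until the winner is done;
 * 4. goto retry and start over, up to the 100000-attempt cap.
 */
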
381
382static int
383validate_list(struct nouveau_channel *chan, struct list_head *list,
384	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
385{
386	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
387	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
388				(void __force __user *)(uintptr_t)user_pbbo_ptr;
389	struct drm_device *dev = chan->dev;
390	struct nouveau_bo *nvbo;
391	int ret, relocs = 0;
392
393	list_for_each_entry(nvbo, list, entry) {
394		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
395
396		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
397		if (unlikely(ret)) {
398			NV_ERROR(dev, "fail pre-validate sync\n");
399			return ret;
400		}
401
402		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
403					     b->write_domains,
404					     b->valid_domains);
405		if (unlikely(ret)) {
406			NV_ERROR(dev, "fail set_domain\n");
407			return ret;
408		}
409
410		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
411		ret = nouveau_bo_validate(nvbo, true, false, false);
412		nvbo->channel = NULL;
413		if (unlikely(ret)) {
414			if (ret != -ERESTARTSYS)
415				NV_ERROR(dev, "fail ttm_validate\n");
416			return ret;
417		}
418
419		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
420		if (unlikely(ret)) {
421			NV_ERROR(dev, "fail post-validate sync\n");
422			return ret;
423		}
424
425		if (dev_priv->card_type < NV_50) {
426			if (nvbo->bo.offset == b->presumed.offset &&
427			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
428			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
429			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
430			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
431				continue;
432
433			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
434				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
435			else
436				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
437			b->presumed.offset = nvbo->bo.offset;
438			b->presumed.valid = 0;
439			relocs++;
440
441			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
442					     &b->presumed, sizeof(b->presumed)))
443				return -EFAULT;
444		}
445	}
446
447	return relocs;
448}
449
450static int
451nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
452			     struct drm_file *file_priv,
453			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
454			     uint64_t user_buffers, int nr_buffers,
455			     struct validate_op *op, int *apply_relocs)
456{
457	struct drm_device *dev = chan->dev;
458	int ret, relocs = 0;
459
460	INIT_LIST_HEAD(&op->vram_list);
461	INIT_LIST_HEAD(&op->gart_list);
462	INIT_LIST_HEAD(&op->both_list);
463
464	if (nr_buffers == 0)
465		return 0;
466
467	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
468	if (unlikely(ret)) {
469		if (ret != -ERESTARTSYS)
470			NV_ERROR(dev, "validate_init\n");
471		return ret;
472	}
473
474	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
475	if (unlikely(ret < 0)) {
476		if (ret != -ERESTARTSYS)
477			NV_ERROR(dev, "validate vram_list\n");
478		validate_fini(op, NULL);
479		return ret;
480	}
481	relocs += ret;
482
483	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
484	if (unlikely(ret < 0)) {
485		if (ret != -ERESTARTSYS)
486			NV_ERROR(dev, "validate gart_list\n");
487		validate_fini(op, NULL);
488		return ret;
489	}
490	relocs += ret;
491
492	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
493	if (unlikely(ret < 0)) {
494		if (ret != -ERESTARTSYS)
495			NV_ERROR(dev, "validate both_list\n");
496		validate_fini(op, NULL);
497		return ret;
498	}
499	relocs += ret;
500
501	*apply_relocs = relocs;
502	return 0;
503}
504
505static inline void *
506u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
507{
508	void *mem;
509	void __user *userptr = (void __force __user *)(uintptr_t)user;
510
511	mem = kmalloc(nmemb * size, GFP_KERNEL);
512	if (!mem)
513		return ERR_PTR(-ENOMEM);
514
515	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
516		kfree(mem);
517		return ERR_PTR(-EFAULT);
518	}
519
520	return mem;
521}
522
523static int
524nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
525				struct drm_nouveau_gem_pushbuf *req,
526				struct drm_nouveau_gem_pushbuf_bo *bo)
527{
528	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
529	int ret = 0;
530	unsigned i;
531
532	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
533	if (IS_ERR(reloc))
534		return PTR_ERR(reloc);
535
536	for (i = 0; i < req->nr_relocs; i++) {
537		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
538		struct drm_nouveau_gem_pushbuf_bo *b;
539		struct nouveau_bo *nvbo;
540		uint32_t data;
541
 542		if (unlikely(r->bo_index >= req->nr_buffers)) {
543			NV_ERROR(dev, "reloc bo index invalid\n");
544			ret = -EINVAL;
545			break;
546		}
547
548		b = &bo[r->bo_index];
549		if (b->presumed.valid)
550			continue;
551
 552		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
553			NV_ERROR(dev, "reloc container bo index invalid\n");
554			ret = -EINVAL;
555			break;
556		}
557		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
558
559		if (unlikely(r->reloc_bo_offset + 4 >
560			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
561			NV_ERROR(dev, "reloc outside of bo\n");
562			ret = -EINVAL;
563			break;
564		}
565
566		if (!nvbo->kmap.virtual) {
567			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
568					  &nvbo->kmap);
569			if (ret) {
570				NV_ERROR(dev, "failed kmap for reloc\n");
571				break;
572			}
573			nvbo->validate_mapped = true;
574		}
575
576		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
577			data = b->presumed.offset + r->data;
578		else
579		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
580			data = (b->presumed.offset + r->data) >> 32;
581		else
582			data = r->data;
583
584		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
585			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
586				data |= r->tor;
587			else
588				data |= r->vor;
589		}
590
591		spin_lock(&nvbo->bo.bdev->fence_lock);
592		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
593		spin_unlock(&nvbo->bo.bdev->fence_lock);
594		if (ret) {
595			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
596			break;
597		}
598
599		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
600	}
601
602	kfree(reloc);
603	return ret;
604}
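
The RELOC_LOW/RELOC_HIGH flags let a 64-bit presumed GPU address be patched into two separate 32-bit command words. A worked example with hypothetical values:

/* Worked example (values are hypothetical):
 *   b->presumed.offset = 0x240000000, r->data = 0x100
 *   NOUVEAU_GEM_RELOC_LOW  -> data = 0x40000100  (bits 31:0 of the sum)
 *   NOUVEAU_GEM_RELOC_HIGH -> data = 0x00000002  (bits 63:32 of the sum)
 * With NOUVEAU_GEM_RELOC_OR set, r->vor or r->tor is then OR'ed in,
 * depending on whether the target bo ended up in VRAM or GART.
 */
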
605
606int
607nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
608			  struct drm_file *file_priv)
609{
610	struct drm_nouveau_private *dev_priv = dev->dev_private;
611	struct drm_nouveau_gem_pushbuf *req = data;
612	struct drm_nouveau_gem_pushbuf_push *push;
613	struct drm_nouveau_gem_pushbuf_bo *bo;
614	struct nouveau_channel *chan;
615	struct validate_op op;
616	struct nouveau_fence *fence = NULL;
617	int i, j, ret = 0, do_reloc = 0;
618
619	chan = nouveau_channel_get(file_priv, req->channel);
620	if (IS_ERR(chan))
621		return PTR_ERR(chan);
622
623	req->vram_available = dev_priv->fb_aper_free;
624	req->gart_available = dev_priv->gart_info.aper_free;
625	if (unlikely(req->nr_push == 0))
626		goto out_next;
627
628	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
629		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
630			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
631		nouveau_channel_put(&chan);
632		return -EINVAL;
633	}
634
635	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
636		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
637			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
638		nouveau_channel_put(&chan);
639		return -EINVAL;
640	}
641
642	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
643		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
644			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
645		nouveau_channel_put(&chan);
646		return -EINVAL;
647	}
648
649	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
650	if (IS_ERR(push)) {
651		nouveau_channel_put(&chan);
652		return PTR_ERR(push);
653	}
654
655	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
656	if (IS_ERR(bo)) {
657		kfree(push);
658		nouveau_channel_put(&chan);
659		return PTR_ERR(bo);
660	}
661
 662	/* Mark push buffers as being used on PFIFO; the validation code
 663	 * will then make sure that if a pushbuf bo moves, the move
 664	 * happens on the kernel channel, which in turn causes a sync
 665	 * before we try to submit the push buffer.
666	 */
667	for (i = 0; i < req->nr_push; i++) {
668		if (push[i].bo_index >= req->nr_buffers) {
669			NV_ERROR(dev, "push %d buffer not in list\n", i);
670			ret = -EINVAL;
671			goto out_prevalid;
672		}
673
674		bo[push[i].bo_index].read_domains |= (1 << 31);
675	}
676
677	/* Validate buffer list */
678	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
679					   req->nr_buffers, &op, &do_reloc);
680	if (ret) {
681		if (ret != -ERESTARTSYS)
682			NV_ERROR(dev, "validate: %d\n", ret);
683		goto out_prevalid;
684	}
685
686	/* Apply any relocations that are required */
687	if (do_reloc) {
688		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
689		if (ret) {
690			NV_ERROR(dev, "reloc apply: %d\n", ret);
691			goto out;
692		}
693	}
694
695	if (chan->dma.ib_max) {
696		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
697		if (ret) {
698			NV_INFO(dev, "nv50cal_space: %d\n", ret);
699			goto out;
700		}
701
702		for (i = 0; i < req->nr_push; i++) {
703			struct nouveau_bo *nvbo = (void *)(unsigned long)
704				bo[push[i].bo_index].user_priv;
705
706			nv50_dma_push(chan, nvbo, push[i].offset,
707				      push[i].length);
708		}
709	} else
710	if (dev_priv->chipset >= 0x25) {
711		ret = RING_SPACE(chan, req->nr_push * 2);
712		if (ret) {
713			NV_ERROR(dev, "cal_space: %d\n", ret);
714			goto out;
715		}
716
717		for (i = 0; i < req->nr_push; i++) {
718			struct nouveau_bo *nvbo = (void *)(unsigned long)
719				bo[push[i].bo_index].user_priv;
720			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
721
722			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
723					push[i].offset) | 2);
724			OUT_RING(chan, 0);
725		}
726	} else {
727		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
728		if (ret) {
729			NV_ERROR(dev, "jmp_space: %d\n", ret);
730			goto out;
731		}
732
733		for (i = 0; i < req->nr_push; i++) {
734			struct nouveau_bo *nvbo = (void *)(unsigned long)
735				bo[push[i].bo_index].user_priv;
736			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
737			uint32_t cmd;
738
739			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
740			cmd |= 0x20000000;
741			if (unlikely(cmd != req->suffix0)) {
742				if (!nvbo->kmap.virtual) {
743					ret = ttm_bo_kmap(&nvbo->bo, 0,
744							  nvbo->bo.mem.
745							  num_pages,
746							  &nvbo->kmap);
747					if (ret) {
748						WIND_RING(chan);
749						goto out;
750					}
751					nvbo->validate_mapped = true;
752				}
753
754				nouveau_bo_wr32(nvbo, (push[i].offset +
755						push[i].length - 8) / 4, cmd);
756			}
757
758			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
759					push[i].offset) | 0x20000000);
760			OUT_RING(chan, 0);
761			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
762				OUT_RING(chan, 0);
763		}
764	}
765
766	ret = nouveau_fence_new(chan, &fence, true);
767	if (ret) {
768		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
769		WIND_RING(chan);
770		goto out;
771	}
772
773out:
774	validate_fini(&op, fence);
775	nouveau_fence_unref(&fence);
776
777out_prevalid:
778	kfree(bo);
779	kfree(push);
780
781out_next:
782	if (chan->dma.ib_max) {
783		req->suffix0 = 0x00000000;
784		req->suffix1 = 0x00000000;
785	} else
786	if (dev_priv->chipset >= 0x25) {
787		req->suffix0 = 0x00020000;
788		req->suffix1 = 0x00000000;
789	} else {
790		req->suffix0 = 0x20000000 |
791			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
792		req->suffix1 = 0x00000000;
793	}
794
795	nouveau_channel_put(&chan);
796	return ret;
797}
798
799static inline uint32_t
800domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
801{
802	uint32_t flags = 0;
803
804	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
805		flags |= TTM_PL_FLAG_VRAM;
806	if (domain & NOUVEAU_GEM_DOMAIN_GART)
807		flags |= TTM_PL_FLAG_TT;
808
809	return flags;
810}
811
812int
813nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
814			   struct drm_file *file_priv)
815{
816	struct drm_nouveau_gem_cpu_prep *req = data;
817	struct drm_gem_object *gem;
818	struct nouveau_bo *nvbo;
819	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
820	int ret = -EINVAL;
821
822	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
823	if (!gem)
824		return -ENOENT;
825	nvbo = nouveau_gem_object(gem);
826
827	spin_lock(&nvbo->bo.bdev->fence_lock);
828	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
829	spin_unlock(&nvbo->bo.bdev->fence_lock);
830	drm_gem_object_unreference_unlocked(gem);
831	return ret;
832}
833
834int
835nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
836			   struct drm_file *file_priv)
837{
838	return 0;
839}
840
841int
842nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
843		       struct drm_file *file_priv)
844{
845	struct drm_nouveau_gem_info *req = data;
846	struct drm_gem_object *gem;
847	int ret;
848
849	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
850	if (!gem)
851		return -ENOENT;
852
853	ret = nouveau_gem_info(file_priv, gem, req);
854	drm_gem_object_unreference_unlocked(gem);
855	return ret;
856}
857
v5.14.15
   1/*
   2 * Copyright (C) 2008 Ben Skeggs.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining
   6 * a copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sublicense, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial
  15 * portions of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 *
  25 */
  26
  27#include <drm/drm_gem_ttm_helper.h>
  28
  29#include "nouveau_drv.h"
  30#include "nouveau_dma.h"
  31#include "nouveau_fence.h"
  32#include "nouveau_abi16.h"
  33
  34#include "nouveau_ttm.h"
  35#include "nouveau_gem.h"
  36#include "nouveau_mem.h"
  37#include "nouveau_vmm.h"
  38
  39#include <nvif/class.h>
  40#include <nvif/push206e.h>
  41
  42static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
  43{
  44	struct vm_area_struct *vma = vmf->vma;
  45	struct ttm_buffer_object *bo = vma->vm_private_data;
  46	pgprot_t prot;
  47	vm_fault_t ret;
  48
  49	ret = ttm_bo_vm_reserve(bo, vmf);
  50	if (ret)
  51		return ret;
  52
  53	ret = nouveau_ttm_fault_reserve_notify(bo);
  54	if (ret)
  55		goto error_unlock;
  56
  57	nouveau_bo_del_io_reserve_lru(bo);
  58	prot = vm_get_page_prot(vma->vm_flags);
  59	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
  60	nouveau_bo_add_io_reserve_lru(bo);
  61	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
  62		return ret;
  63
  64error_unlock:
  65	dma_resv_unlock(bo->base.resv);
  66	return ret;
  67}
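
A note on the VM_FAULT_RETRY early return (hedged reading of the TTM helpers, not part of this file):

/* When ttm_bo_vm_fault_reserved() returns VM_FAULT_RETRY without
 * FAULT_FLAG_RETRY_NOWAIT, TTM has already unreserved the bo before
 * dropping mmap_lock to sleep (see ttm_bo_vm_fault_idle()), so a
 * dma_resv_unlock() here would unlock an unheld lock; hence the
 * early return above skips the error_unlock path.
 */
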
  68
  69static const struct vm_operations_struct nouveau_ttm_vm_ops = {
  70	.fault = nouveau_ttm_fault,
  71	.open = ttm_bo_vm_open,
  72	.close = ttm_bo_vm_close,
  73	.access = ttm_bo_vm_access
  74};
  75
  76void
  77nouveau_gem_object_del(struct drm_gem_object *gem)
  78{
  79	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
  80	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
  81	struct device *dev = drm->dev->dev;
  82	int ret;
  83
  84	ret = pm_runtime_get_sync(dev);
  85	if (WARN_ON(ret < 0 && ret != -EACCES)) {
  86		pm_runtime_put_autosuspend(dev);
  87		return;
  88	}
  89
  90	if (gem->import_attach)
  91		drm_prime_gem_destroy(gem, nvbo->bo.sg);
  92
  93	ttm_bo_put(&nvbo->bo);
  94
  95	pm_runtime_mark_last_busy(dev);
  96	pm_runtime_put_autosuspend(dev);
  97}
  98
  99int
 100nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 101{
 102	struct nouveau_cli *cli = nouveau_cli(file_priv);
 103	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 104	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 105	struct device *dev = drm->dev->dev;
 106	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
 107	struct nouveau_vma *vma;
 108	int ret;
 109
 110	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 111		return 0;
 112
 113	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 114	if (ret)
 115		return ret;
 116
 117	ret = pm_runtime_get_sync(dev);
 118	if (ret < 0 && ret != -EACCES) {
 119		pm_runtime_put_autosuspend(dev);
 120		goto out;
 121	}
 122
 123	ret = nouveau_vma_new(nvbo, vmm, &vma);
 124	pm_runtime_mark_last_busy(dev);
 125	pm_runtime_put_autosuspend(dev);
 126out:
 127	ttm_bo_unreserve(&nvbo->bo);
 128	return ret;
 129}
 130
 131struct nouveau_gem_object_unmap {
 132	struct nouveau_cli_work work;
 133	struct nouveau_vma *vma;
 134};
 135
 136static void
 137nouveau_gem_object_delete(struct nouveau_vma *vma)
 138{
 139	nouveau_fence_unref(&vma->fence);
 140	nouveau_vma_del(&vma);
 141}
 142
 143static void
 144nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
 145{
 146	struct nouveau_gem_object_unmap *work =
 147		container_of(w, typeof(*work), work);
 148	nouveau_gem_object_delete(work->vma);
 149	kfree(work);
 150}
 151
 152static void
 153nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 154{
 155	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
 156	struct nouveau_gem_object_unmap *work;
 157
 158	list_del_init(&vma->head);
 159
 160	if (!fence) {
 161		nouveau_gem_object_delete(vma);
 162		return;
 163	}
 164
 165	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
 166		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
 167		nouveau_gem_object_delete(vma);
 168		return;
 169	}
 170
 171	work->work.func = nouveau_gem_object_delete_work;
 172	work->vma = vma;
 173	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
 174}
 175
 176void
 177nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 178{
 179	struct nouveau_cli *cli = nouveau_cli(file_priv);
 180	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 181	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 182	struct device *dev = drm->dev->dev;
  183	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
 184	struct nouveau_vma *vma;
 185	int ret;
 186
 187	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 188		return;
 189
 190	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 191	if (ret)
 192		return;
 193
 194	vma = nouveau_vma_find(nvbo, vmm);
 195	if (vma) {
 196		if (--vma->refs == 0) {
 197			ret = pm_runtime_get_sync(dev);
 198			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
 199				nouveau_gem_object_unmap(nvbo, vma);
 200				pm_runtime_mark_last_busy(dev);
 201			}
 202			pm_runtime_put_autosuspend(dev);
 203		}
 204	}
 205	ttm_bo_unreserve(&nvbo->bo);
 206}
 207
 208const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
 209	.free = nouveau_gem_object_del,
 210	.open = nouveau_gem_object_open,
 211	.close = nouveau_gem_object_close,
 212	.pin = nouveau_gem_prime_pin,
 213	.unpin = nouveau_gem_prime_unpin,
 214	.get_sg_table = nouveau_gem_prime_get_sg_table,
 215	.vmap = drm_gem_ttm_vmap,
 216	.vunmap = drm_gem_ttm_vunmap,
 217	.mmap = drm_gem_ttm_mmap,
 218	.vm_ops = &nouveau_ttm_vm_ops,
 219};
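
Since the GEM object is now embedded in struct nouveau_bo as nvbo->bo.base, downcasting is a container_of() walk rather than the old driver_private pointer. A sketch of the nouveau_gem_object() helper this file relies on, as it is understood to be declared in nouveau_gem.h:

static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
	/* walk back from the embedded drm_gem_object to the nouveau_bo */
	return gem ? container_of(gem, struct nouveau_bo, bo.base) : NULL;
}
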
 220
 221int
 222nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 223		uint32_t tile_mode, uint32_t tile_flags,
 224		struct nouveau_bo **pnvbo)
 225{
 226	struct nouveau_drm *drm = cli->drm;
 227	struct nouveau_bo *nvbo;
 228	int ret;
 229
 230	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
 231		domain |= NOUVEAU_GEM_DOMAIN_CPU;
 232
 233	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
 234				tile_flags);
 235	if (IS_ERR(nvbo))
 236		return PTR_ERR(nvbo);
 237
 238	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
 239
 240	/* Initialize the embedded gem-object. We return a single gem-reference
 241	 * to the caller, instead of a normal nouveau_bo ttm reference. */
 242	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
 243	if (ret) {
 244		drm_gem_object_release(&nvbo->bo.base);
 245		kfree(nvbo);
 246		return ret;
 247	}
 248
 249	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
 250	if (ret)
 251		return ret;
 252
 253	/* we restrict allowed domains on nv50+ to only the types
  254	 * that were requested at creation time.  not possible on
 255	 * earlier chips without busting the ABI.
 256	 */
 257	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 258			      NOUVEAU_GEM_DOMAIN_GART;
 259	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 260		nvbo->valid_domains &= domain;
 261
 262	*pnvbo = nvbo;
 263	return 0;
 264}
 265
 266static int
 267nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 268		 struct drm_nouveau_gem_info *rep)
 269{
 270	struct nouveau_cli *cli = nouveau_cli(file_priv);
 271	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 272	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
 273	struct nouveau_vma *vma;
 274
 275	if (is_power_of_2(nvbo->valid_domains))
 276		rep->domain = nvbo->valid_domains;
 277	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 278		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 279	else
 280		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 281	rep->offset = nvbo->offset;
 282	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 283		vma = nouveau_vma_find(nvbo, vmm);
 284		if (!vma)
 285			return -EINVAL;
 286
 287		rep->offset = vma->addr;
 288	}
 289
 290	rep->size = nvbo->bo.base.size;
 291	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
 292	rep->tile_mode = nvbo->mode;
 293	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
 294	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
 295		rep->tile_flags |= nvbo->kind << 8;
 296	else
 297	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 298		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
 299	else
 300		rep->tile_flags |= nvbo->zeta;
 301	return 0;
 302}
 303
 304int
 305nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 306		      struct drm_file *file_priv)
 307{
 308	struct nouveau_cli *cli = nouveau_cli(file_priv);
 309	struct drm_nouveau_gem_new *req = data;
 310	struct nouveau_bo *nvbo = NULL;
 311	int ret = 0;
 312
 313	ret = nouveau_gem_new(cli, req->info.size, req->align,
 314			      req->info.domain, req->info.tile_mode,
 315			      req->info.tile_flags, &nvbo);
 316	if (ret)
 317		return ret;
 318
 319	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
 320				    &req->info.handle);
 321	if (ret == 0) {
 322		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
 323		if (ret)
 324			drm_gem_handle_delete(file_priv, req->info.handle);
 325	}
 326
 327	/* drop reference from allocate - handle holds it now */
 328	drm_gem_object_put(&nvbo->bo.base);
 329	return ret;
 330}
 331
 332static int
 333nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 334		       uint32_t write_domains, uint32_t valid_domains)
 335{
 336	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 337	struct ttm_buffer_object *bo = &nvbo->bo;
 338	uint32_t domains = valid_domains & nvbo->valid_domains &
 339		(write_domains ? write_domains : read_domains);
  340	uint32_t pref_domains = 0;
 341
 342	if (!domains)
 343		return -EINVAL;
 344
 345	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 346
 347	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 348	    bo->resource->mem_type == TTM_PL_VRAM)
 349		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 350
 351	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
 352		 bo->resource->mem_type == TTM_PL_TT)
 353		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 354
 355	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
 356		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 357
 358	else
 359		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 360
 361	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
 362
 363	return 0;
 364}
 365
 366struct validate_op {
 367	struct list_head list;
 368	struct ww_acquire_ctx ticket;
 369};
 370
 371static void
 372validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
 373			struct nouveau_fence *fence,
 374			struct drm_nouveau_gem_pushbuf_bo *pbbo)
 375{
 376	struct nouveau_bo *nvbo;
 377	struct drm_nouveau_gem_pushbuf_bo *b;
 378
 379	while (!list_empty(&op->list)) {
 380		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
 381		b = &pbbo[nvbo->pbbo_index];
 382
 383		if (likely(fence)) {
 384			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 385
 386			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 387				struct nouveau_vma *vma =
 388					(void *)(unsigned long)b->user_priv;
 389				nouveau_fence_unref(&vma->fence);
 390				dma_fence_get(&fence->base);
 391				vma->fence = fence;
 392			}
 393		}
 394
 395		if (unlikely(nvbo->validate_mapped)) {
 396			ttm_bo_kunmap(&nvbo->kmap);
 397			nvbo->validate_mapped = false;
 398		}
 399
 400		list_del(&nvbo->entry);
 401		nvbo->reserved_by = NULL;
 402		ttm_bo_unreserve(&nvbo->bo);
 403		drm_gem_object_put(&nvbo->bo.base);
 404	}
 405}
 406
 407static void
 408validate_fini(struct validate_op *op, struct nouveau_channel *chan,
 409	      struct nouveau_fence *fence,
 410	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
 411{
 412	validate_fini_no_ticket(op, chan, fence, pbbo);
 413	ww_acquire_fini(&op->ticket);
 414}
 415
 416static int
 417validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 418	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
 419	      int nr_buffers, struct validate_op *op)
 420{
 421	struct nouveau_cli *cli = nouveau_cli(file_priv);
 422	int trycnt = 0;
 423	int ret = -EINVAL, i;
 424	struct nouveau_bo *res_bo = NULL;
 425	LIST_HEAD(gart_list);
 426	LIST_HEAD(vram_list);
 427	LIST_HEAD(both_list);
 428
 429	ww_acquire_init(&op->ticket, &reservation_ww_class);
 430retry:
 431	if (++trycnt > 100000) {
 432		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
 433		return -EINVAL;
 434	}
 435
 436	for (i = 0; i < nr_buffers; i++) {
 437		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
 438		struct drm_gem_object *gem;
 439		struct nouveau_bo *nvbo;
 440
 441		gem = drm_gem_object_lookup(file_priv, b->handle);
 442		if (!gem) {
 443			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
 444			ret = -ENOENT;
 445			break;
 446		}
 447		nvbo = nouveau_gem_object(gem);
 448		if (nvbo == res_bo) {
 449			res_bo = NULL;
 450			drm_gem_object_put(gem);
 451			continue;
 452		}
 453
 454		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
 455			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
 456				      "validation list\n", b->handle);
 457			drm_gem_object_put(gem);
 458			ret = -EINVAL;
 459			break;
 460		}
 461
 462		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
 463		if (ret) {
 464			list_splice_tail_init(&vram_list, &op->list);
 465			list_splice_tail_init(&gart_list, &op->list);
 466			list_splice_tail_init(&both_list, &op->list);
 467			validate_fini_no_ticket(op, chan, NULL, NULL);
 468			if (unlikely(ret == -EDEADLK)) {
 469				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
 470							      &op->ticket);
 471				if (!ret)
 472					res_bo = nvbo;
 473			}
 474			if (unlikely(ret)) {
 475				if (ret != -ERESTARTSYS)
 476					NV_PRINTK(err, cli, "fail reserve\n");
 477				break;
 478			}
 479		}
 480
 481		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 482			struct nouveau_vmm *vmm = chan->vmm;
 483			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
 484			if (!vma) {
 485				NV_PRINTK(err, cli, "vma not found!\n");
 486				ret = -EINVAL;
 487				break;
 488			}
 489
 490			b->user_priv = (uint64_t)(unsigned long)vma;
 491		} else {
 492			b->user_priv = (uint64_t)(unsigned long)nvbo;
 493		}
 494
 495		nvbo->reserved_by = file_priv;
 496		nvbo->pbbo_index = i;
 497		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 498		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
 499			list_add_tail(&nvbo->entry, &both_list);
 500		else
 501		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
 502			list_add_tail(&nvbo->entry, &vram_list);
 503		else
 504		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 505			list_add_tail(&nvbo->entry, &gart_list);
 506		else {
 507			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
 508				 b->valid_domains);
 509			list_add_tail(&nvbo->entry, &both_list);
 510			ret = -EINVAL;
 511			break;
 512		}
 513		if (nvbo == res_bo)
 514			goto retry;
 515	}
 516
 517	ww_acquire_done(&op->ticket);
 518	list_splice_tail(&vram_list, &op->list);
 519	list_splice_tail(&gart_list, &op->list);
 520	list_splice_tail(&both_list, &op->list);
 521	if (ret)
 522		validate_fini(op, chan, NULL, NULL);
 523	return ret;
 524
 525}
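
Compared with the v3.1 sequence-number scheme, reservation now rides on ww_mutex: ttm_bo_reserve() takes the acquire ticket, -EDEADLK signals a wound transaction, and ttm_bo_reserve_slowpath() reacquires the contended bo after everything else is dropped. A minimal sketch of the same wound/wait idiom on bare ww_mutexes (hypothetical helper, not driver code; the kernel's drm_gem_lock_reservations() follows the same shape):

#include <linux/ww_mutex.h>

static int lock_all(struct ww_mutex **locks, int n)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *contended = NULL;
	int i, ret;

	ww_acquire_init(&ctx, &reservation_ww_class);
retry:
	for (i = 0; i < n; i++) {
		if (locks[i] == contended) {	/* taken via the slowpath */
			contended = NULL;
			continue;
		}
		ret = ww_mutex_lock(locks[i], &ctx);
		if (ret == -EDEADLK) {
			struct ww_mutex *busy = locks[i];

			while (i--)		/* back off completely */
				ww_mutex_unlock(locks[i]);
			if (contended)
				ww_mutex_unlock(contended);
			ww_mutex_lock_slow(busy, &ctx);	/* sleep, then win */
			contended = busy;
			goto retry;
		}
	}
	ww_acquire_done(&ctx);
	return 0;
}
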
 526
 527static int
 528validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 529	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
 530{
 531	struct nouveau_drm *drm = chan->drm;
 532	struct nouveau_bo *nvbo;
 533	int ret, relocs = 0;
 534
 535	list_for_each_entry(nvbo, list, entry) {
 536		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 537
 538		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
 539					     b->write_domains,
 540					     b->valid_domains);
 541		if (unlikely(ret)) {
 542			NV_PRINTK(err, cli, "fail set_domain\n");
 543			return ret;
 544		}
 545
 546		ret = nouveau_bo_validate(nvbo, true, false);
 547		if (unlikely(ret)) {
 548			if (ret != -ERESTARTSYS)
 549				NV_PRINTK(err, cli, "fail ttm_validate\n");
 550			return ret;
 551		}
 552
 553		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 554		if (unlikely(ret)) {
 555			if (ret != -ERESTARTSYS)
 556				NV_PRINTK(err, cli, "fail post-validate sync\n");
 557			return ret;
 558		}
 559
 560		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 561			if (nvbo->offset == b->presumed.offset &&
 562			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 563			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 564			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
 565			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 566				continue;
 567
 568			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 569				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 570			else
 571				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 572			b->presumed.offset = nvbo->offset;
 573			b->presumed.valid = 0;
 574			relocs++;
 575		}
 576	}
 577
 578	return relocs;
 579}
 580
 581static int
 582nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 583			     struct drm_file *file_priv,
 584			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
 585			     int nr_buffers,
 586			     struct validate_op *op, bool *apply_relocs)
 587{
 588	struct nouveau_cli *cli = nouveau_cli(file_priv);
 589	int ret;
 590
 591	INIT_LIST_HEAD(&op->list);
 592
 593	if (nr_buffers == 0)
 594		return 0;
 595
 596	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 597	if (unlikely(ret)) {
 598		if (ret != -ERESTARTSYS)
 599			NV_PRINTK(err, cli, "validate_init\n");
 600		return ret;
 601	}
 602
 603	ret = validate_list(chan, cli, &op->list, pbbo);
 604	if (unlikely(ret < 0)) {
 605		if (ret != -ERESTARTSYS)
 606			NV_PRINTK(err, cli, "validating bo list\n");
 607		validate_fini(op, chan, NULL, NULL);
 608		return ret;
 609	} else if (ret > 0) {
 610		*apply_relocs = true;
 611	}
 612
 613	return 0;
 614}
 615
 616static inline void
 617u_free(void *addr)
 618{
 619	kvfree(addr);
 620}
 621
 622static inline void *
 623u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 624{
 625	void *mem;
 626	void __user *userptr = (void __force __user *)(uintptr_t)user;
 627
 628	size *= nmemb;
 629
 630	mem = kvmalloc(size, GFP_KERNEL);
 631	if (!mem)
 632		return ERR_PTR(-ENOMEM);
 633
 634	if (copy_from_user(mem, userptr, size)) {
 635		u_free(mem);
 636		return ERR_PTR(-EFAULT);
 637	}
 638
 639	return mem;
 640}
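
One thing to watch in u_memcpya(): size *= nmemb can wrap for hostile nmemb/size pairs before kvmalloc() sees the product; the ioctl paths cap nr_push, nr_buffers and nr_relocs first, which is what keeps this safe. A hedged variant that makes the check explicit with the kernel's overflow helpers (u_memcpya_checked is a hypothetical name):

#include <linux/overflow.h>

static inline void *
u_memcpya_checked(uint64_t user, unsigned nmemb, unsigned size)
{
	void __user *userptr = u64_to_user_ptr(user);
	size_t bytes;
	void *mem;

	if (check_mul_overflow((size_t)nmemb, (size_t)size, &bytes))
		return ERR_PTR(-EOVERFLOW);	/* reject wrapped sizes */

	mem = kvmalloc(bytes, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, bytes)) {
		kvfree(mem);
		return ERR_PTR(-EFAULT);
	}
	return mem;
}
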
 641
 642static int
 643nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 644				struct drm_nouveau_gem_pushbuf *req,
 645				struct drm_nouveau_gem_pushbuf_reloc *reloc,
 646				struct drm_nouveau_gem_pushbuf_bo *bo)
 647{
 648	int ret = 0;
 649	unsigned i;
 650
 651	for (i = 0; i < req->nr_relocs; i++) {
 652		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 653		struct drm_nouveau_gem_pushbuf_bo *b;
 654		struct nouveau_bo *nvbo;
 655		uint32_t data;
 656
 657		if (unlikely(r->bo_index >= req->nr_buffers)) {
 658			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 659			ret = -EINVAL;
 660			break;
 661		}
 662
 663		b = &bo[r->bo_index];
 664		if (b->presumed.valid)
 665			continue;
 666
 667		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
 668			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 669			ret = -EINVAL;
 670			break;
 671		}
 672		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
 673
 674		if (unlikely(r->reloc_bo_offset + 4 >
 675			     nvbo->bo.base.size)) {
 676			NV_PRINTK(err, cli, "reloc outside of bo\n");
 677			ret = -EINVAL;
 678			break;
 679		}
 680
 681		if (!nvbo->kmap.virtual) {
 682			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
 683					  &nvbo->kmap);
 684			if (ret) {
 685				NV_PRINTK(err, cli, "failed kmap for reloc\n");
 686				break;
 687			}
 688			nvbo->validate_mapped = true;
 689		}
 690
 691		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
 692			data = b->presumed.offset + r->data;
 693		else
 694		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
 695			data = (b->presumed.offset + r->data) >> 32;
 696		else
 697			data = r->data;
 698
 699		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
 700			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 701				data |= r->tor;
 702			else
 703				data |= r->vor;
 704		}
 705
 706		ret = ttm_bo_wait(&nvbo->bo, false, false);
 707		if (ret) {
 708			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
 709			break;
 710		}
 711
 712		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 713	}
 714
 715	return ret;
 716}
 717
 718int
 719nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 720			  struct drm_file *file_priv)
 721{
 722	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
 723	struct nouveau_cli *cli = nouveau_cli(file_priv);
 724	struct nouveau_abi16_chan *temp;
 725	struct nouveau_drm *drm = nouveau_drm(dev);
 726	struct drm_nouveau_gem_pushbuf *req = data;
 727	struct drm_nouveau_gem_pushbuf_push *push;
 728	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 729	struct drm_nouveau_gem_pushbuf_bo *bo;
 730	struct nouveau_channel *chan = NULL;
 731	struct validate_op op;
 732	struct nouveau_fence *fence = NULL;
 733	int i, j, ret = 0;
 734	bool do_reloc = false, sync = false;
 735
 736	if (unlikely(!abi16))
 737		return -ENOMEM;
 738
 739	list_for_each_entry(temp, &abi16->channels, head) {
 740		if (temp->chan->chid == req->channel) {
 741			chan = temp->chan;
 742			break;
 743		}
 744	}
 745
 746	if (!chan)
 747		return nouveau_abi16_put(abi16, -ENOENT);
 748	if (unlikely(atomic_read(&chan->killed)))
 749		return nouveau_abi16_put(abi16, -ENODEV);
 750
 751	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
 752
 753	req->vram_available = drm->gem.vram_available;
 754	req->gart_available = drm->gem.gart_available;
 755	if (unlikely(req->nr_push == 0))
 756		goto out_next;
 757
 758	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 759		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
 760			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 761		return nouveau_abi16_put(abi16, -EINVAL);
 762	}
 763
 764	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 765		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
 766			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
 767		return nouveau_abi16_put(abi16, -EINVAL);
 768	}
 769
 770	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 771		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
 772			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 773		return nouveau_abi16_put(abi16, -EINVAL);
 774	}
 775
 776	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
 777	if (IS_ERR(push))
 778		return nouveau_abi16_put(abi16, PTR_ERR(push));
 779
 780	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 781	if (IS_ERR(bo)) {
 782		u_free(push);
 783		return nouveau_abi16_put(abi16, PTR_ERR(bo));
 784	}
 785
 786	/* Ensure all push buffers are on validate list */
 787	for (i = 0; i < req->nr_push; i++) {
 788		if (push[i].bo_index >= req->nr_buffers) {
 789			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
 790			ret = -EINVAL;
 791			goto out_prevalid;
 792		}
 793	}
 794
 795	/* Validate buffer list */
 796revalidate:
 797	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
 798					   req->nr_buffers, &op, &do_reloc);
 799	if (ret) {
 800		if (ret != -ERESTARTSYS)
 801			NV_PRINTK(err, cli, "validate: %d\n", ret);
 802		goto out_prevalid;
 803	}
 804
 805	/* Apply any relocations that are required */
 806	if (do_reloc) {
 807		if (!reloc) {
 808			validate_fini(&op, chan, NULL, bo);
 809			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 810			if (IS_ERR(reloc)) {
 811				ret = PTR_ERR(reloc);
 812				goto out_prevalid;
 813			}
 814
 815			goto revalidate;
 816		}
 817
 818		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
 819		if (ret) {
 820			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
 821			goto out;
 822		}
 823	}
 824
 825	if (chan->dma.ib_max) {
 826		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 827		if (ret) {
 828			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
 829			goto out;
 830		}
 831
 832		for (i = 0; i < req->nr_push; i++) {
 833			struct nouveau_vma *vma = (void *)(unsigned long)
 834				bo[push[i].bo_index].user_priv;
 835
 836			nv50_dma_push(chan, vma->addr + push[i].offset,
 837				      push[i].length);
 838		}
 839	} else
 840	if (drm->client.device.info.chipset >= 0x25) {
 841		ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
 842		if (ret) {
 843			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
 844			goto out;
 845		}
 846
 847		for (i = 0; i < req->nr_push; i++) {
 848			struct nouveau_bo *nvbo = (void *)(unsigned long)
 849				bo[push[i].bo_index].user_priv;
 850
 851			PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
 852			PUSH_DATA(chan->chan.push, 0);
 853		}
 854	} else {
 855		ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 856		if (ret) {
 857			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
 858			goto out;
 859		}
 860
 861		for (i = 0; i < req->nr_push; i++) {
 862			struct nouveau_bo *nvbo = (void *)(unsigned long)
 863				bo[push[i].bo_index].user_priv;
 864			uint32_t cmd;
 865
 866			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
 867			cmd |= 0x20000000;
 868			if (unlikely(cmd != req->suffix0)) {
 869				if (!nvbo->kmap.virtual) {
 870					ret = ttm_bo_kmap(&nvbo->bo, 0,
 871							  nvbo->bo.resource->
 872							  num_pages,
 873							  &nvbo->kmap);
 874					if (ret) {
 875						WIND_RING(chan);
 876						goto out;
 877					}
 878					nvbo->validate_mapped = true;
 879				}
 880
 881				nouveau_bo_wr32(nvbo, (push[i].offset +
 882						push[i].length - 8) / 4, cmd);
 883			}
 884
 885			PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
 886			PUSH_DATA(chan->chan.push, 0);
 887			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
 888				PUSH_DATA(chan->chan.push, 0);
 889		}
 890	}
 891
 892	ret = nouveau_fence_new(chan, false, &fence);
 893	if (ret) {
 894		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
 895		WIND_RING(chan);
 896		goto out;
 897	}
 898
 899	if (sync) {
 900		if (!(ret = nouveau_fence_wait(fence, false, false))) {
 901			if ((ret = dma_fence_get_status(&fence->base)) == 1)
 902				ret = 0;
 903		}
 904	}
 905
 906out:
 907	validate_fini(&op, chan, fence, bo);
 908	nouveau_fence_unref(&fence);
 909
 910	if (do_reloc) {
 911		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 912			u64_to_user_ptr(req->buffers);
 913
 914		for (i = 0; i < req->nr_buffers; i++) {
 915			if (bo[i].presumed.valid)
 916				continue;
 917
 918			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
 919					 sizeof(bo[i].presumed))) {
 920				ret = -EFAULT;
 921				break;
 922			}
 923		}
 924	}
 925out_prevalid:
 926	if (!IS_ERR(reloc))
 927		u_free(reloc);
 928	u_free(bo);
 929	u_free(push);
 930
 931out_next:
 932	if (chan->dma.ib_max) {
 933		req->suffix0 = 0x00000000;
 934		req->suffix1 = 0x00000000;
 935	} else
 936	if (drm->client.device.info.chipset >= 0x25) {
 937		req->suffix0 = 0x00020000;
 938		req->suffix1 = 0x00000000;
 939	} else {
 940		req->suffix0 = 0x20000000 |
 941			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
 942		req->suffix1 = 0x00000000;
 943	}
 944
 945	return nouveau_abi16_put(abi16, ret);
 946}
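
The suffix0/suffix1 values returned through out_next tell userspace which two words to append to its next push buffer so the GPU lands back in the main ring (hedged reading of the submission paths above):

/* ib_max set     : 0x00000000, 0x00000000 - IB mode; the kernel builds
 *                  the indirect-buffer entries itself, nothing to append.
 * chipset >= 0x25: 0x00020000, 0x00000000 - return from the CALL issued
 *                  by PUSH_CALL() above.
 * older chipsets : a JUMP (0x20000000 | address) back into the ring at
 *                  chan->push.addr, matching the jump commands written
 *                  in the final branch of the submit path.
 */
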
 947
 948int
 949nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 950			   struct drm_file *file_priv)
 951{
 952	struct drm_nouveau_gem_cpu_prep *req = data;
 953	struct drm_gem_object *gem;
 954	struct nouveau_bo *nvbo;
 955	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
 956	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
 957	long lret;
 958	int ret;
 959
 960	gem = drm_gem_object_lookup(file_priv, req->handle);
 961	if (!gem)
 962		return -ENOENT;
 963	nvbo = nouveau_gem_object(gem);
 964
 965	lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
 966				     no_wait ? 0 : 30 * HZ);
 967	if (!lret)
 968		ret = -EBUSY;
 969	else if (lret > 0)
 970		ret = 0;
 971	else
 972		ret = lret;
 973
 974	nouveau_bo_sync_for_cpu(nvbo);
 975	drm_gem_object_put(gem);
 976
 977	return ret;
 978}
 979
 980int
 981nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 982			   struct drm_file *file_priv)
 983{
 984	struct drm_nouveau_gem_cpu_fini *req = data;
 985	struct drm_gem_object *gem;
 986	struct nouveau_bo *nvbo;
 987
 988	gem = drm_gem_object_lookup(file_priv, req->handle);
 989	if (!gem)
 990		return -ENOENT;
 991	nvbo = nouveau_gem_object(gem);
 992
 993	nouveau_bo_sync_for_device(nvbo);
 994	drm_gem_object_put(gem);
 995	return 0;
 996}
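
Userspace brackets direct CPU access with these two ioctls: CPU_PREP waits for pending GPU work (up to the 30 s timeout above) and syncs the bo for the CPU, CPU_FINI syncs it back for the device. A hypothetical libdrm-style sketch (helper name and fd handling are assumptions):

#include <stdint.h>
#include <xf86drm.h>
#include "nouveau_drm.h"	/* uapi: drm_nouveau_gem_cpu_prep/fini */

static int with_cpu_access(int fd, uint32_t bo_handle)
{
	struct drm_nouveau_gem_cpu_prep prep = { .handle = bo_handle };
	struct drm_nouveau_gem_cpu_fini fini = { .handle = bo_handle };
	int ret;

	ret = drmCommandWrite(fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &prep, sizeof(prep));
	if (ret)
		return ret;
	/* ... touch the buffer's CPU mapping here ... */
	return drmCommandWrite(fd, DRM_NOUVEAU_GEM_CPU_FINI,
			       &fini, sizeof(fini));
}
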
 997
 998int
 999nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
1000		       struct drm_file *file_priv)
1001{
1002	struct drm_nouveau_gem_info *req = data;
1003	struct drm_gem_object *gem;
1004	int ret;
1005
1006	gem = drm_gem_object_lookup(file_priv, req->handle);
1007	if (!gem)
1008		return -ENOENT;
1009
1010	ret = nouveau_gem_info(file_priv, gem, req);
1011	drm_gem_object_put(gem);
1012	return ret;
1013}
1014