v3.1
  1/*
  2 * Copyright (C) 2008 Ben Skeggs.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining
  6 * a copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sublicense, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * The above copyright notice and this permission notice (including the
 14 * next paragraph) shall be included in all copies or substantial
 15 * portions of the Software.
 16 *
 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 24 *
 25 */
 26#include "drmP.h"
 27#include "drm.h"
 28
 29#include "nouveau_drv.h"
 30#include "nouveau_drm.h"
 31#include "nouveau_dma.h"
 32
 33#define nouveau_gem_pushbuf_sync(chan) 0
 34
 35int
 36nouveau_gem_object_new(struct drm_gem_object *gem)
 37{
 38	return 0;
 39}
 40
 41void
 42nouveau_gem_object_del(struct drm_gem_object *gem)
 43{
 44	struct nouveau_bo *nvbo = gem->driver_private;
 45	struct ttm_buffer_object *bo = &nvbo->bo;
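	/* Editor's note (not in the original source): &nvbo->bo above is
	 * computed before the NULL check on nvbo below.  It is only an
	 * address calculation, so it works in practice, but it silently
	 * assumes nvbo is non-NULL; later kernels restructured this
	 * teardown path entirely. */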
 46
 47	if (!nvbo)
 48		return;
 49	nvbo->gem = NULL;
 50
 51	if (unlikely(nvbo->pin_refcnt)) {
 52		nvbo->pin_refcnt = 1;
 53		nouveau_bo_unpin(nvbo);
 54	}
 55
 56	ttm_bo_unref(&bo);
 57
 58	drm_gem_object_release(gem);
 59	kfree(gem);
 60}
 61
 62int
 63nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 64{
 65	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
 66	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 67	struct nouveau_vma *vma;
 68	int ret;
 69
 70	if (!fpriv->vm)
 71		return 0;
 72
 73	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
 74	if (ret)
 75		return ret;
 76
 77	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
 78	if (!vma) {
 79		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 80		if (!vma) {
 81			ret = -ENOMEM;
 82			goto out;
 83		}
 84
 85		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
 86		if (ret) {
 87			kfree(vma);
 88			goto out;
 89		}
 90	} else {
 91		vma->refcount++;
 92	}
 93
 94out:
 95	ttm_bo_unreserve(&nvbo->bo);
 96	return ret;
 97}
 98
 99void
100nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
101{
102	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
103	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
104	struct nouveau_vma *vma;
105	int ret;
106
107	if (!fpriv->vm)
108		return;
109
110	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
111	if (ret)
112		return;
113
114	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
115	if (vma) {
116		if (--vma->refcount == 0) {
117			nouveau_bo_vma_del(nvbo, vma);
118			kfree(vma);
119		}
120	}
121	ttm_bo_unreserve(&nvbo->bo);
122}
123
124int
125nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
126		uint32_t tile_mode, uint32_t tile_flags,
127		struct nouveau_bo **pnvbo)
128{
129	struct drm_nouveau_private *dev_priv = dev->dev_private;
130	struct nouveau_bo *nvbo;
131	u32 flags = 0;
132	int ret;
133
134	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
135		flags |= TTM_PL_FLAG_VRAM;
136	if (domain & NOUVEAU_GEM_DOMAIN_GART)
137		flags |= TTM_PL_FLAG_TT;
138	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
139		flags |= TTM_PL_FLAG_SYSTEM;
140
141	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
142			     tile_flags, pnvbo);
143	if (ret)
144		return ret;
145	nvbo = *pnvbo;
146
147	/* we restrict allowed domains on nv50+ to only the types
 148	 * that were requested at creation time.  not possible on
149	 * earlier chips without busting the ABI.
150	 */
151	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
152			      NOUVEAU_GEM_DOMAIN_GART;
153	if (dev_priv->card_type >= NV_50)
154		nvbo->valid_domains &= domain;
155
156	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
157	if (!nvbo->gem) {
158		nouveau_bo_ref(NULL, pnvbo);
159		return -ENOMEM;
160	}
161
162	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
163	nvbo->gem->driver_private = nvbo;
164	return 0;
165}
166
167static int
168nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
169		 struct drm_nouveau_gem_info *rep)
170{
171	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
172	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
173	struct nouveau_vma *vma;
174
175	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
176		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
177	else
178		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
179
180	rep->offset = nvbo->bo.offset;
181	if (fpriv->vm) {
182		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
183		if (!vma)
184			return -EINVAL;
185
186		rep->offset = vma->offset;
187	}
188
189	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
190	rep->map_handle = nvbo->bo.addr_space_offset;
191	rep->tile_mode = nvbo->tile_mode;
192	rep->tile_flags = nvbo->tile_flags;
193	return 0;
194}
195
196int
197nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
198		      struct drm_file *file_priv)
199{
200	struct drm_nouveau_private *dev_priv = dev->dev_private;
201	struct drm_nouveau_gem_new *req = data;
202	struct nouveau_bo *nvbo = NULL;
203	int ret = 0;
204
205	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
206		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
207
208	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
209		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
210		return -EINVAL;
211	}
212
213	ret = nouveau_gem_new(dev, req->info.size, req->align,
214			      req->info.domain, req->info.tile_mode,
215			      req->info.tile_flags, &nvbo);
216	if (ret)
217		return ret;
218
219	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
220	if (ret == 0) {
221		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
222		if (ret)
223			drm_gem_handle_delete(file_priv, req->info.handle);
224	}
225
226	/* drop reference from allocate - handle holds it now */
227	drm_gem_object_unreference_unlocked(nvbo->gem);
228	return ret;
229}
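Editor's illustration, not part of the kernel source: userspace reaches nouveau_gem_ioctl_new() through the DRM ioctl interface. A minimal sketch, assuming an open nouveau DRM file descriptor fd and the nouveau UAPI header (whose install path varies between kernel and libdrm header packages); alignment, tiling and error handling are elided:

	#include <sys/ioctl.h>
	#include <drm/nouveau_drm.h>

	struct drm_nouveau_gem_new req = {
		.info.size   = 64 * 1024,                /* 64 KiB object */
		.info.domain = NOUVEAU_GEM_DOMAIN_VRAM | /* may live in VRAM */
			       NOUVEAU_GEM_DOMAIN_GART,  /* ...or in GART */
	};

	if (ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req) == 0) {
		/* req.info.handle now names the object; req.info.map_handle
		 * is the fake offset to pass to mmap() on the DRM fd. */
	}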
230
231static int
232nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
233		       uint32_t write_domains, uint32_t valid_domains)
234{
235	struct nouveau_bo *nvbo = gem->driver_private;
236	struct ttm_buffer_object *bo = &nvbo->bo;
237	uint32_t domains = valid_domains & nvbo->valid_domains &
238		(write_domains ? write_domains : read_domains);
239	uint32_t pref_flags = 0, valid_flags = 0;
240
241	if (!domains)
242		return -EINVAL;
243
244	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
245		valid_flags |= TTM_PL_FLAG_VRAM;
246
247	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
248		valid_flags |= TTM_PL_FLAG_TT;
249
250	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
251	    bo->mem.mem_type == TTM_PL_VRAM)
252		pref_flags |= TTM_PL_FLAG_VRAM;
253
254	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
255		 bo->mem.mem_type == TTM_PL_TT)
256		pref_flags |= TTM_PL_FLAG_TT;
257
258	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
259		pref_flags |= TTM_PL_FLAG_VRAM;
260
261	else
262		pref_flags |= TTM_PL_FLAG_TT;
263
264	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
265
266	return 0;
267}
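Editor's illustration of the placement-preference logic above, not in the original source: the function avoids needless migration by preferring whichever requested domain the buffer already occupies, and only falls back to the VRAM-over-GART default otherwise:

	/* Hypothetical case table for domains = VRAM|GART:
	 *   bo resident in VRAM   -> pref_flags = TTM_PL_FLAG_VRAM (stays put)
	 *   bo resident in GART   -> pref_flags = TTM_PL_FLAG_TT   (stays put)
	 *   bo resident elsewhere -> pref_flags = TTM_PL_FLAG_VRAM (default)
	 */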
268
269struct validate_op {
270	struct list_head vram_list;
271	struct list_head gart_list;
272	struct list_head both_list;
273};
274
275static void
276validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
277{
278	struct list_head *entry, *tmp;
279	struct nouveau_bo *nvbo;
280
281	list_for_each_safe(entry, tmp, list) {
282		nvbo = list_entry(entry, struct nouveau_bo, entry);
283
284		nouveau_bo_fence(nvbo, fence);
285
286		if (unlikely(nvbo->validate_mapped)) {
287			ttm_bo_kunmap(&nvbo->kmap);
288			nvbo->validate_mapped = false;
289		}
290
291		list_del(&nvbo->entry);
292		nvbo->reserved_by = NULL;
293		ttm_bo_unreserve(&nvbo->bo);
294		drm_gem_object_unreference_unlocked(nvbo->gem);
295	}
296}
297
298static void
 299validate_fini(struct validate_op *op, struct nouveau_fence *fence)
300{
301	validate_fini_list(&op->vram_list, fence);
302	validate_fini_list(&op->gart_list, fence);
303	validate_fini_list(&op->both_list, fence);
304}
305
306static int
307validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
308	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
309	      int nr_buffers, struct validate_op *op)
310{
311	struct drm_device *dev = chan->dev;
312	struct drm_nouveau_private *dev_priv = dev->dev_private;
313	uint32_t sequence;
314	int trycnt = 0;
315	int ret, i;
316
317	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
318retry:
319	if (++trycnt > 100000) {
320		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
321		return -EINVAL;
322	}
323
324	for (i = 0; i < nr_buffers; i++) {
325		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
326		struct drm_gem_object *gem;
327		struct nouveau_bo *nvbo;
328
329		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
330		if (!gem) {
331			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
332			validate_fini(op, NULL);
333			return -ENOENT;
334		}
335		nvbo = gem->driver_private;
336
337		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
338			NV_ERROR(dev, "multiple instances of buffer %d on "
339				      "validation list\n", b->handle);
340			validate_fini(op, NULL);
341			return -EINVAL;
342		}
343
344		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
345		if (ret) {
346			validate_fini(op, NULL);
347			if (unlikely(ret == -EAGAIN))
348				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
349			drm_gem_object_unreference_unlocked(gem);
350			if (unlikely(ret)) {
351				if (ret != -ERESTARTSYS)
352					NV_ERROR(dev, "fail reserve\n");
353				return ret;
354			}
355			goto retry;
356		}
357
358		b->user_priv = (uint64_t)(unsigned long)nvbo;
359		nvbo->reserved_by = file_priv;
360		nvbo->pbbo_index = i;
361		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
362		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
363			list_add_tail(&nvbo->entry, &op->both_list);
364		else
365		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
366			list_add_tail(&nvbo->entry, &op->vram_list);
367		else
368		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
369			list_add_tail(&nvbo->entry, &op->gart_list);
370		else {
371			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
372				 b->valid_domains);
373			list_add_tail(&nvbo->entry, &op->both_list);
374			validate_fini(op, NULL);
375			return -EINVAL;
376		}
377	}
378
379	return 0;
380}
381
382static int
383validate_list(struct nouveau_channel *chan, struct list_head *list,
384	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
385{
386	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
387	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
388				(void __force __user *)(uintptr_t)user_pbbo_ptr;
389	struct drm_device *dev = chan->dev;
390	struct nouveau_bo *nvbo;
391	int ret, relocs = 0;
392
393	list_for_each_entry(nvbo, list, entry) {
394		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
395
396		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
397		if (unlikely(ret)) {
398			NV_ERROR(dev, "fail pre-validate sync\n");
399			return ret;
400		}
401
402		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
403					     b->write_domains,
404					     b->valid_domains);
405		if (unlikely(ret)) {
406			NV_ERROR(dev, "fail set_domain\n");
407			return ret;
408		}
409
410		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
411		ret = nouveau_bo_validate(nvbo, true, false, false);
412		nvbo->channel = NULL;
413		if (unlikely(ret)) {
414			if (ret != -ERESTARTSYS)
415				NV_ERROR(dev, "fail ttm_validate\n");
416			return ret;
417		}
418
419		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
420		if (unlikely(ret)) {
421			NV_ERROR(dev, "fail post-validate sync\n");
422			return ret;
423		}
424
425		if (dev_priv->card_type < NV_50) {
426			if (nvbo->bo.offset == b->presumed.offset &&
427			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
428			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
429			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
430			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
431				continue;
432
433			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
434				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
435			else
436				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
437			b->presumed.offset = nvbo->bo.offset;
438			b->presumed.valid = 0;
439			relocs++;
440
441			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
442					     &b->presumed, sizeof(b->presumed)))
443				return -EFAULT;
444		}
445	}
446
447	return relocs;
448}
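Editor's note on the presumed-offset handshake above, a hedged summary: userspace submits each buffer together with the GPU offset and domain it assumed when writing the push buffer. validate_list() clears presumed.valid and counts a relocation only when validation actually moved the buffer, and copies the refreshed values straight back into the userspace array, so steady-state submissions need no relocations at all.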
449
450static int
451nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
452			     struct drm_file *file_priv,
453			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
454			     uint64_t user_buffers, int nr_buffers,
455			     struct validate_op *op, int *apply_relocs)
456{
457	struct drm_device *dev = chan->dev;
458	int ret, relocs = 0;
459
460	INIT_LIST_HEAD(&op->vram_list);
461	INIT_LIST_HEAD(&op->gart_list);
462	INIT_LIST_HEAD(&op->both_list);
463
464	if (nr_buffers == 0)
465		return 0;
466
467	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
468	if (unlikely(ret)) {
469		if (ret != -ERESTARTSYS)
470			NV_ERROR(dev, "validate_init\n");
471		return ret;
472	}
473
474	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
475	if (unlikely(ret < 0)) {
476		if (ret != -ERESTARTSYS)
477			NV_ERROR(dev, "validate vram_list\n");
478		validate_fini(op, NULL);
479		return ret;
480	}
481	relocs += ret;
482
483	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
484	if (unlikely(ret < 0)) {
485		if (ret != -ERESTARTSYS)
486			NV_ERROR(dev, "validate gart_list\n");
487		validate_fini(op, NULL);
488		return ret;
489	}
490	relocs += ret;
491
492	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
493	if (unlikely(ret < 0)) {
494		if (ret != -ERESTARTSYS)
495			NV_ERROR(dev, "validate both_list\n");
496		validate_fini(op, NULL);
497		return ret;
498	}
499	relocs += ret;
500
501	*apply_relocs = relocs;
502	return 0;
503}
504
505static inline void *
506u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
507{
508	void *mem;
509	void __user *userptr = (void __force __user *)(uintptr_t)user;
510
511	mem = kmalloc(nmemb * size, GFP_KERNEL);
512	if (!mem)
513		return ERR_PTR(-ENOMEM);
514
515	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
516		kfree(mem);
517		return ERR_PTR(-EFAULT);
518	}
519
520	return mem;
521}
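Editor's note: the nmemb * size multiplication above is unchecked and could wrap the allocation size on 32-bit, although the callers in this file bound nr_push, nr_buffers and nr_relocs before getting here. A hardened variant might look like the sketch below, which assumes the modern check_mul_overflow() helper from <linux/overflow.h> (not yet available in v3.1):

	static inline void *
	u_memcpya_checked(uint64_t user, unsigned nmemb, unsigned size)
	{
		void __user *userptr = (void __force __user *)(uintptr_t)user;
		size_t bytes;
		void *mem;

		/* refuse requests whose byte count overflows size_t */
		if (check_mul_overflow((size_t)nmemb, (size_t)size, &bytes))
			return ERR_PTR(-EOVERFLOW);

		mem = kmalloc(bytes, GFP_KERNEL);
		if (!mem)
			return ERR_PTR(-ENOMEM);

		if (copy_from_user(mem, userptr, bytes)) {
			kfree(mem);
			return ERR_PTR(-EFAULT);
		}

		return mem;
	}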
522
523static int
524nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
525				struct drm_nouveau_gem_pushbuf *req,
526				struct drm_nouveau_gem_pushbuf_bo *bo)
527{
528	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
529	int ret = 0;
530	unsigned i;
531
532	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
533	if (IS_ERR(reloc))
534		return PTR_ERR(reloc);
535
536	for (i = 0; i < req->nr_relocs; i++) {
537		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
538		struct drm_nouveau_gem_pushbuf_bo *b;
539		struct nouveau_bo *nvbo;
540		uint32_t data;
541
542		if (unlikely(r->bo_index > req->nr_buffers)) {
543			NV_ERROR(dev, "reloc bo index invalid\n");
544			ret = -EINVAL;
545			break;
546		}
547
548		b = &bo[r->bo_index];
549		if (b->presumed.valid)
550			continue;
551
552		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
553			NV_ERROR(dev, "reloc container bo index invalid\n");
554			ret = -EINVAL;
555			break;
556		}
557		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
558
559		if (unlikely(r->reloc_bo_offset + 4 >
560			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
561			NV_ERROR(dev, "reloc outside of bo\n");
562			ret = -EINVAL;
563			break;
564		}
565
566		if (!nvbo->kmap.virtual) {
567			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
568					  &nvbo->kmap);
569			if (ret) {
570				NV_ERROR(dev, "failed kmap for reloc\n");
571				break;
572			}
573			nvbo->validate_mapped = true;
574		}
575
576		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
577			data = b->presumed.offset + r->data;
578		else
579		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
580			data = (b->presumed.offset + r->data) >> 32;
581		else
582			data = r->data;
583
584		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
585			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
586				data |= r->tor;
587			else
588				data |= r->vor;
589		}
590
591		spin_lock(&nvbo->bo.bdev->fence_lock);
592		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
593		spin_unlock(&nvbo->bo.bdev->fence_lock);
594		if (ret) {
595			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
596			break;
597		}
598
599		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
600	}
601
602	kfree(reloc);
603	return ret;
604}
605
606int
607nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
608			  struct drm_file *file_priv)
609{
610	struct drm_nouveau_private *dev_priv = dev->dev_private;
611	struct drm_nouveau_gem_pushbuf *req = data;
612	struct drm_nouveau_gem_pushbuf_push *push;
613	struct drm_nouveau_gem_pushbuf_bo *bo;
614	struct nouveau_channel *chan;
615	struct validate_op op;
616	struct nouveau_fence *fence = NULL;
617	int i, j, ret = 0, do_reloc = 0;
618
619	chan = nouveau_channel_get(file_priv, req->channel);
620	if (IS_ERR(chan))
621		return PTR_ERR(chan);
622
623	req->vram_available = dev_priv->fb_aper_free;
624	req->gart_available = dev_priv->gart_info.aper_free;
625	if (unlikely(req->nr_push == 0))
626		goto out_next;
627
628	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
629		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
630			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
631		nouveau_channel_put(&chan);
632		return -EINVAL;
633	}
634
635	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
636		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
637			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
638		nouveau_channel_put(&chan);
639		return -EINVAL;
640	}
641
642	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
643		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
644			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
645		nouveau_channel_put(&chan);
646		return -EINVAL;
647	}
648
649	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
650	if (IS_ERR(push)) {
651		nouveau_channel_put(&chan);
652		return PTR_ERR(push);
653	}
654
655	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
656	if (IS_ERR(bo)) {
657		kfree(push);
658		nouveau_channel_put(&chan);
659		return PTR_ERR(bo);
660	}
661
662	/* Mark push buffers as being used on PFIFO, the validation code
 663	 * will then make sure that if the pushbuf bo moves, the moves
 664	 * happen on the kernel channel, which will in turn cause a sync
665	 * to happen before we try and submit the push buffer.
666	 */
667	for (i = 0; i < req->nr_push; i++) {
668		if (push[i].bo_index >= req->nr_buffers) {
669			NV_ERROR(dev, "push %d buffer not in list\n", i);
670			ret = -EINVAL;
671			goto out_prevalid;
672		}
673
674		bo[push[i].bo_index].read_domains |= (1 << 31);
675	}
676
677	/* Validate buffer list */
678	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
679					   req->nr_buffers, &op, &do_reloc);
680	if (ret) {
681		if (ret != -ERESTARTSYS)
682			NV_ERROR(dev, "validate: %d\n", ret);
683		goto out_prevalid;
684	}
685
686	/* Apply any relocations that are required */
687	if (do_reloc) {
688		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
689		if (ret) {
690			NV_ERROR(dev, "reloc apply: %d\n", ret);
691			goto out;
692		}
693	}
694
695	if (chan->dma.ib_max) {
696		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
697		if (ret) {
698			NV_INFO(dev, "nv50cal_space: %d\n", ret);
699			goto out;
700		}
701
702		for (i = 0; i < req->nr_push; i++) {
703			struct nouveau_bo *nvbo = (void *)(unsigned long)
704				bo[push[i].bo_index].user_priv;
705
706			nv50_dma_push(chan, nvbo, push[i].offset,
707				      push[i].length);
708		}
709	} else
710	if (dev_priv->chipset >= 0x25) {
711		ret = RING_SPACE(chan, req->nr_push * 2);
712		if (ret) {
713			NV_ERROR(dev, "cal_space: %d\n", ret);
714			goto out;
715		}
716
717		for (i = 0; i < req->nr_push; i++) {
718			struct nouveau_bo *nvbo = (void *)(unsigned long)
719				bo[push[i].bo_index].user_priv;
720			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
721
722			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
723					push[i].offset) | 2);
724			OUT_RING(chan, 0);
725		}
726	} else {
727		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
728		if (ret) {
729			NV_ERROR(dev, "jmp_space: %d\n", ret);
730			goto out;
731		}
732
733		for (i = 0; i < req->nr_push; i++) {
734			struct nouveau_bo *nvbo = (void *)(unsigned long)
735				bo[push[i].bo_index].user_priv;
736			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
737			uint32_t cmd;
738
739			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
740			cmd |= 0x20000000;
741			if (unlikely(cmd != req->suffix0)) {
742				if (!nvbo->kmap.virtual) {
743					ret = ttm_bo_kmap(&nvbo->bo, 0,
744							  nvbo->bo.mem.
745							  num_pages,
746							  &nvbo->kmap);
747					if (ret) {
748						WIND_RING(chan);
749						goto out;
750					}
751					nvbo->validate_mapped = true;
752				}
753
754				nouveau_bo_wr32(nvbo, (push[i].offset +
755						push[i].length - 8) / 4, cmd);
756			}
757
758			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
759					push[i].offset) | 0x20000000);
760			OUT_RING(chan, 0);
761			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
762				OUT_RING(chan, 0);
763		}
764	}
765
766	ret = nouveau_fence_new(chan, &fence, true);
767	if (ret) {
768		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
769		WIND_RING(chan);
770		goto out;
771	}
772
773out:
774	validate_fini(&op, fence);
775	nouveau_fence_unref(&fence);
776
777out_prevalid:
778	kfree(bo);
779	kfree(push);
780
781out_next:
782	if (chan->dma.ib_max) {
783		req->suffix0 = 0x00000000;
784		req->suffix1 = 0x00000000;
785	} else
786	if (dev_priv->chipset >= 0x25) {
787		req->suffix0 = 0x00020000;
788		req->suffix1 = 0x00000000;
789	} else {
790		req->suffix0 = 0x20000000 |
791			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
792		req->suffix1 = 0x00000000;
793	}
794
795	nouveau_channel_put(&chan);
796	return ret;
797}
798
799static inline uint32_t
800domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
801{
802	uint32_t flags = 0;
803
804	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
805		flags |= TTM_PL_FLAG_VRAM;
806	if (domain & NOUVEAU_GEM_DOMAIN_GART)
807		flags |= TTM_PL_FLAG_TT;
808
809	return flags;
810}
811
812int
813nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
814			   struct drm_file *file_priv)
815{
816	struct drm_nouveau_gem_cpu_prep *req = data;
817	struct drm_gem_object *gem;
818	struct nouveau_bo *nvbo;
819	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
820	int ret = -EINVAL;
821
822	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
823	if (!gem)
824		return -ENOENT;
825	nvbo = nouveau_gem_object(gem);
826
827	spin_lock(&nvbo->bo.bdev->fence_lock);
828	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
829	spin_unlock(&nvbo->bo.bdev->fence_lock);
830	drm_gem_object_unreference_unlocked(gem);
831	return ret;
832}
833
834int
835nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
836			   struct drm_file *file_priv)
837{
838	return 0;
839}
840
841int
842nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
843		       struct drm_file *file_priv)
844{
845	struct drm_nouveau_gem_info *req = data;
846	struct drm_gem_object *gem;
847	int ret;
848
849	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
850	if (!gem)
851		return -ENOENT;
852
853	ret = nouveau_gem_info(file_priv, gem, req);
854	drm_gem_object_unreference_unlocked(gem);
855	return ret;
856}
857
v6.13.7
   1/*
   2 * Copyright (C) 2008 Ben Skeggs.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining
   6 * a copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sublicense, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial
  15 * portions of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 *
  25 */
  26
  27#include <drm/drm_gem_ttm_helper.h>
  28
  29#include "nouveau_drv.h"
  30#include "nouveau_dma.h"
  31#include "nouveau_fence.h"
  32#include "nouveau_abi16.h"
  33
  34#include "nouveau_ttm.h"
  35#include "nouveau_gem.h"
  36#include "nouveau_mem.h"
  37#include "nouveau_vmm.h"
  38
  39#include <nvif/class.h>
  40#include <nvif/push206e.h>
  41
  42static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
  43{
  44	struct vm_area_struct *vma = vmf->vma;
  45	struct ttm_buffer_object *bo = vma->vm_private_data;
  46	pgprot_t prot;
  47	vm_fault_t ret;
  48
  49	ret = ttm_bo_vm_reserve(bo, vmf);
  50	if (ret)
  51		return ret;
  52
  53	ret = nouveau_ttm_fault_reserve_notify(bo);
  54	if (ret)
  55		goto error_unlock;
  56
  57	nouveau_bo_del_io_reserve_lru(bo);
  58	prot = vm_get_page_prot(vma->vm_flags);
  59	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
  60	nouveau_bo_add_io_reserve_lru(bo);
  61	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
  62		return ret;
  63
  64error_unlock:
  65	dma_resv_unlock(bo->base.resv);
  66	return ret;
  67}
  68
  69static const struct vm_operations_struct nouveau_ttm_vm_ops = {
  70	.fault = nouveau_ttm_fault,
  71	.open = ttm_bo_vm_open,
  72	.close = ttm_bo_vm_close,
  73	.access = ttm_bo_vm_access
  74};
  75
  76void
  77nouveau_gem_object_del(struct drm_gem_object *gem)
  78{
  79	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
  80	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
  81	struct device *dev = drm->dev->dev;
  82	int ret;
  83
  84	ret = pm_runtime_get_sync(dev);
  85	if (WARN_ON(ret < 0 && ret != -EACCES)) {
  86		pm_runtime_put_autosuspend(dev);
  87		return;
  88	}
  89
  90	if (gem->import_attach)
  91		drm_prime_gem_destroy(gem, nvbo->bo.sg);
  92
  93	ttm_bo_put(&nvbo->bo);
  94
  95	pm_runtime_mark_last_busy(dev);
  96	pm_runtime_put_autosuspend(dev);
  97}
  98
  99int
 100nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 101{
 102	struct nouveau_cli *cli = nouveau_cli(file_priv);
 103	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 104	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 105	struct device *dev = drm->dev->dev;
 106	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
 107	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
 108	struct nouveau_vma *vma;
 109	int ret;
 110
 111	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 112		return 0;
 113
 114	if (nvbo->no_share && uvmm &&
 115	    drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
 116		return -EPERM;
 117
 118	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 119	if (ret)
 120		return ret;
 121
 122	ret = pm_runtime_get_sync(dev);
 123	if (ret < 0 && ret != -EACCES) {
 124		pm_runtime_put_autosuspend(dev);
 125		goto out;
 126	}
 127
 128	/* only create a VMA on binding */
 129	if (!nouveau_cli_uvmm(cli))
 130		ret = nouveau_vma_new(nvbo, vmm, &vma);
 131	else
 132		ret = 0;
 133	pm_runtime_mark_last_busy(dev);
 134	pm_runtime_put_autosuspend(dev);
 135out:
 136	ttm_bo_unreserve(&nvbo->bo);
 137	return ret;
 138}
 139
 140struct nouveau_gem_object_unmap {
 141	struct nouveau_cli_work work;
 142	struct nouveau_vma *vma;
 143};
 144
 145static void
 146nouveau_gem_object_delete(struct nouveau_vma *vma)
 147{
 148	nouveau_fence_unref(&vma->fence);
 149	nouveau_vma_del(&vma);
 150}
 151
 152static void
 153nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
 154{
 155	struct nouveau_gem_object_unmap *work =
 156		container_of(w, typeof(*work), work);
 157	nouveau_gem_object_delete(work->vma);
 158	kfree(work);
 159}
 160
 161static void
 162nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 163{
 164	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
 165	struct nouveau_gem_object_unmap *work;
 166
 167	list_del_init(&vma->head);
 168
 169	if (!fence) {
 170		nouveau_gem_object_delete(vma);
 171		return;
 172	}
 173
 174	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
 175		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
 176		nouveau_gem_object_delete(vma);
 177		return;
 178	}
 179
 180	work->work.func = nouveau_gem_object_delete_work;
 181	work->vma = vma;
 182	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
 183}
 184
 185void
 186nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 187{
 188	struct nouveau_cli *cli = nouveau_cli(file_priv);
 189	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 190	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 191	struct device *dev = drm->dev->dev;
 192	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
 193	struct nouveau_vma *vma;
 194	int ret;
 195
 196	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 197		return;
 198
 199	if (nouveau_cli_uvmm(cli))
 200		return;
 201
 202	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 203	if (ret)
 204		return;
 205
 206	vma = nouveau_vma_find(nvbo, vmm);
 207	if (vma) {
 208		if (--vma->refs == 0) {
 209			ret = pm_runtime_get_sync(dev);
 210			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
 211				nouveau_gem_object_unmap(nvbo, vma);
 212				pm_runtime_mark_last_busy(dev);
 213			}
 214			pm_runtime_put_autosuspend(dev);
 215		}
 216	}
 217	ttm_bo_unreserve(&nvbo->bo);
 218}
 219
 220const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
 221	.free = nouveau_gem_object_del,
 222	.open = nouveau_gem_object_open,
 223	.close = nouveau_gem_object_close,
 224	.export = nouveau_gem_prime_export,
 225	.pin = nouveau_gem_prime_pin,
 226	.unpin = nouveau_gem_prime_unpin,
 227	.get_sg_table = nouveau_gem_prime_get_sg_table,
 228	.vmap = drm_gem_ttm_vmap,
 229	.vunmap = drm_gem_ttm_vunmap,
 230	.mmap = drm_gem_ttm_mmap,
 231	.vm_ops = &nouveau_ttm_vm_ops,
 232};
 233
 234int
 235nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 236		uint32_t tile_mode, uint32_t tile_flags,
 237		struct nouveau_bo **pnvbo)
 238{
 239	struct nouveau_drm *drm = cli->drm;
 240	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
 241	struct dma_resv *resv = NULL;
 242	struct nouveau_bo *nvbo;
 243	int ret;
 244
 245	if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
 246		if (unlikely(!uvmm))
 247			return -EINVAL;
 248
 249		resv = drm_gpuvm_resv(&uvmm->base);
 250	}
 251
 252	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
 253		domain |= NOUVEAU_GEM_DOMAIN_CPU;
 254
 255	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
 256				tile_flags, false);
 257	if (IS_ERR(nvbo))
 258		return PTR_ERR(nvbo);
 259
 260	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
 261	nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;
 262
 263	/* Initialize the embedded gem-object. We return a single gem-reference
 264	 * to the caller, instead of a normal nouveau_bo ttm reference. */
 265	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
 266	if (ret) {
 267		drm_gem_object_release(&nvbo->bo.base);
 268		kfree(nvbo);
 269		return ret;
 270	}
 271
 272	if (resv)
 273		dma_resv_lock(resv, NULL);
 274
 275	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
 276
 277	if (resv)
 278		dma_resv_unlock(resv);
 279
 280	if (ret)
 281		return ret;
 282
 283	/* we restrict allowed domains on nv50+ to only the types
 284	 * that were requested at creation time.  not possible on
 285	 * earlier chips without busting the ABI.
 286	 */
 287	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 288			      NOUVEAU_GEM_DOMAIN_GART;
 289	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 290		nvbo->valid_domains &= domain;
 291
 292	if (nvbo->no_share) {
 293		nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
 294		drm_gem_object_get(nvbo->r_obj);
 295	}
 296
 297	*pnvbo = nvbo;
 298	return 0;
 299}
 300
 301static int
 302nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 303		 struct drm_nouveau_gem_info *rep)
 304{
 305	struct nouveau_cli *cli = nouveau_cli(file_priv);
 306	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 307	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
 308	struct nouveau_vma *vma;
 309
 310	if (is_power_of_2(nvbo->valid_domains))
 311		rep->domain = nvbo->valid_domains;
 312	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 313		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 314	else
 315		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 316	rep->offset = nvbo->offset;
 317	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
 318	    !nouveau_cli_uvmm(cli)) {
 319		vma = nouveau_vma_find(nvbo, vmm);
 320		if (!vma)
 321			return -EINVAL;
 322
 323		rep->offset = vma->addr;
 324	} else
 325		rep->offset = 0;
 326
 327	rep->size = nvbo->bo.base.size;
 328	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
 329	rep->tile_mode = nvbo->mode;
 330	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
 331	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
 332		rep->tile_flags |= nvbo->kind << 8;
 333	else
 334	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 335		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
 336	else
 337		rep->tile_flags |= nvbo->zeta;
 338	return 0;
 339}
 340
 341int
 342nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 343		      struct drm_file *file_priv)
 344{
 345	struct nouveau_cli *cli = nouveau_cli(file_priv);
 346	struct drm_nouveau_gem_new *req = data;
 347	struct nouveau_bo *nvbo = NULL;
 348	int ret = 0;
 349
 350	/* If uvmm wasn't initialized until now, disable it completely to prevent
 351	 * userspace from mixing up UAPIs.
 352	 */
 353	nouveau_cli_disable_uvmm_noinit(cli);
 354
 355	ret = nouveau_gem_new(cli, req->info.size, req->align,
 356			      req->info.domain, req->info.tile_mode,
 357			      req->info.tile_flags, &nvbo);
 358	if (ret)
 359		return ret;
 360
 361	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
 362				    &req->info.handle);
 363	if (ret == 0) {
 364		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
 365		if (ret)
 366			drm_gem_handle_delete(file_priv, req->info.handle);
 367	}
 368
 369	/* drop reference from allocate - handle holds it now */
 370	drm_gem_object_put(&nvbo->bo.base);
 371	return ret;
 372}
 373
 374static int
 375nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 376		       uint32_t write_domains, uint32_t valid_domains)
 377{
 378	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 379	struct ttm_buffer_object *bo = &nvbo->bo;
 380	uint32_t domains = valid_domains & nvbo->valid_domains &
 381		(write_domains ? write_domains : read_domains);
 382	uint32_t pref_domains = 0;
 383
 384	if (!domains)
 385		return -EINVAL;
 386
 387	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 388
 389	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 390	    bo->resource->mem_type == TTM_PL_VRAM)
 391		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 392
 393	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
 394		 bo->resource->mem_type == TTM_PL_TT)
 395		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 396
 397	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
 398		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 399
 400	else
 401		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 402
 403	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
 404
 405	return 0;
 406}
 407
 408struct validate_op {
 409	struct list_head list;
 410	struct ww_acquire_ctx ticket;
 411};
 412
 413static void
 414validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
 415			struct nouveau_fence *fence,
 416			struct drm_nouveau_gem_pushbuf_bo *pbbo)
 417{
 418	struct nouveau_bo *nvbo;
 419	struct drm_nouveau_gem_pushbuf_bo *b;
 420
 421	while (!list_empty(&op->list)) {
 422		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
 423		b = &pbbo[nvbo->pbbo_index];
 424
 425		if (likely(fence)) {
 426			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 427
 428			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 429				struct nouveau_vma *vma =
 430					(void *)(unsigned long)b->user_priv;
 431				nouveau_fence_unref(&vma->fence);
 432				dma_fence_get(&fence->base);
 433				vma->fence = fence;
 434			}
 435		}
 436
 437		if (unlikely(nvbo->validate_mapped)) {
 438			ttm_bo_kunmap(&nvbo->kmap);
 439			nvbo->validate_mapped = false;
 440		}
 441
 442		list_del(&nvbo->entry);
 443		nvbo->reserved_by = NULL;
 444		ttm_bo_unreserve(&nvbo->bo);
 445		drm_gem_object_put(&nvbo->bo.base);
 446	}
 447}
 448
 449static void
 450validate_fini(struct validate_op *op, struct nouveau_channel *chan,
 451	      struct nouveau_fence *fence,
 452	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
 453{
 454	validate_fini_no_ticket(op, chan, fence, pbbo);
 455	ww_acquire_fini(&op->ticket);
 456}
 457
 458static int
 459validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 460	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
 461	      int nr_buffers, struct validate_op *op)
 462{
 463	struct nouveau_cli *cli = nouveau_cli(file_priv);
 464	int trycnt = 0;
 465	int ret = -EINVAL, i;
 466	struct nouveau_bo *res_bo = NULL;
 467	LIST_HEAD(gart_list);
 468	LIST_HEAD(vram_list);
 469	LIST_HEAD(both_list);
 470
 471	ww_acquire_init(&op->ticket, &reservation_ww_class);
 472retry:
 473	if (++trycnt > 100000) {
 474		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
 475		return -EINVAL;
 476	}
 477
 478	for (i = 0; i < nr_buffers; i++) {
 479		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
 480		struct drm_gem_object *gem;
 481		struct nouveau_bo *nvbo;
 482
 483		gem = drm_gem_object_lookup(file_priv, b->handle);
 484		if (!gem) {
 485			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
 486			ret = -ENOENT;
 487			break;
 488		}
 489		nvbo = nouveau_gem_object(gem);
 490		if (nvbo == res_bo) {
 491			res_bo = NULL;
 492			drm_gem_object_put(gem);
 493			continue;
 494		}
 495
 496		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
 497			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
 498				      "validation list\n", b->handle);
 499			drm_gem_object_put(gem);
 500			ret = -EINVAL;
 501			break;
 502		}
 503
 504		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
 505		if (ret) {
 506			list_splice_tail_init(&vram_list, &op->list);
 507			list_splice_tail_init(&gart_list, &op->list);
 508			list_splice_tail_init(&both_list, &op->list);
 509			validate_fini_no_ticket(op, chan, NULL, NULL);
 510			if (unlikely(ret == -EDEADLK)) {
 511				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
 512							      &op->ticket);
 513				if (!ret)
 514					res_bo = nvbo;
 515			}
 516			if (unlikely(ret)) {
 517				if (ret != -ERESTARTSYS)
 518					NV_PRINTK(err, cli, "fail reserve\n");
 519				break;
 520			}
 521		}
 522
 523		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 524			struct nouveau_vmm *vmm = chan->vmm;
 525			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
 526			if (!vma) {
 527				NV_PRINTK(err, cli, "vma not found!\n");
 528				ret = -EINVAL;
 529				break;
 530			}
 531
 532			b->user_priv = (uint64_t)(unsigned long)vma;
 533		} else {
 534			b->user_priv = (uint64_t)(unsigned long)nvbo;
 535		}
 536
 537		nvbo->reserved_by = file_priv;
 538		nvbo->pbbo_index = i;
 539		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 540		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
 541			list_add_tail(&nvbo->entry, &both_list);
 542		else
 543		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
 544			list_add_tail(&nvbo->entry, &vram_list);
 545		else
 546		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 547			list_add_tail(&nvbo->entry, &gart_list);
 548		else {
 549			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
 550				 b->valid_domains);
 551			list_add_tail(&nvbo->entry, &both_list);
 552			ret = -EINVAL;
 553			break;
 554		}
 555		if (nvbo == res_bo)
 556			goto retry;
 557	}
 558
 559	ww_acquire_done(&op->ticket);
 560	list_splice_tail(&vram_list, &op->list);
 561	list_splice_tail(&gart_list, &op->list);
 562	list_splice_tail(&both_list, &op->list);
 563	if (ret)
 564		validate_fini(op, chan, NULL, NULL);
 565	return ret;
 566
 567}
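Editor's note, a hedged comparison with the v3.1 listing above: the hand-rolled validate_sequence counter and ttm_bo_wait_unreserved() retry loop are gone. All reservations now share the ww_acquire_ctx embedded in validate_op; a contended reservation fails with -EDEADLK, everything already held is dropped through validate_fini_no_ticket(), and ttm_bo_reserve_slowpath() sleeps until the contended buffer can be acquired before the list is walked again, which is the kernel's standard wound/wait deadlock-avoidance pattern.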
 568
 569static int
 570validate_list(struct nouveau_channel *chan,
 571	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
 572{
 573	struct nouveau_cli *cli = chan->cli;
 574	struct nouveau_drm *drm = cli->drm;
 575	struct nouveau_bo *nvbo;
 576	int ret, relocs = 0;
 577
 578	list_for_each_entry(nvbo, list, entry) {
 579		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 580
 581		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
 582					     b->write_domains,
 583					     b->valid_domains);
 584		if (unlikely(ret)) {
 585			NV_PRINTK(err, cli, "fail set_domain\n");
 586			return ret;
 587		}
 588
 589		ret = nouveau_bo_validate(nvbo, true, false);
 590		if (unlikely(ret)) {
 591			if (ret != -ERESTARTSYS)
 592				NV_PRINTK(err, cli, "fail ttm_validate\n");
 593			return ret;
 594		}
 595
 596		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 597		if (unlikely(ret)) {
 598			if (ret != -ERESTARTSYS)
 599				NV_PRINTK(err, cli, "fail post-validate sync\n");
 600			return ret;
 601		}
 602
 603		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 604			if (nvbo->offset == b->presumed.offset &&
 605			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 606			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 607			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
 608			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 609				continue;
 610
 611			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 612				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 613			else
 614				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 615			b->presumed.offset = nvbo->offset;
 616			b->presumed.valid = 0;
 617			relocs++;
 618		}
 619	}
 620
 621	return relocs;
 622}
 623
 624static int
 625nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 626			     struct drm_file *file_priv,
 627			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
 628			     int nr_buffers,
 629			     struct validate_op *op, bool *apply_relocs)
 630{
 631	struct nouveau_cli *cli = nouveau_cli(file_priv);
 632	int ret;
 633
 634	INIT_LIST_HEAD(&op->list);
 635
 636	if (nr_buffers == 0)
 637		return 0;
 638
 639	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 640	if (unlikely(ret)) {
 641		if (ret != -ERESTARTSYS)
 642			NV_PRINTK(err, cli, "validate_init\n");
 643		return ret;
 644	}
 645
 646	ret = validate_list(chan, &op->list, pbbo);
 647	if (unlikely(ret < 0)) {
 648		if (ret != -ERESTARTSYS)
 649			NV_PRINTK(err, cli, "validating bo list\n");
 650		validate_fini(op, chan, NULL, NULL);
 651		return ret;
 652	} else if (ret > 0) {
 653		*apply_relocs = true;
 654	}
 655
 656	return 0;
 657}
 658
 659static int
 660nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 661				struct drm_nouveau_gem_pushbuf *req,
 662				struct drm_nouveau_gem_pushbuf_reloc *reloc,
 663				struct drm_nouveau_gem_pushbuf_bo *bo)
 664{
 665	int ret = 0;
 666	unsigned i;
 667
 668	for (i = 0; i < req->nr_relocs; i++) {
 669		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 670		struct drm_nouveau_gem_pushbuf_bo *b;
 671		struct nouveau_bo *nvbo;
 672		uint32_t data;
 673		long lret;
 674
 675		if (unlikely(r->bo_index >= req->nr_buffers)) {
 676			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 677			ret = -EINVAL;
 678			break;
 679		}
 680
 681		b = &bo[r->bo_index];
 682		if (b->presumed.valid)
 683			continue;
 684
 685		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
 686			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 687			ret = -EINVAL;
 688			break;
 689		}
 690		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
 691
 692		if (unlikely(r->reloc_bo_offset + 4 >
 693			     nvbo->bo.base.size)) {
 694			NV_PRINTK(err, cli, "reloc outside of bo\n");
 695			ret = -EINVAL;
 696			break;
 697		}
 698
 699		if (!nvbo->kmap.virtual) {
 700			ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
 701					  &nvbo->kmap);
 702			if (ret) {
 703				NV_PRINTK(err, cli, "failed kmap for reloc\n");
 704				break;
 705			}
 706			nvbo->validate_mapped = true;
 707		}
 708
 709		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
 710			data = b->presumed.offset + r->data;
 711		else
 712		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
 713			data = (b->presumed.offset + r->data) >> 32;
 714		else
 715			data = r->data;
 716
 717		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
 718			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 719				data |= r->tor;
 720			else
 721				data |= r->vor;
 722		}
 723
 724		lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
 725					     DMA_RESV_USAGE_BOOKKEEP,
 726					     false, 15 * HZ);
 727		if (!lret)
 728			ret = -EBUSY;
 729		else if (lret > 0)
 730			ret = 0;
 731		else
 732			ret = lret;
 733
 734		if (ret) {
 735			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
 736				  ret);
 737			break;
 738		}
 739
 740		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 741	}
 742
 743	return ret;
 744}
 745
 746int
 747nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 748			  struct drm_file *file_priv)
 749{
 750	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
 751	struct nouveau_cli *cli = nouveau_cli(file_priv);
 752	struct nouveau_abi16_chan *temp;
 753	struct nouveau_drm *drm = nouveau_drm(dev);
 754	struct drm_nouveau_gem_pushbuf *req = data;
 755	struct drm_nouveau_gem_pushbuf_push *push;
 756	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 757	struct drm_nouveau_gem_pushbuf_bo *bo;
 758	struct nouveau_channel *chan = NULL;
 759	struct validate_op op;
 760	struct nouveau_fence *fence = NULL;
 761	int i, j, ret = 0;
 762	bool do_reloc = false, sync = false;
 763
 764	if (unlikely(!abi16))
 765		return -ENOMEM;
 766
 767	if (unlikely(nouveau_cli_uvmm(cli)))
 768		return nouveau_abi16_put(abi16, -ENOSYS);
 769
 770	list_for_each_entry(temp, &abi16->channels, head) {
 771		if (temp->chan->chid == req->channel) {
 772			chan = temp->chan;
 773			break;
 774		}
 775	}
 776
 777	if (!chan)
 778		return nouveau_abi16_put(abi16, -ENOENT);
 779	if (unlikely(atomic_read(&chan->killed)))
 780		return nouveau_abi16_put(abi16, -ENODEV);
 781
 782	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
 783
 784	req->vram_available = drm->gem.vram_available;
 785	req->gart_available = drm->gem.gart_available;
 786	if (unlikely(req->nr_push == 0))
 787		goto out_next;
 788
 789	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 790		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
 791			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 792		return nouveau_abi16_put(abi16, -EINVAL);
 793	}
 794
 795	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 796		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
 797			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
 798		return nouveau_abi16_put(abi16, -EINVAL);
 799	}
 800
 801	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 802		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
 803			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 804		return nouveau_abi16_put(abi16, -EINVAL);
 805	}
 806
 807	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
 808	if (IS_ERR(push))
 809		return nouveau_abi16_put(abi16, PTR_ERR(push));
 810
 811	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 812	if (IS_ERR(bo)) {
 813		u_free(push);
 814		return nouveau_abi16_put(abi16, PTR_ERR(bo));
 815	}
 816
 817	/* Ensure all push buffers are on validate list */
 818	for (i = 0; i < req->nr_push; i++) {
 819		if (push[i].bo_index >= req->nr_buffers) {
 820			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
 821			ret = -EINVAL;
 822			goto out_prevalid;
 823		}
 824	}
 825
 826	/* Validate buffer list */
 827revalidate:
 828	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
 829					   req->nr_buffers, &op, &do_reloc);
 830	if (ret) {
 831		if (ret != -ERESTARTSYS)
 832			NV_PRINTK(err, cli, "validate: %d\n", ret);
 833		goto out_prevalid;
 834	}
 835
 836	/* Apply any relocations that are required */
 837	if (do_reloc) {
 838		if (!reloc) {
 839			validate_fini(&op, chan, NULL, bo);
 840			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 841			if (IS_ERR(reloc)) {
 842				ret = PTR_ERR(reloc);
 843				goto out_prevalid;
 844			}
 845
 846			goto revalidate;
 847		}
 848
 849		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
 850		if (ret) {
 851			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
 852			goto out;
 853		}
 854	}
 855
 856	if (chan->dma.ib_max) {
 857		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 858		if (ret) {
 859			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
 860			goto out;
 861		}
 862
 863		for (i = 0; i < req->nr_push; i++) {
 864			struct nouveau_vma *vma = (void *)(unsigned long)
 865				bo[push[i].bo_index].user_priv;
 866			u64 addr = vma->addr + push[i].offset;
 867			u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
 868			bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
 869
 870			nv50_dma_push(chan, addr, length, no_prefetch);
 871		}
 872	} else
 873	if (drm->client.device.info.chipset >= 0x25) {
 874		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
 875		if (ret) {
 876			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
 877			goto out;
 878		}
 879
 880		for (i = 0; i < req->nr_push; i++) {
 881			struct nouveau_bo *nvbo = (void *)(unsigned long)
 882				bo[push[i].bo_index].user_priv;
 883
 884			PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset);
 885			PUSH_DATA(&chan->chan.push, 0);
 886		}
 887	} else {
 888		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 889		if (ret) {
 890			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
 891			goto out;
 892		}
 893
 894		for (i = 0; i < req->nr_push; i++) {
 895			struct nouveau_bo *nvbo = (void *)(unsigned long)
 896				bo[push[i].bo_index].user_priv;
 897			uint32_t cmd;
 898
 899			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
 900			cmd |= 0x20000000;
 901			if (unlikely(cmd != req->suffix0)) {
 902				if (!nvbo->kmap.virtual) {
 903					ret = ttm_bo_kmap(&nvbo->bo, 0,
 904							  PFN_UP(nvbo->bo.base.size),
 905							  &nvbo->kmap);
 906					if (ret) {
 907						WIND_RING(chan);
 908						goto out;
 909					}
 910					nvbo->validate_mapped = true;
 911				}
 912
 913				nouveau_bo_wr32(nvbo, (push[i].offset +
 914						push[i].length - 8) / 4, cmd);
 915			}
 916
 917			PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset);
 918			PUSH_DATA(&chan->chan.push, 0);
 919			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
 920				PUSH_DATA(&chan->chan.push, 0);
 921		}
 922	}
 923
 924	ret = nouveau_fence_new(&fence, chan);
 925	if (ret) {
 926		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
 927		WIND_RING(chan);
 928		goto out;
 929	}
 930
 931	if (sync) {
 932		if (!(ret = nouveau_fence_wait(fence, false, false))) {
 933			if ((ret = dma_fence_get_status(&fence->base)) == 1)
 934				ret = 0;
 935		}
 936	}
 937
 938out:
 939	validate_fini(&op, chan, fence, bo);
 940	nouveau_fence_unref(&fence);
 941
 942	if (do_reloc) {
 943		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 944			u64_to_user_ptr(req->buffers);
 945
 946		for (i = 0; i < req->nr_buffers; i++) {
 947			if (bo[i].presumed.valid)
 948				continue;
 949
 950			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
 951					 sizeof(bo[i].presumed))) {
 952				ret = -EFAULT;
 953				break;
 954			}
 955		}
 956	}
 957out_prevalid:
 958	if (!IS_ERR(reloc))
 959		u_free(reloc);
 960	u_free(bo);
 961	u_free(push);
 962
 963out_next:
 964	if (chan->dma.ib_max) {
 965		req->suffix0 = 0x00000000;
 966		req->suffix1 = 0x00000000;
 967	} else
 968	if (drm->client.device.info.chipset >= 0x25) {
 969		req->suffix0 = 0x00020000;
 970		req->suffix1 = 0x00000000;
 971	} else {
 972		req->suffix0 = 0x20000000 |
 973			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
 974		req->suffix1 = 0x00000000;
 975	}
 976
 977	return nouveau_abi16_put(abi16, ret);
 978}
 979
 980int
 981nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 982			   struct drm_file *file_priv)
 983{
 984	struct drm_nouveau_gem_cpu_prep *req = data;
 985	struct drm_gem_object *gem;
 986	struct nouveau_bo *nvbo;
 987	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
 988	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
 989	long lret;
 990	int ret;
 991
 992	gem = drm_gem_object_lookup(file_priv, req->handle);
 993	if (!gem)
 994		return -ENOENT;
 995	nvbo = nouveau_gem_object(gem);
 996
 997	lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
 998				     dma_resv_usage_rw(write), true,
 999				     no_wait ? 0 : 30 * HZ);
1000	if (!lret)
1001		ret = -EBUSY;
1002	else if (lret > 0)
1003		ret = 0;
1004	else
1005		ret = lret;
1006
1007	nouveau_bo_sync_for_cpu(nvbo);
1008	drm_gem_object_put(gem);
1009
1010	return ret;
1011}
1012
1013int
1014nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
1015			   struct drm_file *file_priv)
1016{
1017	struct drm_nouveau_gem_cpu_fini *req = data;
1018	struct drm_gem_object *gem;
1019	struct nouveau_bo *nvbo;
1020
1021	gem = drm_gem_object_lookup(file_priv, req->handle);
1022	if (!gem)
1023		return -ENOENT;
1024	nvbo = nouveau_gem_object(gem);
1025
1026	nouveau_bo_sync_for_device(nvbo);
1027	drm_gem_object_put(gem);
1028	return 0;
1029}
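Editor's illustration, not part of the kernel source: the prep/fini pair above brackets direct CPU access to a buffer. A minimal userspace sketch, assuming an open nouveau DRM file descriptor fd and a buffer handle obtained from DRM_IOCTL_NOUVEAU_GEM_NEW:

	#include <sys/ioctl.h>
	#include <drm/nouveau_drm.h>

	struct drm_nouveau_gem_cpu_prep prep = {
		.handle = handle,                      /* hypothetical handle */
		.flags  = NOUVEAU_GEM_CPU_PREP_WRITE,  /* wait on writers too */
	};
	struct drm_nouveau_gem_cpu_fini fini = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_PREP, &prep) == 0) {
		/* ... CPU reads/writes through the object's mmap() ... */
		ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_FINI, &fini);
	}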
1030
1031int
1032nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
1033		       struct drm_file *file_priv)
1034{
1035	struct drm_nouveau_gem_info *req = data;
1036	struct drm_gem_object *gem;
1037	int ret;
1038
1039	gem = drm_gem_object_lookup(file_priv, req->handle);
1040	if (!gem)
1041		return -ENOENT;
1042
1043	ret = nouveau_gem_info(file_priv, gem, req);
1044	drm_gem_object_put(gem);
1045	return ret;
1046}
1047