v3.1: drivers/gpu/drm/nouveau/nouveau_gem.c
  1/*
  2 * Copyright (C) 2008 Ben Skeggs.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining
  6 * a copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sublicense, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * The above copyright notice and this permission notice (including the
 14 * next paragraph) shall be included in all copies or substantial
 15 * portions of the Software.
 16 *
 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 24 *
 25 */
 26#include "drmP.h"
 27#include "drm.h"
 28
 29#include "nouveau_drv.h"
 30#include "nouveau_drm.h"
 31#include "nouveau_dma.h"
 32
 33#define nouveau_gem_pushbuf_sync(chan) 0
 34
 35int
 36nouveau_gem_object_new(struct drm_gem_object *gem)
 37{
 38	return 0;
 39}
 40
 41void
 42nouveau_gem_object_del(struct drm_gem_object *gem)
 43{
 44	struct nouveau_bo *nvbo = gem->driver_private;
 45	struct ttm_buffer_object *bo = &nvbo->bo;
 46
 47	if (!nvbo)
 48		return;
 49	nvbo->gem = NULL;
 50
 51	if (unlikely(nvbo->pin_refcnt)) {
 52		nvbo->pin_refcnt = 1;
 53		nouveau_bo_unpin(nvbo);
 54	}
 55
 56	ttm_bo_unref(&bo);
 57
 58	drm_gem_object_release(gem);
 59	kfree(gem);
 60}
 61
 62int
 63nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 64{
 65	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
 66	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 67	struct nouveau_vma *vma;
 68	int ret;
 69
 70	if (!fpriv->vm)
 71		return 0;
 72
 73	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
 74	if (ret)
 75		return ret;
 76
 77	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
 78	if (!vma) {
 79		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 80		if (!vma) {
 81			ret = -ENOMEM;
 82			goto out;
 83		}
 84
 85		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
 86		if (ret) {
 87			kfree(vma);
 88			goto out;
 89		}
 90	} else {
 91		vma->refcount++;
 92	}
 93
 94out:
 95	ttm_bo_unreserve(&nvbo->bo);
 96	return ret;
 97}
 98
 99void
100nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
101{
102	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
103	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
104	struct nouveau_vma *vma;
105	int ret;
106
107	if (!fpriv->vm)
108		return;
109
110	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
111	if (ret)
112		return;
113
114	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
115	if (vma) {
116		if (--vma->refcount == 0) {
117			nouveau_bo_vma_del(nvbo, vma);
118			kfree(vma);
119		}
120	}
121	ttm_bo_unreserve(&nvbo->bo);
122}
123
124int
125nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
126		uint32_t tile_mode, uint32_t tile_flags,
127		struct nouveau_bo **pnvbo)
128{
129	struct drm_nouveau_private *dev_priv = dev->dev_private;
130	struct nouveau_bo *nvbo;
131	u32 flags = 0;
132	int ret;
133
134	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
135		flags |= TTM_PL_FLAG_VRAM;
136	if (domain & NOUVEAU_GEM_DOMAIN_GART)
137		flags |= TTM_PL_FLAG_TT;
138	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
139		flags |= TTM_PL_FLAG_SYSTEM;
140
141	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
142			     tile_flags, pnvbo);
143	if (ret)
144		return ret;
145	nvbo = *pnvbo;
146
147	/* we restrict allowed domains on nv50+ to only the types
148	 * that were requested at creation time.  not possible on
149	 * earlier chips without busting the ABI.
150	 */
151	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
152			      NOUVEAU_GEM_DOMAIN_GART;
153	if (dev_priv->card_type >= NV_50)
154		nvbo->valid_domains &= domain;
155
156	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
157	if (!nvbo->gem) {
158		nouveau_bo_ref(NULL, pnvbo);
159		return -ENOMEM;
160	}
161
162	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
163	nvbo->gem->driver_private = nvbo;
164	return 0;
165}
166
167static int
168nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
169		 struct drm_nouveau_gem_info *rep)
170{
171	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
172	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
173	struct nouveau_vma *vma;
174
175	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
176		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
177	else
178		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
179
180	rep->offset = nvbo->bo.offset;
181	if (fpriv->vm) {
182		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
183		if (!vma)
184			return -EINVAL;
185
186		rep->offset = vma->offset;
187	}
188
189	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
190	rep->map_handle = nvbo->bo.addr_space_offset;
191	rep->tile_mode = nvbo->tile_mode;
192	rep->tile_flags = nvbo->tile_flags;
193	return 0;
194}
195
196int
197nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
198		      struct drm_file *file_priv)
199{
200	struct drm_nouveau_private *dev_priv = dev->dev_private;
201	struct drm_nouveau_gem_new *req = data;
202	struct nouveau_bo *nvbo = NULL;
203	int ret = 0;
204
205	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
206		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
207
208	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
209		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
210		return -EINVAL;
211	}
212
213	ret = nouveau_gem_new(dev, req->info.size, req->align,
214			      req->info.domain, req->info.tile_mode,
215			      req->info.tile_flags, &nvbo);
216	if (ret)
217		return ret;
218
219	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
220	if (ret == 0) {
221		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
222		if (ret)
223			drm_gem_handle_delete(file_priv, req->info.handle);
224	}
225
226	/* drop reference from allocate - handle holds it now */
227	drm_gem_object_unreference_unlocked(nvbo->gem);
228	return ret;
229}
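/*
 * Usage note (illustrative sketch of the userspace side, not code from this
 * file): the ioctl above is reached as DRM_IOCTL_NOUVEAU_GEM_NEW.  Field
 * names follow the req->info accesses handled here; error handling and the
 * libdrm wrappers real clients use are left out.
 *
 *	struct drm_nouveau_gem_new req = { 0 };
 *
 *	req.info.size   = 1 << 20;                 // 1 MiB object
 *	req.info.domain = NOUVEAU_GEM_DOMAIN_VRAM; // back it with VRAM
 *	req.align       = 0;                       // let the driver choose
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req) == 0) {
 *		// req.info.handle now names the object; req.info.map_handle
 *		// is the fake offset to pass to mmap() on drm_fd.
 *	}
 */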
230
231static int
232nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
233		       uint32_t write_domains, uint32_t valid_domains)
234{
235	struct nouveau_bo *nvbo = gem->driver_private;
236	struct ttm_buffer_object *bo = &nvbo->bo;
237	uint32_t domains = valid_domains & nvbo->valid_domains &
238		(write_domains ? write_domains : read_domains);
239	uint32_t pref_flags = 0, valid_flags = 0;
240
241	if (!domains)
242		return -EINVAL;
243
244	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
245		valid_flags |= TTM_PL_FLAG_VRAM;
246
247	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
248		valid_flags |= TTM_PL_FLAG_TT;
249
250	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
251	    bo->mem.mem_type == TTM_PL_VRAM)
252		pref_flags |= TTM_PL_FLAG_VRAM;
253
254	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
255		 bo->mem.mem_type == TTM_PL_TT)
256		pref_flags |= TTM_PL_FLAG_TT;
257
258	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
259		pref_flags |= TTM_PL_FLAG_VRAM;
260
261	else
262		pref_flags |= TTM_PL_FLAG_TT;
263
264	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
265
266	return 0;
267}
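/*
 * The cascade above amounts to: among the domains that are both requested
 * and still valid for this object, prefer the placement the buffer already
 * occupies so no migration is triggered, and otherwise prefer VRAM over
 * GART.  valid_flags is the wider set of placements TTM may still fall
 * back to when the preferred one cannot be satisfied.
 */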
268
269struct validate_op {
270	struct list_head vram_list;
271	struct list_head gart_list;
272	struct list_head both_list;
273};
274
275static void
276validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
277{
278	struct list_head *entry, *tmp;
279	struct nouveau_bo *nvbo;
280
281	list_for_each_safe(entry, tmp, list) {
282		nvbo = list_entry(entry, struct nouveau_bo, entry);
283
284		nouveau_bo_fence(nvbo, fence);
285
286		if (unlikely(nvbo->validate_mapped)) {
287			ttm_bo_kunmap(&nvbo->kmap);
288			nvbo->validate_mapped = false;
289		}
290
291		list_del(&nvbo->entry);
292		nvbo->reserved_by = NULL;
293		ttm_bo_unreserve(&nvbo->bo);
294		drm_gem_object_unreference_unlocked(nvbo->gem);
295	}
296}
297
298static void
299validate_fini(struct validate_op *op, struct nouveau_fence* fence)
300{
301	validate_fini_list(&op->vram_list, fence);
302	validate_fini_list(&op->gart_list, fence);
303	validate_fini_list(&op->both_list, fence);
304}
305
306static int
307validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
308	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
309	      int nr_buffers, struct validate_op *op)
310{
311	struct drm_device *dev = chan->dev;
312	struct drm_nouveau_private *dev_priv = dev->dev_private;
313	uint32_t sequence;
314	int trycnt = 0;
315	int ret, i;
316
317	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
318retry:
319	if (++trycnt > 100000) {
320		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
321		return -EINVAL;
322	}
323
324	for (i = 0; i < nr_buffers; i++) {
325		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
326		struct drm_gem_object *gem;
327		struct nouveau_bo *nvbo;
328
329		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
330		if (!gem) {
331			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
332			validate_fini(op, NULL);
333			return -ENOENT;
334		}
335		nvbo = gem->driver_private;
336
337		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
338			NV_ERROR(dev, "multiple instances of buffer %d on "
339				      "validation list\n", b->handle);
340			validate_fini(op, NULL);
341			return -EINVAL;
342		}
343
344		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
345		if (ret) {
346			validate_fini(op, NULL);
347			if (unlikely(ret == -EAGAIN))
348				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
349			drm_gem_object_unreference_unlocked(gem);
350			if (unlikely(ret)) {
351				if (ret != -ERESTARTSYS)
352					NV_ERROR(dev, "fail reserve\n");
353				return ret;
354			}
355			goto retry;
356		}
357
358		b->user_priv = (uint64_t)(unsigned long)nvbo;
359		nvbo->reserved_by = file_priv;
360		nvbo->pbbo_index = i;
361		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
362		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
363			list_add_tail(&nvbo->entry, &op->both_list);
364		else
365		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
366			list_add_tail(&nvbo->entry, &op->vram_list);
367		else
368		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
369			list_add_tail(&nvbo->entry, &op->gart_list);
370		else {
371			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
372				 b->valid_domains);
373			list_add_tail(&nvbo->entry, &op->both_list);
374			validate_fini(op, NULL);
375			return -EINVAL;
376		}
377	}
378
379	return 0;
380}
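/*
 * Reservation conflicts in the loop above are resolved by backing off:
 * reservations are tagged with a global validate_sequence, and when
 * ttm_bo_reserve() returns -EAGAIN every buffer reserved so far is released
 * through validate_fini(), the thread waits for the contended BO to become
 * unreserved, and the whole list is walked again from the start.  The
 * trycnt counter only bounds how often that retry can happen.
 */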
381
382static int
383validate_list(struct nouveau_channel *chan, struct list_head *list,
384	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
385{
386	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
387	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
388				(void __force __user *)(uintptr_t)user_pbbo_ptr;
389	struct drm_device *dev = chan->dev;
390	struct nouveau_bo *nvbo;
391	int ret, relocs = 0;
392
393	list_for_each_entry(nvbo, list, entry) {
394		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
395
396		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
397		if (unlikely(ret)) {
398			NV_ERROR(dev, "fail pre-validate sync\n");
399			return ret;
400		}
401
402		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
403					     b->write_domains,
404					     b->valid_domains);
405		if (unlikely(ret)) {
406			NV_ERROR(dev, "fail set_domain\n");
407			return ret;
408		}
409
410		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
411		ret = nouveau_bo_validate(nvbo, true, false, false);
412		nvbo->channel = NULL;
413		if (unlikely(ret)) {
414			if (ret != -ERESTARTSYS)
415				NV_ERROR(dev, "fail ttm_validate\n");
416			return ret;
417		}
418
419		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
420		if (unlikely(ret)) {
421			NV_ERROR(dev, "fail post-validate sync\n");
422			return ret;
423		}
424
425		if (dev_priv->card_type < NV_50) {
426			if (nvbo->bo.offset == b->presumed.offset &&
427			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
428			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
429			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
430			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
431				continue;
432
433			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
434				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
435			else
436				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
437			b->presumed.offset = nvbo->bo.offset;
438			b->presumed.valid = 0;
439			relocs++;
440
441			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
442					     &b->presumed, sizeof(b->presumed)))
443				return -EFAULT;
444		}
445	}
446
447	return relocs;
448}
449
450static int
451nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
452			     struct drm_file *file_priv,
453			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
454			     uint64_t user_buffers, int nr_buffers,
455			     struct validate_op *op, int *apply_relocs)
456{
457	struct drm_device *dev = chan->dev;
458	int ret, relocs = 0;
459
460	INIT_LIST_HEAD(&op->vram_list);
461	INIT_LIST_HEAD(&op->gart_list);
462	INIT_LIST_HEAD(&op->both_list);
463
464	if (nr_buffers == 0)
465		return 0;
466
467	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
468	if (unlikely(ret)) {
469		if (ret != -ERESTARTSYS)
470			NV_ERROR(dev, "validate_init\n");
471		return ret;
472	}
473
474	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
475	if (unlikely(ret < 0)) {
476		if (ret != -ERESTARTSYS)
477			NV_ERROR(dev, "validate vram_list\n");
478		validate_fini(op, NULL);
479		return ret;
480	}
481	relocs += ret;
482
483	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
484	if (unlikely(ret < 0)) {
485		if (ret != -ERESTARTSYS)
486			NV_ERROR(dev, "validate gart_list\n");
487		validate_fini(op, NULL);
488		return ret;
489	}
490	relocs += ret;
491
492	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
493	if (unlikely(ret < 0)) {
494		if (ret != -ERESTARTSYS)
495			NV_ERROR(dev, "validate both_list\n");
496		validate_fini(op, NULL);
497		return ret;
498	}
499	relocs += ret;
500
501	*apply_relocs = relocs;
502	return 0;
503}
504
505static inline void *
506u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
507{
508	void *mem;
509	void __user *userptr = (void __force __user *)(uintptr_t)user;
510
511	mem = kmalloc(nmemb * size, GFP_KERNEL);
512	if (!mem)
513		return ERR_PTR(-ENOMEM);
514
515	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
516		kfree(mem);
517		return ERR_PTR(-EFAULT);
518	}
519
520	return mem;
521}
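/*
 * Note that the nmemb * size multiplication above is not checked for
 * overflow; it relies on every caller in this file bounding nmemb first
 * (NOUVEAU_GEM_MAX_PUSH, NOUVEAU_GEM_MAX_BUFFERS and NOUVEAU_GEM_MAX_RELOCS
 * are all validated before the corresponding u_memcpya() call).
 */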
522
523static int
524nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
525				struct drm_nouveau_gem_pushbuf *req,
526				struct drm_nouveau_gem_pushbuf_bo *bo)
527{
528	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
529	int ret = 0;
530	unsigned i;
531
532	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
533	if (IS_ERR(reloc))
534		return PTR_ERR(reloc);
535
536	for (i = 0; i < req->nr_relocs; i++) {
537		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
538		struct drm_nouveau_gem_pushbuf_bo *b;
539		struct nouveau_bo *nvbo;
540		uint32_t data;
541
542		if (unlikely(r->bo_index > req->nr_buffers)) {
543			NV_ERROR(dev, "reloc bo index invalid\n");
544			ret = -EINVAL;
545			break;
546		}
547
548		b = &bo[r->bo_index];
549		if (b->presumed.valid)
550			continue;
551
552		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
553			NV_ERROR(dev, "reloc container bo index invalid\n");
554			ret = -EINVAL;
555			break;
556		}
557		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
558
559		if (unlikely(r->reloc_bo_offset + 4 >
560			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
561			NV_ERROR(dev, "reloc outside of bo\n");
562			ret = -EINVAL;
563			break;
564		}
565
566		if (!nvbo->kmap.virtual) {
567			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
568					  &nvbo->kmap);
569			if (ret) {
570				NV_ERROR(dev, "failed kmap for reloc\n");
571				break;
572			}
573			nvbo->validate_mapped = true;
574		}
575
576		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
577			data = b->presumed.offset + r->data;
578		else
579		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
580			data = (b->presumed.offset + r->data) >> 32;
581		else
582			data = r->data;
583
584		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
585			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
586				data |= r->tor;
587			else
588				data |= r->vor;
589		}
590
591		spin_lock(&nvbo->bo.bdev->fence_lock);
592		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
593		spin_unlock(&nvbo->bo.bdev->fence_lock);
594		if (ret) {
595			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
596			break;
597		}
598
599		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
600	}
601
602	kfree(reloc);
603	return ret;
604}
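/*
 * Both bo_index checks above use '>', which still admits an index equal to
 * req->nr_buffers (one past the end of the bo array); the v5.9 version of
 * this function further down tightens both comparisons to '>='.
 */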
605
606int
607nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
608			  struct drm_file *file_priv)
609{
610	struct drm_nouveau_private *dev_priv = dev->dev_private;
611	struct drm_nouveau_gem_pushbuf *req = data;
612	struct drm_nouveau_gem_pushbuf_push *push;
613	struct drm_nouveau_gem_pushbuf_bo *bo;
614	struct nouveau_channel *chan;
615	struct validate_op op;
616	struct nouveau_fence *fence = NULL;
617	int i, j, ret = 0, do_reloc = 0;
618
619	chan = nouveau_channel_get(file_priv, req->channel);
620	if (IS_ERR(chan))
621		return PTR_ERR(chan);
622
623	req->vram_available = dev_priv->fb_aper_free;
624	req->gart_available = dev_priv->gart_info.aper_free;
625	if (unlikely(req->nr_push == 0))
626		goto out_next;
627
628	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
629		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
630			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
631		nouveau_channel_put(&chan);
632		return -EINVAL;
633	}
634
635	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
636		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
637			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
638		nouveau_channel_put(&chan);
639		return -EINVAL;
640	}
641
642	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
643		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
644			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
645		nouveau_channel_put(&chan);
646		return -EINVAL;
647	}
648
649	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
650	if (IS_ERR(push)) {
651		nouveau_channel_put(&chan);
652		return PTR_ERR(push);
653	}
654
655	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
656	if (IS_ERR(bo)) {
657		kfree(push);
658		nouveau_channel_put(&chan);
659		return PTR_ERR(bo);
660	}
661
662	/* Mark push buffers as being used on PFIFO, the validation code
663	 * will then make sure that if the pushbuf bo moves, the move
664	 * happens on the kernel channel, which will in turn cause a sync
665	 * to happen before we try and submit the push buffer.
666	 */
667	for (i = 0; i < req->nr_push; i++) {
668		if (push[i].bo_index >= req->nr_buffers) {
669			NV_ERROR(dev, "push %d buffer not in list\n", i);
670			ret = -EINVAL;
671			goto out_prevalid;
672		}
673
674		bo[push[i].bo_index].read_domains |= (1 << 31);
675	}
676
677	/* Validate buffer list */
678	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
679					   req->nr_buffers, &op, &do_reloc);
680	if (ret) {
681		if (ret != -ERESTARTSYS)
682			NV_ERROR(dev, "validate: %d\n", ret);
683		goto out_prevalid;
684	}
685
686	/* Apply any relocations that are required */
687	if (do_reloc) {
688		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
689		if (ret) {
690			NV_ERROR(dev, "reloc apply: %d\n", ret);
691			goto out;
692		}
693	}
694
695	if (chan->dma.ib_max) {
696		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
697		if (ret) {
698			NV_INFO(dev, "nv50cal_space: %d\n", ret);
699			goto out;
700		}
701
702		for (i = 0; i < req->nr_push; i++) {
703			struct nouveau_bo *nvbo = (void *)(unsigned long)
704				bo[push[i].bo_index].user_priv;
705
706			nv50_dma_push(chan, nvbo, push[i].offset,
707				      push[i].length);
708		}
709	} else
710	if (dev_priv->chipset >= 0x25) {
711		ret = RING_SPACE(chan, req->nr_push * 2);
712		if (ret) {
713			NV_ERROR(dev, "cal_space: %d\n", ret);
714			goto out;
715		}
716
717		for (i = 0; i < req->nr_push; i++) {
718			struct nouveau_bo *nvbo = (void *)(unsigned long)
719				bo[push[i].bo_index].user_priv;
720			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
721
722			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
723					push[i].offset) | 2);
724			OUT_RING(chan, 0);
725		}
726	} else {
727		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
728		if (ret) {
729			NV_ERROR(dev, "jmp_space: %d\n", ret);
730			goto out;
731		}
732
733		for (i = 0; i < req->nr_push; i++) {
734			struct nouveau_bo *nvbo = (void *)(unsigned long)
735				bo[push[i].bo_index].user_priv;
736			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
737			uint32_t cmd;
738
739			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
740			cmd |= 0x20000000;
741			if (unlikely(cmd != req->suffix0)) {
742				if (!nvbo->kmap.virtual) {
743					ret = ttm_bo_kmap(&nvbo->bo, 0,
744							  nvbo->bo.mem.
745							  num_pages,
746							  &nvbo->kmap);
747					if (ret) {
748						WIND_RING(chan);
749						goto out;
750					}
751					nvbo->validate_mapped = true;
752				}
753
754				nouveau_bo_wr32(nvbo, (push[i].offset +
755						push[i].length - 8) / 4, cmd);
756			}
757
758			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
759					push[i].offset) | 0x20000000);
760			OUT_RING(chan, 0);
761			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
762				OUT_RING(chan, 0);
763		}
764	}
765
766	ret = nouveau_fence_new(chan, &fence, true);
767	if (ret) {
768		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
769		WIND_RING(chan);
770		goto out;
771	}
772
773out:
774	validate_fini(&op, fence);
775	nouveau_fence_unref(&fence);
776
777out_prevalid:
778	kfree(bo);
779	kfree(push);
780
781out_next:
782	if (chan->dma.ib_max) {
783		req->suffix0 = 0x00000000;
784		req->suffix1 = 0x00000000;
785	} else
786	if (dev_priv->chipset >= 0x25) {
787		req->suffix0 = 0x00020000;
788		req->suffix1 = 0x00000000;
789	} else {
790		req->suffix0 = 0x20000000 |
791			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
792		req->suffix1 = 0x00000000;
793	}
794
795	nouveau_channel_put(&chan);
796	return ret;
797}
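/*
 * Submission sketch (illustrative; field names as consumed by the ioctl
 * above, with real clients driving this through libdrm_nouveau instead):
 *
 *	struct drm_nouveau_gem_pushbuf_bo bo[1] = { {
 *		.handle        = pb_handle,
 *		.valid_domains = NOUVEAU_GEM_DOMAIN_GART,
 *		.read_domains  = NOUVEAU_GEM_DOMAIN_GART,
 *	} };
 *	struct drm_nouveau_gem_pushbuf_push push[1] = { {
 *		.bo_index = 0, .offset = 0, .length = pb_bytes,
 *	} };
 *	struct drm_nouveau_gem_pushbuf req = {
 *		.channel    = chan_id,
 *		.nr_buffers = 1, .buffers = (uintptr_t)bo,
 *		.nr_push    = 1, .push    = (uintptr_t)push,
 *	};
 *
 *	ret = ioctl(drm_fd, DRM_IOCTL_NOUVEAU_GEM_PUSHBUF, &req);
 */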
798
799static inline uint32_t
800domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
801{
802	uint32_t flags = 0;
803
804	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
805		flags |= TTM_PL_FLAG_VRAM;
806	if (domain & NOUVEAU_GEM_DOMAIN_GART)
807		flags |= TTM_PL_FLAG_TT;
808
809	return flags;
810}
811
812int
813nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
814			   struct drm_file *file_priv)
815{
816	struct drm_nouveau_gem_cpu_prep *req = data;
817	struct drm_gem_object *gem;
818	struct nouveau_bo *nvbo;
819	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
820	int ret = -EINVAL;
821
822	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
823	if (!gem)
824		return -ENOENT;
825	nvbo = nouveau_gem_object(gem);
826
827	spin_lock(&nvbo->bo.bdev->fence_lock);
828	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
829	spin_unlock(&nvbo->bo.bdev->fence_lock);
830	drm_gem_object_unreference_unlocked(gem);
831	return ret;
832}
833
834int
835nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
836			   struct drm_file *file_priv)
837{
838	return 0;
839}
840
841int
842nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
843		       struct drm_file *file_priv)
844{
845	struct drm_nouveau_gem_info *req = data;
846	struct drm_gem_object *gem;
847	int ret;
848
849	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
850	if (!gem)
851		return -ENOENT;
852
853	ret = nouveau_gem_info(file_priv, gem, req);
854	drm_gem_object_unreference_unlocked(gem);
855	return ret;
856}
857
v5.9: drivers/gpu/drm/nouveau/nouveau_gem.c
  1/*
  2 * Copyright (C) 2008 Ben Skeggs.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining
  6 * a copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sublicense, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * The above copyright notice and this permission notice (including the
 14 * next paragraph) shall be included in all copies or substantial
 15 * portions of the Software.
 16 *
 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 24 *
 25 */
 26
 27#include "nouveau_drv.h"
 28#include "nouveau_dma.h"
 29#include "nouveau_fence.h"
 30#include "nouveau_abi16.h"
 31
 32#include "nouveau_ttm.h"
 33#include "nouveau_gem.h"
 34#include "nouveau_mem.h"
 35#include "nouveau_vmm.h"
 36
 37#include <nvif/class.h>
 38#include <nvif/push206e.h>
 39
 40void
 41nouveau_gem_object_del(struct drm_gem_object *gem)
 42{
 43	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 44	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 45	struct device *dev = drm->dev->dev;
 46	int ret;
 47
 48	ret = pm_runtime_get_sync(dev);
 49	if (WARN_ON(ret < 0 && ret != -EACCES)) {
 50		pm_runtime_put_autosuspend(dev);
 51		return;
 52	}
 53
 54	if (gem->import_attach)
 55		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 56
 57	ttm_bo_put(&nvbo->bo);
 58
 59	pm_runtime_mark_last_busy(dev);
 60	pm_runtime_put_autosuspend(dev);
 61}
 62
 63int
 64nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 65{
 66	struct nouveau_cli *cli = nouveau_cli(file_priv);
 67	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 68	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 69	struct device *dev = drm->dev->dev;
 70	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
 71	struct nouveau_vma *vma;
 72	int ret;
 73
 74	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 75		return 0;
 76
 77	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 78	if (ret)
 79		return ret;
 80
 81	ret = pm_runtime_get_sync(dev);
 82	if (ret < 0 && ret != -EACCES) {
 83		pm_runtime_put_autosuspend(dev);
 84		goto out;
 85	}
 86
 87	ret = nouveau_vma_new(nvbo, vmm, &vma);
 88	pm_runtime_mark_last_busy(dev);
 89	pm_runtime_put_autosuspend(dev);
 90out:
 91	ttm_bo_unreserve(&nvbo->bo);
 92	return ret;
 93}
 94
 95struct nouveau_gem_object_unmap {
 96	struct nouveau_cli_work work;
 97	struct nouveau_vma *vma;
 98};
 99
100static void
101nouveau_gem_object_delete(struct nouveau_vma *vma)
102{
103	nouveau_fence_unref(&vma->fence);
104	nouveau_vma_del(&vma);
105}
106
107static void
108nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
109{
110	struct nouveau_gem_object_unmap *work =
111		container_of(w, typeof(*work), work);
112	nouveau_gem_object_delete(work->vma);
113	kfree(work);
114}
115
116static void
117nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
118{
119	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
120	struct nouveau_gem_object_unmap *work;
121
122	list_del_init(&vma->head);
123
124	if (!fence) {
125		nouveau_gem_object_delete(vma);
126		return;
127	}
128
129	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
130		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
131		nouveau_gem_object_delete(vma);
132		return;
133	}
134
135	work->work.func = nouveau_gem_object_delete_work;
136	work->vma = vma;
137	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
138}
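/*
 * The helpers above let nouveau_gem_object_close() tear a mapping down
 * without stalling: if the vma still carries a fence, the vma is handed to
 * nouveau_cli_work_queue(), which runs nouveau_gem_object_delete_work()
 * once that fence signals.  Only when the small work item cannot be
 * allocated does the code fall back to a synchronous, WARN-guarded wait of
 * up to two seconds before deleting the vma directly.
 */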
139
140void
141nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
142{
143	struct nouveau_cli *cli = nouveau_cli(file_priv);
144	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
145	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
146	struct device *dev = drm->dev->dev;
147	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
148	struct nouveau_vma *vma;
149	int ret;
150
151	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
152		return;
153
154	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
155	if (ret)
156		return;
157
158	vma = nouveau_vma_find(nvbo, vmm);
159	if (vma) {
160		if (--vma->refs == 0) {
161			ret = pm_runtime_get_sync(dev);
162			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
163				nouveau_gem_object_unmap(nvbo, vma);
164				pm_runtime_mark_last_busy(dev);
165			}
166			pm_runtime_put_autosuspend(dev);
167		}
168	}
169	ttm_bo_unreserve(&nvbo->bo);
170}
171
172int
173nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
174		uint32_t tile_mode, uint32_t tile_flags,
175		struct nouveau_bo **pnvbo)
176{
177	struct nouveau_drm *drm = cli->drm;
178	struct nouveau_bo *nvbo;
179	u32 flags = 0;
180	int ret;
181
182	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
183		flags |= TTM_PL_FLAG_VRAM;
184	if (domain & NOUVEAU_GEM_DOMAIN_GART)
185		flags |= TTM_PL_FLAG_TT;
186	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
187		flags |= TTM_PL_FLAG_SYSTEM;
188
189	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
190		flags |= TTM_PL_FLAG_UNCACHED;
191
192	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
193				tile_flags);
194	if (IS_ERR(nvbo))
195		return PTR_ERR(nvbo);
196
197	/* Initialize the embedded gem-object. We return a single gem-reference
198	 * to the caller, instead of a normal nouveau_bo ttm reference. */
199	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
200	if (ret) {
201		nouveau_bo_ref(NULL, &nvbo);
202		return ret;
203	}
204
205	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
206	if (ret) {
207		nouveau_bo_ref(NULL, &nvbo);
208		return ret;
209	}
210
211	/* we restrict allowed domains on nv50+ to only the types
212	 * that were requested at creation time.  not possibly on
213	 * earlier chips without busting the ABI.
214	 */
215	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
216			      NOUVEAU_GEM_DOMAIN_GART;
217	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
218		nvbo->valid_domains &= domain;
219
220	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
221	*pnvbo = nvbo;
222	return 0;
223}
224
225static int
226nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
227		 struct drm_nouveau_gem_info *rep)
228{
229	struct nouveau_cli *cli = nouveau_cli(file_priv);
230	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
231	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
232	struct nouveau_vma *vma;
233
234	if (is_power_of_2(nvbo->valid_domains))
235		rep->domain = nvbo->valid_domains;
236	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
237		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
238	else
239		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
240	rep->offset = nvbo->offset;
241	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
242		vma = nouveau_vma_find(nvbo, vmm);
243		if (!vma)
244			return -EINVAL;
245
246		rep->offset = vma->addr;
247	}
248
249	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
250	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
251	rep->tile_mode = nvbo->mode;
252	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
253	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
254		rep->tile_flags |= nvbo->kind << 8;
255	else
256	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
257		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
258	else
259		rep->tile_flags |= nvbo->zeta;
260	return 0;
261}
262
263int
264nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
265		      struct drm_file *file_priv)
266{
267	struct nouveau_cli *cli = nouveau_cli(file_priv);
268	struct drm_nouveau_gem_new *req = data;
269	struct nouveau_bo *nvbo = NULL;
270	int ret = 0;
271
272	ret = nouveau_gem_new(cli, req->info.size, req->align,
273			      req->info.domain, req->info.tile_mode,
274			      req->info.tile_flags, &nvbo);
275	if (ret)
276		return ret;
277
278	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
279				    &req->info.handle);
280	if (ret == 0) {
281		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
282		if (ret)
283			drm_gem_handle_delete(file_priv, req->info.handle);
284	}
285
286	/* drop reference from allocate - handle holds it now */
287	drm_gem_object_put(&nvbo->bo.base);
288	return ret;
289}
290
291static int
292nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
293		       uint32_t write_domains, uint32_t valid_domains)
294{
295	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
296	struct ttm_buffer_object *bo = &nvbo->bo;
297	uint32_t domains = valid_domains & nvbo->valid_domains &
298		(write_domains ? write_domains : read_domains);
299	uint32_t pref_flags = 0, valid_flags = 0;
300
301	if (!domains)
302		return -EINVAL;
303
304	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
305		valid_flags |= TTM_PL_FLAG_VRAM;
306
307	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
308		valid_flags |= TTM_PL_FLAG_TT;
309
310	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
311	    bo->mem.mem_type == TTM_PL_VRAM)
312		pref_flags |= TTM_PL_FLAG_VRAM;
313
314	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
315		 bo->mem.mem_type == TTM_PL_TT)
316		pref_flags |= TTM_PL_FLAG_TT;
317
318	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
319		pref_flags |= TTM_PL_FLAG_VRAM;
320
321	else
322		pref_flags |= TTM_PL_FLAG_TT;
323
324	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
325
326	return 0;
327}
328
329struct validate_op {
330	struct list_head list;
331	struct ww_acquire_ctx ticket;
332};
333
334static void
335validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
336			struct nouveau_fence *fence,
337			struct drm_nouveau_gem_pushbuf_bo *pbbo)
338{
339	struct nouveau_bo *nvbo;
340	struct drm_nouveau_gem_pushbuf_bo *b;
341
342	while (!list_empty(&op->list)) {
343		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
344		b = &pbbo[nvbo->pbbo_index];
345
346		if (likely(fence)) {
347			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
348
349			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
350				struct nouveau_vma *vma =
351					(void *)(unsigned long)b->user_priv;
352				nouveau_fence_unref(&vma->fence);
353				dma_fence_get(&fence->base);
354				vma->fence = fence;
355			}
356		}
357
358		if (unlikely(nvbo->validate_mapped)) {
359			ttm_bo_kunmap(&nvbo->kmap);
360			nvbo->validate_mapped = false;
361		}
362
363		list_del(&nvbo->entry);
364		nvbo->reserved_by = NULL;
365		ttm_bo_unreserve(&nvbo->bo);
366		drm_gem_object_put(&nvbo->bo.base);
367	}
368}
369
370static void
371validate_fini(struct validate_op *op, struct nouveau_channel *chan,
372	      struct nouveau_fence *fence,
373	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
374{
375	validate_fini_no_ticket(op, chan, fence, pbbo);
376	ww_acquire_fini(&op->ticket);
377}
378
379static int
380validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
381	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
382	      int nr_buffers, struct validate_op *op)
383{
384	struct nouveau_cli *cli = nouveau_cli(file_priv);
385	int trycnt = 0;
386	int ret = -EINVAL, i;
387	struct nouveau_bo *res_bo = NULL;
388	LIST_HEAD(gart_list);
389	LIST_HEAD(vram_list);
390	LIST_HEAD(both_list);
391
392	ww_acquire_init(&op->ticket, &reservation_ww_class);
393retry:
394	if (++trycnt > 100000) {
395		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
396		return -EINVAL;
397	}
398
399	for (i = 0; i < nr_buffers; i++) {
400		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
401		struct drm_gem_object *gem;
402		struct nouveau_bo *nvbo;
403
404		gem = drm_gem_object_lookup(file_priv, b->handle);
405		if (!gem) {
406			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
407			ret = -ENOENT;
408			break;
409		}
410		nvbo = nouveau_gem_object(gem);
411		if (nvbo == res_bo) {
412			res_bo = NULL;
413			drm_gem_object_put(gem);
414			continue;
415		}
416
417		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
418			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
419				      "validation list\n", b->handle);
420			drm_gem_object_put(gem);
421			ret = -EINVAL;
422			break;
423		}
424
425		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
426		if (ret) {
427			list_splice_tail_init(&vram_list, &op->list);
428			list_splice_tail_init(&gart_list, &op->list);
429			list_splice_tail_init(&both_list, &op->list);
430			validate_fini_no_ticket(op, chan, NULL, NULL);
431			if (unlikely(ret == -EDEADLK)) {
432				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
433							      &op->ticket);
434				if (!ret)
435					res_bo = nvbo;
436			}
437			if (unlikely(ret)) {
438				if (ret != -ERESTARTSYS)
439					NV_PRINTK(err, cli, "fail reserve\n");
440				break;
441			}
442		}
443
444		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
445			struct nouveau_vmm *vmm = chan->vmm;
446			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
447			if (!vma) {
448				NV_PRINTK(err, cli, "vma not found!\n");
449				ret = -EINVAL;
450				break;
451			}
452
453			b->user_priv = (uint64_t)(unsigned long)vma;
454		} else {
455			b->user_priv = (uint64_t)(unsigned long)nvbo;
456		}
457
458		nvbo->reserved_by = file_priv;
459		nvbo->pbbo_index = i;
460		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
461		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
462			list_add_tail(&nvbo->entry, &both_list);
463		else
464		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
465			list_add_tail(&nvbo->entry, &vram_list);
466		else
467		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
468			list_add_tail(&nvbo->entry, &gart_list);
469		else {
470			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
471				 b->valid_domains);
472			list_add_tail(&nvbo->entry, &both_list);
473			ret = -EINVAL;
474			break;
475		}
476		if (nvbo == res_bo)
477			goto retry;
478	}
479
480	ww_acquire_done(&op->ticket);
481	list_splice_tail(&vram_list, &op->list);
482	list_splice_tail(&gart_list, &op->list);
483	list_splice_tail(&both_list, &op->list);
484	if (ret)
485		validate_fini(op, chan, NULL, NULL);
486	return ret;
487
488}
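/*
 * Compared with the older sequence-number scheme, the loop above uses the
 * ww_mutex machinery: op->ticket orders this validation against concurrent
 * ones, and a -EDEADLK from ttm_bo_reserve() releases everything reserved
 * so far, takes the contended BO via ttm_bo_reserve_slowpath(), and
 * restarts the walk; res_bo remembers that buffer so the second pass skips
 * reserving it again.
 */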
489
490static int
491validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
492	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
493{
494	struct nouveau_drm *drm = chan->drm;
495	struct nouveau_bo *nvbo;
496	int ret, relocs = 0;
497
498	list_for_each_entry(nvbo, list, entry) {
499		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
500
501		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
502					     b->write_domains,
503					     b->valid_domains);
504		if (unlikely(ret)) {
505			NV_PRINTK(err, cli, "fail set_domain\n");
506			return ret;
507		}
508
509		ret = nouveau_bo_validate(nvbo, true, false);
510		if (unlikely(ret)) {
511			if (ret != -ERESTARTSYS)
512				NV_PRINTK(err, cli, "fail ttm_validate\n");
513			return ret;
514		}
515
516		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
517		if (unlikely(ret)) {
518			if (ret != -ERESTARTSYS)
519				NV_PRINTK(err, cli, "fail post-validate sync\n");
520			return ret;
521		}
522
523		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
524			if (nvbo->offset == b->presumed.offset &&
525			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
526			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
527			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
528			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
529				continue;
530
531			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
532				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
533			else
534				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
535			b->presumed.offset = nvbo->offset;
536			b->presumed.valid = 0;
537			relocs++;
538		}
539	}
540
541	return relocs;
542}
543
544static int
545nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
546			     struct drm_file *file_priv,
547			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
548			     int nr_buffers,
549			     struct validate_op *op, bool *apply_relocs)
550{
551	struct nouveau_cli *cli = nouveau_cli(file_priv);
552	int ret;
553
554	INIT_LIST_HEAD(&op->list);
555
556	if (nr_buffers == 0)
557		return 0;
558
559	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
560	if (unlikely(ret)) {
561		if (ret != -ERESTARTSYS)
562			NV_PRINTK(err, cli, "validate_init\n");
563		return ret;
564	}
565
566	ret = validate_list(chan, cli, &op->list, pbbo);
567	if (unlikely(ret < 0)) {
568		if (ret != -ERESTARTSYS)
569			NV_PRINTK(err, cli, "validating bo list\n");
570		validate_fini(op, chan, NULL, NULL);
571		return ret;
572	}
573	*apply_relocs = ret;
574	return 0;
575}
576
577static inline void
578u_free(void *addr)
579{
580	kvfree(addr);
581}
582
583static inline void *
584u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
585{
586	void *mem;
587	void __user *userptr = (void __force __user *)(uintptr_t)user;
588
589	size *= nmemb;
590
591	mem = kvmalloc(size, GFP_KERNEL);
592	if (!mem)
593		return ERR_PTR(-ENOMEM);
594
595	if (copy_from_user(mem, userptr, size)) {
596		u_free(mem);
597		return ERR_PTR(-EFAULT);
598	}
599
600	return mem;
601}
602
603static int
604nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
605				struct drm_nouveau_gem_pushbuf *req,
606				struct drm_nouveau_gem_pushbuf_reloc *reloc,
607				struct drm_nouveau_gem_pushbuf_bo *bo)
608{
609	int ret = 0;
610	unsigned i;
611
612	for (i = 0; i < req->nr_relocs; i++) {
613		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
614		struct drm_nouveau_gem_pushbuf_bo *b;
615		struct nouveau_bo *nvbo;
616		uint32_t data;
617
618		if (unlikely(r->bo_index >= req->nr_buffers)) {
619			NV_PRINTK(err, cli, "reloc bo index invalid\n");
620			ret = -EINVAL;
621			break;
622		}
623
624		b = &bo[r->bo_index];
625		if (b->presumed.valid)
626			continue;
627
628		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
629			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
630			ret = -EINVAL;
631			break;
632		}
633		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
634
635		if (unlikely(r->reloc_bo_offset + 4 >
636			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
637			NV_PRINTK(err, cli, "reloc outside of bo\n");
638			ret = -EINVAL;
639			break;
640		}
641
642		if (!nvbo->kmap.virtual) {
643			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
644					  &nvbo->kmap);
645			if (ret) {
646				NV_PRINTK(err, cli, "failed kmap for reloc\n");
647				break;
648			}
649			nvbo->validate_mapped = true;
650		}
651
652		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
653			data = b->presumed.offset + r->data;
654		else
655		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
656			data = (b->presumed.offset + r->data) >> 32;
657		else
658			data = r->data;
659
660		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
661			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
662				data |= r->tor;
663			else
664				data |= r->vor;
665		}
666
667		ret = ttm_bo_wait(&nvbo->bo, false, false);
668		if (ret) {
669			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
670			break;
671		}
672
673		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
674	}
675
676	u_free(reloc);
677	return ret;
678}
679
680int
681nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
682			  struct drm_file *file_priv)
683{
684	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
685	struct nouveau_cli *cli = nouveau_cli(file_priv);
686	struct nouveau_abi16_chan *temp;
687	struct nouveau_drm *drm = nouveau_drm(dev);
688	struct drm_nouveau_gem_pushbuf *req = data;
689	struct drm_nouveau_gem_pushbuf_push *push;
690	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
691	struct drm_nouveau_gem_pushbuf_bo *bo;
692	struct nouveau_channel *chan = NULL;
693	struct validate_op op;
694	struct nouveau_fence *fence = NULL;
695	int i, j, ret = 0;
696	bool do_reloc = false, sync = false;
697
698	if (unlikely(!abi16))
699		return -ENOMEM;
700
701	list_for_each_entry(temp, &abi16->channels, head) {
702		if (temp->chan->chid == req->channel) {
703			chan = temp->chan;
704			break;
705		}
706	}
707
708	if (!chan)
709		return nouveau_abi16_put(abi16, -ENOENT);
710	if (unlikely(atomic_read(&chan->killed)))
711		return nouveau_abi16_put(abi16, -ENODEV);
712
713	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
714
715	req->vram_available = drm->gem.vram_available;
716	req->gart_available = drm->gem.gart_available;
717	if (unlikely(req->nr_push == 0))
718		goto out_next;
719
720	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
721		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
722			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
723		return nouveau_abi16_put(abi16, -EINVAL);
724	}
725
726	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
727		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
728			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
729		return nouveau_abi16_put(abi16, -EINVAL);
730	}
731
732	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
733		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
734			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
735		return nouveau_abi16_put(abi16, -EINVAL);
736	}
737
738	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
739	if (IS_ERR(push))
740		return nouveau_abi16_put(abi16, PTR_ERR(push));
741
742	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
743	if (IS_ERR(bo)) {
744		u_free(push);
745		return nouveau_abi16_put(abi16, PTR_ERR(bo));
746	}
747
748	/* Ensure all push buffers are on validate list */
749	for (i = 0; i < req->nr_push; i++) {
750		if (push[i].bo_index >= req->nr_buffers) {
751			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
752			ret = -EINVAL;
753			goto out_prevalid;
754		}
755	}
756
757	/* Validate buffer list */
758revalidate:
759	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
760					   req->nr_buffers, &op, &do_reloc);
761	if (ret) {
762		if (ret != -ERESTARTSYS)
763			NV_PRINTK(err, cli, "validate: %d\n", ret);
764		goto out_prevalid;
765	}
766
767	/* Apply any relocations that are required */
768	if (do_reloc) {
769		if (!reloc) {
770			validate_fini(&op, chan, NULL, bo);
771			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
772			if (IS_ERR(reloc)) {
773				ret = PTR_ERR(reloc);
774				goto out_prevalid;
775			}
776
777			goto revalidate;
778		}
779
780		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
781		if (ret) {
782			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
783			goto out;
784		}
785	}
786
787	if (chan->dma.ib_max) {
788		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
789		if (ret) {
790			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
791			goto out;
792		}
793
794		for (i = 0; i < req->nr_push; i++) {
795			struct nouveau_vma *vma = (void *)(unsigned long)
796				bo[push[i].bo_index].user_priv;
797
798			nv50_dma_push(chan, vma->addr + push[i].offset,
799				      push[i].length);
800		}
801	} else
802	if (drm->client.device.info.chipset >= 0x25) {
803		ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
804		if (ret) {
805			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
806			goto out;
807		}
808
809		for (i = 0; i < req->nr_push; i++) {
810			struct nouveau_bo *nvbo = (void *)(unsigned long)
811				bo[push[i].bo_index].user_priv;
812
813			PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
814			PUSH_DATA(chan->chan.push, 0);
815		}
816	} else {
817		ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
818		if (ret) {
819			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
820			goto out;
821		}
822
823		for (i = 0; i < req->nr_push; i++) {
824			struct nouveau_bo *nvbo = (void *)(unsigned long)
825				bo[push[i].bo_index].user_priv;
826			uint32_t cmd;
827
828			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
829			cmd |= 0x20000000;
830			if (unlikely(cmd != req->suffix0)) {
831				if (!nvbo->kmap.virtual) {
832					ret = ttm_bo_kmap(&nvbo->bo, 0,
833							  nvbo->bo.mem.
834							  num_pages,
835							  &nvbo->kmap);
836					if (ret) {
837						WIND_RING(chan);
838						goto out;
839					}
840					nvbo->validate_mapped = true;
841				}
842
843				nouveau_bo_wr32(nvbo, (push[i].offset +
844						push[i].length - 8) / 4, cmd);
845			}
846
847			PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
848			PUSH_DATA(chan->chan.push, 0);
849			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
850				PUSH_DATA(chan->chan.push, 0);
851		}
852	}
853
854	ret = nouveau_fence_new(chan, false, &fence);
855	if (ret) {
856		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
857		WIND_RING(chan);
858		goto out;
859	}
860
861	if (sync) {
862		if (!(ret = nouveau_fence_wait(fence, false, false))) {
863			if ((ret = dma_fence_get_status(&fence->base)) == 1)
864				ret = 0;
865		}
866	}
867
868out:
869	validate_fini(&op, chan, fence, bo);
870	nouveau_fence_unref(&fence);
871
872	if (do_reloc) {
873		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
874			u64_to_user_ptr(req->buffers);
875
876		for (i = 0; i < req->nr_buffers; i++) {
877			if (bo[i].presumed.valid)
878				continue;
879
880			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
881					 sizeof(bo[i].presumed))) {
882				ret = -EFAULT;
883				break;
884			}
885		}
886		u_free(reloc);
887	}
888out_prevalid:
889	u_free(bo);
890	u_free(push);
891
892out_next:
893	if (chan->dma.ib_max) {
894		req->suffix0 = 0x00000000;
895		req->suffix1 = 0x00000000;
896	} else
897	if (drm->client.device.info.chipset >= 0x25) {
898		req->suffix0 = 0x00020000;
899		req->suffix1 = 0x00000000;
900	} else {
901		req->suffix0 = 0x20000000 |
902			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
903		req->suffix1 = 0x00000000;
904	}
905
906	return nouveau_abi16_put(abi16, ret);
907}
908
909int
910nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
911			   struct drm_file *file_priv)
912{
913	struct drm_nouveau_gem_cpu_prep *req = data;
914	struct drm_gem_object *gem;
915	struct nouveau_bo *nvbo;
916	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
917	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
918	long lret;
919	int ret;
920
921	gem = drm_gem_object_lookup(file_priv, req->handle);
922	if (!gem)
923		return -ENOENT;
924	nvbo = nouveau_gem_object(gem);
925
926	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
927						   no_wait ? 0 : 30 * HZ);
928	if (!lret)
929		ret = -EBUSY;
930	else if (lret > 0)
931		ret = 0;
932	else
933		ret = lret;
934
935	nouveau_bo_sync_for_cpu(nvbo);
936	drm_gem_object_put(gem);
937
938	return ret;
939}
940
941int
942nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
943			   struct drm_file *file_priv)
944{
945	struct drm_nouveau_gem_cpu_fini *req = data;
946	struct drm_gem_object *gem;
947	struct nouveau_bo *nvbo;
948
949	gem = drm_gem_object_lookup(file_priv, req->handle);
950	if (!gem)
951		return -ENOENT;
952	nvbo = nouveau_gem_object(gem);
953
954	nouveau_bo_sync_for_device(nvbo);
955	drm_gem_object_put(gem);
956	return 0;
957}
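/*
 * CPU access sketch (illustrative; struct and flag names as handled by the
 * two ioctls above, with the object assumed to be mmap()ed already through
 * its map_handle):
 *
 *	struct drm_nouveau_gem_cpu_prep prep = {
 *		.handle = bo_handle,
 *		.flags  = NOUVEAU_GEM_CPU_PREP_WRITE,
 *	};
 *	struct drm_nouveau_gem_cpu_fini fini = { .handle = bo_handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_NOUVEAU_GEM_CPU_PREP, &prep);
 *	memcpy(map, data, len);		// CPU writes through the mapping
 *	ioctl(drm_fd, DRM_IOCTL_NOUVEAU_GEM_CPU_FINI, &fini);
 */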
958
959int
960nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
961		       struct drm_file *file_priv)
962{
963	struct drm_nouveau_gem_info *req = data;
964	struct drm_gem_object *gem;
965	int ret;
966
967	gem = drm_gem_object_lookup(file_priv, req->handle);
968	if (!gem)
969		return -ENOENT;
970
971	ret = nouveau_gem_info(file_priv, gem, req);
972	drm_gem_object_put(gem);
973	return ret;
974}
975