v4.6
  1/*
  2 * Copyright (C) 2008 Ben Skeggs.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining
  6 * a copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sublicense, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * The above copyright notice and this permission notice (including the
 14 * next paragraph) shall be included in all copies or substantial
 15 * portions of the Software.
 16 *
 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 24 *
 25 */
 26
 27#include "nouveau_drm.h"
 28#include "nouveau_dma.h"
 29#include "nouveau_fence.h"
 30#include "nouveau_abi16.h"
 31
 32#include "nouveau_ttm.h"
 33#include "nouveau_gem.h"
 34
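/* Final GEM free: wake the GPU via runtime PM (the last TTM unref may
 * touch hardware), tear down any dma-buf import, then drop the TTM
 * reference on the backing buffer object.
 */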
 35void
 36nouveau_gem_object_del(struct drm_gem_object *gem)
 37{
 38	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 39	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 40	struct ttm_buffer_object *bo = &nvbo->bo;
 41	struct device *dev = drm->dev->dev;
 42	int ret;
 43
 44	ret = pm_runtime_get_sync(dev);
 45	if (WARN_ON(ret < 0 && ret != -EACCES))
 46		return;
 47
 48	if (gem->import_attach)
 49		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 50
 51	drm_gem_object_release(gem);
 52
 53	/* reset filp so nouveau_bo_del_ttm() can test for it */
 54	gem->filp = NULL;
 55	ttm_bo_unref(&bo);
 56
 57	pm_runtime_mark_last_busy(dev);
 58	pm_runtime_put_autosuspend(dev);
 59}
 60
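/* Per-client open: map the buffer into the client's GPU address space,
 * or just bump the refcount if a mapping (nvkm_vma) already exists.
 */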
 61int
 62nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 63{
 64	struct nouveau_cli *cli = nouveau_cli(file_priv);
 65	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 66	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 67	struct nvkm_vma *vma;
 68	struct device *dev = drm->dev->dev;
 69	int ret;
 70
 71	if (!cli->vm)
 72		return 0;
 73
 74	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 75	if (ret)
 76		return ret;
 77
 78	vma = nouveau_bo_vma_find(nvbo, cli->vm);
 79	if (!vma) {
 80		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 81		if (!vma) {
 82			ret = -ENOMEM;
 83			goto out;
 84		}
 85
 86		ret = pm_runtime_get_sync(dev);
 87		if (ret < 0 && ret != -EACCES) {
 88			kfree(vma);
 89			goto out;
 90		}
 91
 92		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
 93		if (ret)
 94			kfree(vma);
 95
 96		pm_runtime_mark_last_busy(dev);
 97		pm_runtime_put_autosuspend(dev);
 98	} else {
 99		vma->refcount++;
100	}
101
102out:
103	ttm_bo_unreserve(&nvbo->bo);
104	return ret;
105}
106
107static void
108nouveau_gem_object_delete(void *data)
109{
110	struct nvkm_vma *vma = data;
111	nvkm_vm_unmap(vma);
112	nvkm_vm_put(vma);
113	kfree(vma);
114}
115
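/* If the BO is still mapped and busy, defer the unmap/free until its
 * fence signals (via nouveau_fence_work); otherwise do it immediately.
 */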
116static void
117nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
118{
119	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
120	struct reservation_object *resv = nvbo->bo.resv;
121	struct reservation_object_list *fobj;
122	struct fence *fence = NULL;
123
124	fobj = reservation_object_get_list(resv);
125
126	list_del(&vma->head);
127
128	if (fobj && fobj->shared_count > 1)
129		ttm_bo_wait(&nvbo->bo, true, false, false);
130	else if (fobj && fobj->shared_count == 1)
131		fence = rcu_dereference_protected(fobj->shared[0],
132						reservation_object_held(resv));
133	else
134		fence = reservation_object_get_excl(nvbo->bo.resv);
135
136	if (fence && mapped) {
137		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
138	} else {
139		if (mapped)
140			nvkm_vm_unmap(vma);
141		nvkm_vm_put(vma);
142		kfree(vma);
143	}
144}
145
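/* Per-client close: drop one VMA reference; on the last one, unmap
 * with the device runtime-resumed.
 */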
146void
147nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
148{
149	struct nouveau_cli *cli = nouveau_cli(file_priv);
150	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
151	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
152	struct device *dev = drm->dev->dev;
153	struct nvkm_vma *vma;
154	int ret;
155
156	if (!cli->vm)
157		return;
158
159	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
160	if (ret)
161		return;
162
163	vma = nouveau_bo_vma_find(nvbo, cli->vm);
164	if (vma) {
165		if (--vma->refcount == 0) {
166			ret = pm_runtime_get_sync(dev);
167			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
168				nouveau_gem_object_unmap(nvbo, vma);
169				pm_runtime_mark_last_busy(dev);
170				pm_runtime_put_autosuspend(dev);
171			}
172		}
173	}
174	ttm_bo_unreserve(&nvbo->bo);
175}
176
177int
178nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
179		uint32_t tile_mode, uint32_t tile_flags,
180		struct nouveau_bo **pnvbo)
181{
182	struct nouveau_drm *drm = nouveau_drm(dev);
183	struct nouveau_bo *nvbo;
184	u32 flags = 0;
185	int ret;
186
187	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
188		flags |= TTM_PL_FLAG_VRAM;
189	if (domain & NOUVEAU_GEM_DOMAIN_GART)
190		flags |= TTM_PL_FLAG_TT;
191	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
192		flags |= TTM_PL_FLAG_SYSTEM;
193
194	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
195		flags |= TTM_PL_FLAG_UNCACHED;
196
197	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
198			     tile_flags, NULL, NULL, pnvbo);
199	if (ret)
200		return ret;
201	nvbo = *pnvbo;
202
203	/* we restrict allowed domains on nv50+ to only the types
204	 * that were requested at creation time.  not possible on
205	 * earlier chips without busting the ABI.
206	 */
207	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
208			      NOUVEAU_GEM_DOMAIN_GART;
209	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
210		nvbo->valid_domains &= domain;
211
212	/* Initialize the embedded gem-object. We return a single gem-reference
213	 * to the caller, instead of a normal nouveau_bo ttm reference. */
214	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
215	if (ret) {
216		nouveau_bo_ref(NULL, pnvbo);
217		return -ENOMEM;
218	}
219
220	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
221	return 0;
222}
223
224static int
225nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
226		 struct drm_nouveau_gem_info *rep)
227{
228	struct nouveau_cli *cli = nouveau_cli(file_priv);
229	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
230	struct nvkm_vma *vma;
231
232	if (is_power_of_2(nvbo->valid_domains))
233		rep->domain = nvbo->valid_domains;
234	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
235		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
236	else
237		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
238	rep->offset = nvbo->bo.offset;
239	if (cli->vm) {
240		vma = nouveau_bo_vma_find(nvbo, cli->vm);
241		if (!vma)
242			return -EINVAL;
243
244		rep->offset = vma->offset;
245	}
246
247	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
248	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
249	rep->tile_mode = nvbo->tile_mode;
250	rep->tile_flags = nvbo->tile_flags;
251	return 0;
252}
253
254int
255nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
256		      struct drm_file *file_priv)
257{
258	struct nouveau_drm *drm = nouveau_drm(dev);
259	struct nouveau_cli *cli = nouveau_cli(file_priv);
260	struct nvkm_fb *fb = nvxx_fb(&drm->device);
261	struct drm_nouveau_gem_new *req = data;
262	struct nouveau_bo *nvbo = NULL;
263	int ret = 0;
264
265	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
266		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
267		return -EINVAL;
268	}
269
270	ret = nouveau_gem_new(dev, req->info.size, req->align,
271			      req->info.domain, req->info.tile_mode,
272			      req->info.tile_flags, &nvbo);
273	if (ret)
274		return ret;
275
276	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
277	if (ret == 0) {
278		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
279		if (ret)
280			drm_gem_handle_delete(file_priv, req->info.handle);
281	}
282
283	/* drop reference from allocate - handle holds it now */
284	drm_gem_object_unreference_unlocked(&nvbo->gem);
285	return ret;
286}
287
288static int
289nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
290		       uint32_t write_domains, uint32_t valid_domains)
291{
292	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
293	struct ttm_buffer_object *bo = &nvbo->bo;
294	uint32_t domains = valid_domains & nvbo->valid_domains &
295		(write_domains ? write_domains : read_domains);
296	uint32_t pref_flags = 0, valid_flags = 0;
297
298	if (!domains)
299		return -EINVAL;
300
301	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
302		valid_flags |= TTM_PL_FLAG_VRAM;
303
304	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
305		valid_flags |= TTM_PL_FLAG_TT;
306
307	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
308	    bo->mem.mem_type == TTM_PL_VRAM)
309		pref_flags |= TTM_PL_FLAG_VRAM;
310
311	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
312		 bo->mem.mem_type == TTM_PL_TT)
313		pref_flags |= TTM_PL_FLAG_TT;
314
315	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
316		pref_flags |= TTM_PL_FLAG_VRAM;
317
318	else
319		pref_flags |= TTM_PL_FLAG_TT;
320
321	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
322
323	return 0;
324}
325
326struct validate_op {
327	struct list_head list;
328	struct ww_acquire_ctx ticket;
329};
330
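/* Tears down the validation list: attaches the completion fence to
 * each BO, undoes any kmap done for relocs, then unreserves and
 * unreferences every object.
 */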
331static void
332validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
333			struct drm_nouveau_gem_pushbuf_bo *pbbo)
334{
335	struct nouveau_bo *nvbo;
336	struct drm_nouveau_gem_pushbuf_bo *b;
337
338	while (!list_empty(&op->list)) {
339		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
340		b = &pbbo[nvbo->pbbo_index];
341
342		if (likely(fence))
343			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
344
345		if (unlikely(nvbo->validate_mapped)) {
346			ttm_bo_kunmap(&nvbo->kmap);
347			nvbo->validate_mapped = false;
348		}
349
350		list_del(&nvbo->entry);
351		nvbo->reserved_by = NULL;
352		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
353		drm_gem_object_unreference_unlocked(&nvbo->gem);
354	}
355}
356
357static void
358validate_fini(struct validate_op *op, struct nouveau_fence *fence,
359	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
360{
361	validate_fini_no_ticket(op, fence, pbbo);
362	ww_acquire_fini(&op->ticket);
363}
364
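/* Reserves every buffer in the pushbuf under one ww_acquire ticket.
 * On -EDEADLK the list is unwound and the contended BO is re-reserved
 * via the slowpath, then the whole loop retries.
 */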
365static int
366validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
367	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
368	      int nr_buffers, struct validate_op *op)
369{
370	struct nouveau_cli *cli = nouveau_cli(file_priv);
371	struct drm_device *dev = chan->drm->dev;
372	int trycnt = 0;
373	int ret, i;
374	struct nouveau_bo *res_bo = NULL;
375	LIST_HEAD(gart_list);
376	LIST_HEAD(vram_list);
377	LIST_HEAD(both_list);
378
379	ww_acquire_init(&op->ticket, &reservation_ww_class);
380retry:
381	if (++trycnt > 100000) {
382		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
383		return -EINVAL;
384	}
385
386	for (i = 0; i < nr_buffers; i++) {
387		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
388		struct drm_gem_object *gem;
389		struct nouveau_bo *nvbo;
390
391		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
392		if (!gem) {
393			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
394			ret = -ENOENT;
395			break;
396		}
397		nvbo = nouveau_gem_object(gem);
398		if (nvbo == res_bo) {
399			res_bo = NULL;
400			drm_gem_object_unreference_unlocked(gem);
401			continue;
402		}
403
404		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
405			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
406				      "validation list\n", b->handle);
407			drm_gem_object_unreference_unlocked(gem);
408			ret = -EINVAL;
409			break;
410		}
411
412		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
413		if (ret) {
414			list_splice_tail_init(&vram_list, &op->list);
415			list_splice_tail_init(&gart_list, &op->list);
416			list_splice_tail_init(&both_list, &op->list);
417			validate_fini_no_ticket(op, NULL, NULL);
418			if (unlikely(ret == -EDEADLK)) {
419				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
420							      &op->ticket);
421				if (!ret)
422					res_bo = nvbo;
423			}
424			if (unlikely(ret)) {
425				if (ret != -ERESTARTSYS)
426					NV_PRINTK(err, cli, "fail reserve\n");
427				break;
428			}
429		}
430
431		b->user_priv = (uint64_t)(unsigned long)nvbo;
432		nvbo->reserved_by = file_priv;
433		nvbo->pbbo_index = i;
434		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
435		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
436			list_add_tail(&nvbo->entry, &both_list);
437		else
438		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
439			list_add_tail(&nvbo->entry, &vram_list);
440		else
441		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
442			list_add_tail(&nvbo->entry, &gart_list);
443		else {
444			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
445				 b->valid_domains);
446			list_add_tail(&nvbo->entry, &both_list);
447			ret = -EINVAL;
448			break;
449		}
450		if (nvbo == res_bo)
451			goto retry;
452	}
453
454	ww_acquire_done(&op->ticket);
455	list_splice_tail(&vram_list, &op->list);
456	list_splice_tail(&gart_list, &op->list);
457	list_splice_tail(&both_list, &op->list);
458	if (ret)
459		validate_fini(op, NULL, NULL);
460	return ret;
461
462}
463
464static int
465validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
466	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
467	      uint64_t user_pbbo_ptr)
468{
469	struct nouveau_drm *drm = chan->drm;
470	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
471				(void __force __user *)(uintptr_t)user_pbbo_ptr;
472	struct nouveau_bo *nvbo;
473	int ret, relocs = 0;
474
475	list_for_each_entry(nvbo, list, entry) {
476		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
477
478		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
479					     b->write_domains,
480					     b->valid_domains);
481		if (unlikely(ret)) {
482			NV_PRINTK(err, cli, "fail set_domain\n");
483			return ret;
484		}
485
486		ret = nouveau_bo_validate(nvbo, true, false);
487		if (unlikely(ret)) {
488			if (ret != -ERESTARTSYS)
489				NV_PRINTK(err, cli, "fail ttm_validate\n");
490			return ret;
491		}
492
493		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
494		if (unlikely(ret)) {
495			if (ret != -ERESTARTSYS)
496				NV_PRINTK(err, cli, "fail post-validate sync\n");
497			return ret;
498		}
499
500		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
501			if (nvbo->bo.offset == b->presumed.offset &&
502			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
503			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
504			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
505			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
506				continue;
507
508			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
509				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
510			else
511				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
512			b->presumed.offset = nvbo->bo.offset;
513			b->presumed.valid = 0;
514			relocs++;
515
516			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
517					     &b->presumed, sizeof(b->presumed)))
518				return -EFAULT;
519		}
520	}
521
522	return relocs;
523}
524
525static int
526nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
527			     struct drm_file *file_priv,
528			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
529			     uint64_t user_buffers, int nr_buffers,
530			     struct validate_op *op, int *apply_relocs)
531{
532	struct nouveau_cli *cli = nouveau_cli(file_priv);
533	int ret;
534
535	INIT_LIST_HEAD(&op->list);
536
537	if (nr_buffers == 0)
538		return 0;
539
540	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
541	if (unlikely(ret)) {
542		if (ret != -ERESTARTSYS)
543			NV_PRINTK(err, cli, "validate_init\n");
544		return ret;
545	}
546
547	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
548	if (unlikely(ret < 0)) {
549		if (ret != -ERESTARTSYS)
550			NV_PRINTK(err, cli, "validating bo list\n");
551		validate_fini(op, NULL, NULL);
552		return ret;
553	}
554	*apply_relocs = ret;
555	return 0;
556}
557
558static inline void
559u_free(void *addr)
560{
561	kvfree(addr);
562}
563
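/* Copies a userspace array into a kernel buffer: kmalloc first, with a
 * vmalloc fallback for large requests (paired with u_free()/kvfree()).
 * Note the nmemb * size multiply is not overflow-checked here.
 */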
564static inline void *
565u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
566{
567	void *mem;
568	void __user *userptr = (void __force __user *)(uintptr_t)user;
569
570	size *= nmemb;
571
572	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
573	if (!mem)
574		mem = vmalloc(size);
575	if (!mem)
576		return ERR_PTR(-ENOMEM);
577
578	if (copy_from_user(mem, userptr, size)) {
579		u_free(mem);
580		return ERR_PTR(-EFAULT);
581	}
582
583	return mem;
584}
585
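/* Copies the reloc array from userspace and patches each relocation
 * into its kmapped target buffer, waiting for the BO to idle before
 * each write.
 */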
586static int
587nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
588				struct drm_nouveau_gem_pushbuf *req,
589				struct drm_nouveau_gem_pushbuf_bo *bo)
590{
591	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
592	int ret = 0;
593	unsigned i;
594
595	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
596	if (IS_ERR(reloc))
597		return PTR_ERR(reloc);
598
599	for (i = 0; i < req->nr_relocs; i++) {
600		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
601		struct drm_nouveau_gem_pushbuf_bo *b;
602		struct nouveau_bo *nvbo;
603		uint32_t data;
604
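		/* Note: an index equal to nr_buffers is also out of range;
		 * later kernels (see the v6.2 listing below) check >=. */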
605		if (unlikely(r->bo_index > req->nr_buffers)) {
606			NV_PRINTK(err, cli, "reloc bo index invalid\n");
607			ret = -EINVAL;
608			break;
609		}
610
611		b = &bo[r->bo_index];
612		if (b->presumed.valid)
613			continue;
614
615		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
616			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
617			ret = -EINVAL;
618			break;
619		}
620		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
621
622		if (unlikely(r->reloc_bo_offset + 4 >
623			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
624			NV_PRINTK(err, cli, "reloc outside of bo\n");
625			ret = -EINVAL;
626			break;
627		}
628
629		if (!nvbo->kmap.virtual) {
630			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
631					  &nvbo->kmap);
632			if (ret) {
633				NV_PRINTK(err, cli, "failed kmap for reloc\n");
634				break;
635			}
636			nvbo->validate_mapped = true;
637		}
638
639		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
640			data = b->presumed.offset + r->data;
641		else
642		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
643			data = (b->presumed.offset + r->data) >> 32;
644		else
645			data = r->data;
646
647		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
648			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
649				data |= r->tor;
650			else
651				data |= r->vor;
652		}
653
654		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
655		if (ret) {
656			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
657			break;
658		}
659
660		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
661	}
662
663	u_free(reloc);
664	return ret;
665}
666
667int
668nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
669			  struct drm_file *file_priv)
670{
671	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
672	struct nouveau_cli *cli = nouveau_cli(file_priv);
673	struct nouveau_abi16_chan *temp;
674	struct nouveau_drm *drm = nouveau_drm(dev);
675	struct drm_nouveau_gem_pushbuf *req = data;
676	struct drm_nouveau_gem_pushbuf_push *push;
677	struct drm_nouveau_gem_pushbuf_bo *bo;
678	struct nouveau_channel *chan = NULL;
679	struct validate_op op;
680	struct nouveau_fence *fence = NULL;
681	int i, j, ret = 0, do_reloc = 0;
682
683	if (unlikely(!abi16))
684		return -ENOMEM;
685
686	list_for_each_entry(temp, &abi16->channels, head) {
687		if (temp->chan->chid == req->channel) {
688			chan = temp->chan;
689			break;
690		}
691	}
692
693	if (!chan)
694		return nouveau_abi16_put(abi16, -ENOENT);
695
696	req->vram_available = drm->gem.vram_available;
697	req->gart_available = drm->gem.gart_available;
698	if (unlikely(req->nr_push == 0))
699		goto out_next;
700
701	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
702		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
703			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
704		return nouveau_abi16_put(abi16, -EINVAL);
705	}
706
707	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
708		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
709			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
710		return nouveau_abi16_put(abi16, -EINVAL);
711	}
712
713	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
714		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
715			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
716		return nouveau_abi16_put(abi16, -EINVAL);
717	}
718
719	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
720	if (IS_ERR(push))
721		return nouveau_abi16_put(abi16, PTR_ERR(push));
722
723	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
724	if (IS_ERR(bo)) {
725		u_free(push);
726		return nouveau_abi16_put(abi16, PTR_ERR(bo));
727	}
728
729	/* Ensure all push buffers are on validate list */
730	for (i = 0; i < req->nr_push; i++) {
731		if (push[i].bo_index >= req->nr_buffers) {
732			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
733			ret = -EINVAL;
734			goto out_prevalid;
735		}
736	}
737
738	/* Validate buffer list */
739	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
740					   req->nr_buffers, &op, &do_reloc);
741	if (ret) {
742		if (ret != -ERESTARTSYS)
743			NV_PRINTK(err, cli, "validate: %d\n", ret);
744		goto out_prevalid;
745	}
746
747	/* Apply any relocations that are required */
748	if (do_reloc) {
749		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
750		if (ret) {
751			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
752			goto out;
753		}
754	}
755
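	/* Three submission paths follow: IB-capable channels (ib_max set)
	 * get buffer addresses pushed directly; NV25+ DMA channels use a
	 * call+return sequence; older chips get a jump patched into the
	 * pushbuf itself so it can be stitched into the ring.
	 */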
756	if (chan->dma.ib_max) {
757		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
758		if (ret) {
759			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
760			goto out;
761		}
762
763		for (i = 0; i < req->nr_push; i++) {
764			struct nouveau_bo *nvbo = (void *)(unsigned long)
765				bo[push[i].bo_index].user_priv;
766
767			nv50_dma_push(chan, nvbo, push[i].offset,
768				      push[i].length);
769		}
770	} else
771	if (drm->device.info.chipset >= 0x25) {
772		ret = RING_SPACE(chan, req->nr_push * 2);
773		if (ret) {
774			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
775			goto out;
776		}
777
778		for (i = 0; i < req->nr_push; i++) {
779			struct nouveau_bo *nvbo = (void *)(unsigned long)
780				bo[push[i].bo_index].user_priv;
781
782			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
783			OUT_RING(chan, 0);
784		}
785	} else {
786		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
787		if (ret) {
788			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
789			goto out;
790		}
791
792		for (i = 0; i < req->nr_push; i++) {
793			struct nouveau_bo *nvbo = (void *)(unsigned long)
794				bo[push[i].bo_index].user_priv;
795			uint32_t cmd;
796
797			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
798			cmd |= 0x20000000;
799			if (unlikely(cmd != req->suffix0)) {
800				if (!nvbo->kmap.virtual) {
801					ret = ttm_bo_kmap(&nvbo->bo, 0,
802							  nvbo->bo.mem.
803							  num_pages,
804							  &nvbo->kmap);
805					if (ret) {
806						WIND_RING(chan);
807						goto out;
808					}
809					nvbo->validate_mapped = true;
810				}
811
812				nouveau_bo_wr32(nvbo, (push[i].offset +
813						push[i].length - 8) / 4, cmd);
814			}
815
816			OUT_RING(chan, 0x20000000 |
817				      (nvbo->bo.offset + push[i].offset));
818			OUT_RING(chan, 0);
819			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
820				OUT_RING(chan, 0);
821		}
822	}
823
824	ret = nouveau_fence_new(chan, false, &fence);
825	if (ret) {
826		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
827		WIND_RING(chan);
828		goto out;
829	}
830
831out:
832	validate_fini(&op, fence, bo);
833	nouveau_fence_unref(&fence);
834
835out_prevalid:
836	u_free(bo);
837	u_free(push);
838
839out_next:
840	if (chan->dma.ib_max) {
841		req->suffix0 = 0x00000000;
842		req->suffix1 = 0x00000000;
843	} else
844	if (drm->device.info.chipset >= 0x25) {
845		req->suffix0 = 0x00020000;
846		req->suffix1 = 0x00000000;
847	} else {
848		req->suffix0 = 0x20000000 |
849			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
850		req->suffix1 = 0x00000000;
851	}
852
853	return nouveau_abi16_put(abi16, ret);
854}
855
856int
857nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
858			   struct drm_file *file_priv)
859{
860	struct drm_nouveau_gem_cpu_prep *req = data;
861	struct drm_gem_object *gem;
862	struct nouveau_bo *nvbo;
863	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
864	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
865	int ret;
866
867	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
868	if (!gem)
869		return -ENOENT;
870	nvbo = nouveau_gem_object(gem);
871
872	if (no_wait)
873		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
874	else {
875		long lret;
876
877		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
878		if (!lret)
879			ret = -EBUSY;
880		else if (lret > 0)
881			ret = 0;
882		else
883			ret = lret;
884	}
885	nouveau_bo_sync_for_cpu(nvbo);
886	drm_gem_object_unreference_unlocked(gem);
887
888	return ret;
889}
890
891int
892nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
893			   struct drm_file *file_priv)
894{
895	struct drm_nouveau_gem_cpu_fini *req = data;
896	struct drm_gem_object *gem;
897	struct nouveau_bo *nvbo;
898
899	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
900	if (!gem)
901		return -ENOENT;
902	nvbo = nouveau_gem_object(gem);
903
904	nouveau_bo_sync_for_device(nvbo);
905	drm_gem_object_unreference_unlocked(gem);
906	return 0;
907}
908
909int
910nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
911		       struct drm_file *file_priv)
912{
913	struct drm_nouveau_gem_info *req = data;
914	struct drm_gem_object *gem;
915	int ret;
916
917	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
918	if (!gem)
919		return -ENOENT;
920
921	ret = nouveau_gem_info(file_priv, gem, req);
922	drm_gem_object_unreference_unlocked(gem);
923	return ret;
924}
925
v6.2
   1/*
   2 * Copyright (C) 2008 Ben Skeggs.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining
   6 * a copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sublicense, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial
  15 * portions of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 *
  25 */
  26
  27#include <drm/drm_gem_ttm_helper.h>
  28
  29#include "nouveau_drv.h"
  30#include "nouveau_dma.h"
  31#include "nouveau_fence.h"
  32#include "nouveau_abi16.h"
  33
  34#include "nouveau_ttm.h"
  35#include "nouveau_gem.h"
  36#include "nouveau_mem.h"
  37#include "nouveau_vmm.h"
  38
  39#include <nvif/class.h>
  40#include <nvif/push206e.h>
  41
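/* GEM/TTM fault handler: reserves the BO, gives nouveau a chance to
 * migrate it on fault, and temporarily removes it from the io-reserve
 * LRU while the PTEs are populated.
 */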
  42static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
  43{
  44	struct vm_area_struct *vma = vmf->vma;
  45	struct ttm_buffer_object *bo = vma->vm_private_data;
  46	pgprot_t prot;
  47	vm_fault_t ret;
  48
  49	ret = ttm_bo_vm_reserve(bo, vmf);
  50	if (ret)
  51		return ret;
  52
  53	ret = nouveau_ttm_fault_reserve_notify(bo);
  54	if (ret)
  55		goto error_unlock;
  56
  57	nouveau_bo_del_io_reserve_lru(bo);
  58	prot = vm_get_page_prot(vma->vm_flags);
  59	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
  60	nouveau_bo_add_io_reserve_lru(bo);
  61	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
  62		return ret;
  63
  64error_unlock:
  65	dma_resv_unlock(bo->base.resv);
  66	return ret;
  67}
  68
  69static const struct vm_operations_struct nouveau_ttm_vm_ops = {
  70	.fault = nouveau_ttm_fault,
  71	.open = ttm_bo_vm_open,
  72	.close = ttm_bo_vm_close,
  73	.access = ttm_bo_vm_access
  74};
  75
  76void
  77nouveau_gem_object_del(struct drm_gem_object *gem)
  78{
  79	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
  80	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
  81	struct device *dev = drm->dev->dev;
  82	int ret;
  83
  84	ret = pm_runtime_get_sync(dev);
  85	if (WARN_ON(ret < 0 && ret != -EACCES)) {
  86		pm_runtime_put_autosuspend(dev);
  87		return;
  88	}
  89
  90	if (gem->import_attach)
  91		drm_prime_gem_destroy(gem, nvbo->bo.sg);
  92
  93	ttm_bo_put(&nvbo->bo);
  94
  95	pm_runtime_mark_last_busy(dev);
  96	pm_runtime_put_autosuspend(dev);
  97}
  98
  99int
 100nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 101{
 102	struct nouveau_cli *cli = nouveau_cli(file_priv);
 103	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 104	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 105	struct device *dev = drm->dev->dev;
 106	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
 107	struct nouveau_vma *vma;
 108	int ret;
 109
 110	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 111		return 0;
 112
 113	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 114	if (ret)
 115		return ret;
 116
 117	ret = pm_runtime_get_sync(dev);
 118	if (ret < 0 && ret != -EACCES) {
 119		pm_runtime_put_autosuspend(dev);
 120		goto out;
 121	}
 122
 123	ret = nouveau_vma_new(nvbo, vmm, &vma);
 124	pm_runtime_mark_last_busy(dev);
 125	pm_runtime_put_autosuspend(dev);
 126out:
 127	ttm_bo_unreserve(&nvbo->bo);
 128	return ret;
 129}
 130
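/* Deferred VMA teardown, reworked in this version: a nouveau_cli_work
 * item holds the VMA until its last fence signals, then frees it.
 */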
 131struct nouveau_gem_object_unmap {
 132	struct nouveau_cli_work work;
 133	struct nouveau_vma *vma;
 134};
 135
 136static void
 137nouveau_gem_object_delete(struct nouveau_vma *vma)
 138{
 139	nouveau_fence_unref(&vma->fence);
 140	nouveau_vma_del(&vma);
 141}
 142
 143static void
 144nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
 145{
 146	struct nouveau_gem_object_unmap *work =
 147		container_of(w, typeof(*work), work);
 148	nouveau_gem_object_delete(work->vma);
 149	kfree(work);
 150}
 151
 152static void
 153nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 154{
 155	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
 156	struct nouveau_gem_object_unmap *work;
 157
 158	list_del_init(&vma->head);
 159
 160	if (!fence) {
 161		nouveau_gem_object_delete(vma);
 162		return;
 163	}
 164
 165	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
 166		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
 167		nouveau_gem_object_delete(vma);
 168		return;
 169	}
 170
 171	work->work.func = nouveau_gem_object_delete_work;
 172	work->vma = vma;
 173	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
 174}
 175
 176void
 177nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 178{
 179	struct nouveau_cli *cli = nouveau_cli(file_priv);
 180	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 181	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 182	struct device *dev = drm->dev->dev;
 183	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
 184	struct nouveau_vma *vma;
 185	int ret;
 186
 187	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 188		return;
 189
 190	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 191	if (ret)
 192		return;
 193
 194	vma = nouveau_vma_find(nvbo, vmm);
 195	if (vma) {
 196		if (--vma->refs == 0) {
 197			ret = pm_runtime_get_sync(dev);
 198			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
 199				nouveau_gem_object_unmap(nvbo, vma);
 200				pm_runtime_mark_last_busy(dev);
 201			}
 202			pm_runtime_put_autosuspend(dev);
 203		}
 204	}
 205	ttm_bo_unreserve(&nvbo->bo);
 206}
 207
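/* v6.2 routes per-object callbacks through drm_gem_object_funcs rather
 * than driver-global hooks; vmap/vunmap/mmap come from the GEM-TTM
 * helpers.
 */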
 208const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
 209	.free = nouveau_gem_object_del,
 210	.open = nouveau_gem_object_open,
 211	.close = nouveau_gem_object_close,
 212	.pin = nouveau_gem_prime_pin,
 213	.unpin = nouveau_gem_prime_unpin,
 214	.get_sg_table = nouveau_gem_prime_get_sg_table,
 215	.vmap = drm_gem_ttm_vmap,
 216	.vunmap = drm_gem_ttm_vunmap,
 217	.mmap = drm_gem_ttm_mmap,
 218	.vm_ops = &nouveau_ttm_vm_ops,
 219};
 220
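/* Creation is now split across nouveau_bo_alloc(), drm_gem_object_init()
 * and nouveau_bo_init(); note the manual teardown if GEM init fails
 * before TTM owns the object.
 */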
 221int
 222nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 223		uint32_t tile_mode, uint32_t tile_flags,
 224		struct nouveau_bo **pnvbo)
 225{
 226	struct nouveau_drm *drm = cli->drm;
 227	struct nouveau_bo *nvbo;
 228	int ret;
 229
 230	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
 231		domain |= NOUVEAU_GEM_DOMAIN_CPU;
 232
 233	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
 234				tile_flags);
 235	if (IS_ERR(nvbo))
 236		return PTR_ERR(nvbo);
 237
 238	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
 239
 240	/* Initialize the embedded gem-object. We return a single gem-reference
 241	 * to the caller, instead of a normal nouveau_bo ttm reference. */
 242	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
 243	if (ret) {
 244		drm_gem_object_release(&nvbo->bo.base);
 245		kfree(nvbo);
 246		return ret;
 247	}
 248
 249	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
 250	if (ret)
 251		return ret;
 252
 253	/* we restrict allowed domains on nv50+ to only the types
 254	 * that were requested at creation time.  not possible on
 255	 * earlier chips without busting the ABI.
 256	 */
 257	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 258			      NOUVEAU_GEM_DOMAIN_GART;
 259	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 260		nvbo->valid_domains &= domain;
 261
 262	*pnvbo = nvbo;
 263	return 0;
 264}
 265
 266static int
 267nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 268		 struct drm_nouveau_gem_info *rep)
 269{
 270	struct nouveau_cli *cli = nouveau_cli(file_priv);
 271	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 272	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
 273	struct nouveau_vma *vma;
 274
 275	if (is_power_of_2(nvbo->valid_domains))
 276		rep->domain = nvbo->valid_domains;
 277	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 278		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 279	else
 280		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 281	rep->offset = nvbo->offset;
 282	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 283		vma = nouveau_vma_find(nvbo, vmm);
 284		if (!vma)
 285			return -EINVAL;
 286
 287		rep->offset = vma->addr;
 288	}
 289
 290	rep->size = nvbo->bo.base.size;
 291	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
 292	rep->tile_mode = nvbo->mode;
 293	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
 294	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
 295		rep->tile_flags |= nvbo->kind << 8;
 296	else
 297	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 298		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
 299	else
 300		rep->tile_flags |= nvbo->zeta;
 301	return 0;
 302}
 303
 304int
 305nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 306		      struct drm_file *file_priv)
 307{
 308	struct nouveau_cli *cli = nouveau_cli(file_priv);
 309	struct drm_nouveau_gem_new *req = data;
 310	struct nouveau_bo *nvbo = NULL;
 311	int ret = 0;
 312
 313	ret = nouveau_gem_new(cli, req->info.size, req->align,
 314			      req->info.domain, req->info.tile_mode,
 315			      req->info.tile_flags, &nvbo);
 316	if (ret)
 317		return ret;
 318
 319	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
 320				    &req->info.handle);
 321	if (ret == 0) {
 322		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
 323		if (ret)
 324			drm_gem_handle_delete(file_priv, req->info.handle);
 325	}
 326
 327	/* drop reference from allocate - handle holds it now */
 328	drm_gem_object_put(&nvbo->bo.base);
 329	return ret;
 330}
 331
 332static int
 333nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 334		       uint32_t write_domains, uint32_t valid_domains)
 335{
 336	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 337	struct ttm_buffer_object *bo = &nvbo->bo;
 338	uint32_t domains = valid_domains & nvbo->valid_domains &
 339		(write_domains ? write_domains : read_domains);
 340	uint32_t pref_domains = 0;
 341
 342	if (!domains)
 343		return -EINVAL;
 344
 345	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 346
 347	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 348	    bo->resource->mem_type == TTM_PL_VRAM)
 349		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 350
 351	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
 352		 bo->resource->mem_type == TTM_PL_TT)
 353		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 354
 355	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
 356		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 357
 358	else
 359		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 360
 361	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
 362
 363	return 0;
 364}
 365
 366struct validate_op {
 367	struct list_head list;
 368	struct ww_acquire_ctx ticket;
 369};
 370
 371static void
 372validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
 373			struct nouveau_fence *fence,
 374			struct drm_nouveau_gem_pushbuf_bo *pbbo)
 375{
 376	struct nouveau_bo *nvbo;
 377	struct drm_nouveau_gem_pushbuf_bo *b;
 378
 379	while (!list_empty(&op->list)) {
 380		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
 381		b = &pbbo[nvbo->pbbo_index];
 382
 383		if (likely(fence)) {
 384			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 385
 386			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 387				struct nouveau_vma *vma =
 388					(void *)(unsigned long)b->user_priv;
 389				nouveau_fence_unref(&vma->fence);
 390				dma_fence_get(&fence->base);
 391				vma->fence = fence;
 392			}
 393		}
 394
 395		if (unlikely(nvbo->validate_mapped)) {
 396			ttm_bo_kunmap(&nvbo->kmap);
 397			nvbo->validate_mapped = false;
 398		}
 399
 400		list_del(&nvbo->entry);
 401		nvbo->reserved_by = NULL;
 402		ttm_bo_unreserve(&nvbo->bo);
 403		drm_gem_object_put(&nvbo->bo.base);
 404	}
 405}
 406
 407static void
 408validate_fini(struct validate_op *op, struct nouveau_channel *chan,
 409	      struct nouveau_fence *fence,
 410	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
 411{
 412	validate_fini_no_ticket(op, chan, fence, pbbo);
 413	ww_acquire_fini(&op->ticket);
 414}
 415
 416static int
 417validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 418	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
 419	      int nr_buffers, struct validate_op *op)
 420{
 421	struct nouveau_cli *cli = nouveau_cli(file_priv);
 422	int trycnt = 0;
 423	int ret = -EINVAL, i;
 424	struct nouveau_bo *res_bo = NULL;
 425	LIST_HEAD(gart_list);
 426	LIST_HEAD(vram_list);
 427	LIST_HEAD(both_list);
 428
 429	ww_acquire_init(&op->ticket, &reservation_ww_class);
 430retry:
 431	if (++trycnt > 100000) {
 432		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
 433		return -EINVAL;
 434	}
 435
 436	for (i = 0; i < nr_buffers; i++) {
 437		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
 438		struct drm_gem_object *gem;
 439		struct nouveau_bo *nvbo;
 440
 441		gem = drm_gem_object_lookup(file_priv, b->handle);
 442		if (!gem) {
 443			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
 444			ret = -ENOENT;
 445			break;
 446		}
 447		nvbo = nouveau_gem_object(gem);
 448		if (nvbo == res_bo) {
 449			res_bo = NULL;
 450			drm_gem_object_put(gem);
 451			continue;
 452		}
 453
 454		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
 455			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
 456				      "validation list\n", b->handle);
 457			drm_gem_object_put(gem);
 458			ret = -EINVAL;
 459			break;
 460		}
 461
 462		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
 463		if (ret) {
 464			list_splice_tail_init(&vram_list, &op->list);
 465			list_splice_tail_init(&gart_list, &op->list);
 466			list_splice_tail_init(&both_list, &op->list);
 467			validate_fini_no_ticket(op, chan, NULL, NULL);
 468			if (unlikely(ret == -EDEADLK)) {
 469				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
 470							      &op->ticket);
 471				if (!ret)
 472					res_bo = nvbo;
 473			}
 474			if (unlikely(ret)) {
 475				if (ret != -ERESTARTSYS)
 476					NV_PRINTK(err, cli, "fail reserve\n");
 477				break;
 478			}
 479		}
 480
 481		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 482			struct nouveau_vmm *vmm = chan->vmm;
 483			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
 484			if (!vma) {
 485				NV_PRINTK(err, cli, "vma not found!\n");
 486				ret = -EINVAL;
 487				break;
 488			}
 489
 490			b->user_priv = (uint64_t)(unsigned long)vma;
 491		} else {
 492			b->user_priv = (uint64_t)(unsigned long)nvbo;
 493		}
 494
 495		nvbo->reserved_by = file_priv;
 496		nvbo->pbbo_index = i;
 497		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 498		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
 499			list_add_tail(&nvbo->entry, &both_list);
 500		else
 501		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
 502			list_add_tail(&nvbo->entry, &vram_list);
 503		else
 504		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 505			list_add_tail(&nvbo->entry, &gart_list);
 506		else {
 507			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
 508				 b->valid_domains);
 509			list_add_tail(&nvbo->entry, &both_list);
 510			ret = -EINVAL;
 511			break;
 512		}
 513		if (nvbo == res_bo)
 514			goto retry;
 515	}
 516
 517	ww_acquire_done(&op->ticket);
 518	list_splice_tail(&vram_list, &op->list);
 519	list_splice_tail(&gart_list, &op->list);
 520	list_splice_tail(&both_list, &op->list);
 521	if (ret)
 522		validate_fini(op, chan, NULL, NULL);
 523	return ret;
 524
 525}
 526
 527static int
 528validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 529	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
 530{
 531	struct nouveau_drm *drm = chan->drm;
 532	struct nouveau_bo *nvbo;
 533	int ret, relocs = 0;
 534
 535	list_for_each_entry(nvbo, list, entry) {
 536		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 537
 538		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
 539					     b->write_domains,
 540					     b->valid_domains);
 541		if (unlikely(ret)) {
 542			NV_PRINTK(err, cli, "fail set_domain\n");
 543			return ret;
 544		}
 545
 546		ret = nouveau_bo_validate(nvbo, true, false);
 547		if (unlikely(ret)) {
 548			if (ret != -ERESTARTSYS)
 549				NV_PRINTK(err, cli, "fail ttm_validate\n");
 550			return ret;
 551		}
 552
 553		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 554		if (unlikely(ret)) {
 555			if (ret != -ERESTARTSYS)
 556				NV_PRINTK(err, cli, "fail post-validate sync\n");
 557			return ret;
 558		}
 559
 560		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 561			if (nvbo->offset == b->presumed.offset &&
 562			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 563			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 564			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
 565			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 566				continue;
 567
 568			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 569				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 570			else
 571				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 572			b->presumed.offset = nvbo->offset;
 573			b->presumed.valid = 0;
 574			relocs++;
 575		}
 576	}
 577
 578	return relocs;
 579}
 580
 581static int
 582nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 583			     struct drm_file *file_priv,
 584			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
 585			     int nr_buffers,
 586			     struct validate_op *op, bool *apply_relocs)
 587{
 588	struct nouveau_cli *cli = nouveau_cli(file_priv);
 589	int ret;
 590
 591	INIT_LIST_HEAD(&op->list);
 592
 593	if (nr_buffers == 0)
 594		return 0;
 595
 596	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 597	if (unlikely(ret)) {
 598		if (ret != -ERESTARTSYS)
 599			NV_PRINTK(err, cli, "validate_init\n");
 600		return ret;
 601	}
 602
 603	ret = validate_list(chan, cli, &op->list, pbbo);
 604	if (unlikely(ret < 0)) {
 605		if (ret != -ERESTARTSYS)
 606			NV_PRINTK(err, cli, "validating bo list\n");
 607		validate_fini(op, chan, NULL, NULL);
 608		return ret;
 609	} else if (ret > 0) {
 610		*apply_relocs = true;
 611	}
 612
 613	return 0;
 614}
 615
 616static inline void
 617u_free(void *addr)
 618{
 619	kvfree(addr);
 620}
 621
 622static inline void *
 623u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 624{
 625	void *mem;
 626	void __user *userptr = (void __force __user *)(uintptr_t)user;
 627
 628	size *= nmemb;
 629
 630	mem = kvmalloc(size, GFP_KERNEL);
 631	if (!mem)
 632		return ERR_PTR(-ENOMEM);
 633
 634	if (copy_from_user(mem, userptr, size)) {
 635		u_free(mem);
 636		return ERR_PTR(-EFAULT);
 637	}
 638
 639	return mem;
 640}
 641
 642static int
 643nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 644				struct drm_nouveau_gem_pushbuf *req,
 645				struct drm_nouveau_gem_pushbuf_reloc *reloc,
 646				struct drm_nouveau_gem_pushbuf_bo *bo)
 647{
 648	int ret = 0;
 649	unsigned i;
 650
 651	for (i = 0; i < req->nr_relocs; i++) {
 652		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 653		struct drm_nouveau_gem_pushbuf_bo *b;
 654		struct nouveau_bo *nvbo;
 655		uint32_t data;
 656
 657		if (unlikely(r->bo_index >= req->nr_buffers)) {
 658			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 659			ret = -EINVAL;
 660			break;
 661		}
 662
 663		b = &bo[r->bo_index];
 664		if (b->presumed.valid)
 665			continue;
 666
 667		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
 668			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 669			ret = -EINVAL;
 670			break;
 671		}
 672		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
 673
 674		if (unlikely(r->reloc_bo_offset + 4 >
 675			     nvbo->bo.base.size)) {
 676			NV_PRINTK(err, cli, "reloc outside of bo\n");
 677			ret = -EINVAL;
 678			break;
 679		}
 680
 681		if (!nvbo->kmap.virtual) {
 682			ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
 683					  &nvbo->kmap);
 684			if (ret) {
 685				NV_PRINTK(err, cli, "failed kmap for reloc\n");
 686				break;
 687			}
 688			nvbo->validate_mapped = true;
 689		}
 690
 691		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
 692			data = b->presumed.offset + r->data;
 693		else
 694		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
 695			data = (b->presumed.offset + r->data) >> 32;
 696		else
 697			data = r->data;
 698
 699		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
 700			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 701				data |= r->tor;
 702			else
 703				data |= r->vor;
 704		}
 705
 706		ret = ttm_bo_wait(&nvbo->bo, false, false);
 707		if (ret) {
 708			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
 709			break;
 710		}
 711
 712		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 713	}
 714
 715	return ret;
 716}
 717
 718int
 719nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 720			  struct drm_file *file_priv)
 721{
 722	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
 723	struct nouveau_cli *cli = nouveau_cli(file_priv);
 724	struct nouveau_abi16_chan *temp;
 725	struct nouveau_drm *drm = nouveau_drm(dev);
 726	struct drm_nouveau_gem_pushbuf *req = data;
 727	struct drm_nouveau_gem_pushbuf_push *push;
 728	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 729	struct drm_nouveau_gem_pushbuf_bo *bo;
 730	struct nouveau_channel *chan = NULL;
 731	struct validate_op op;
 732	struct nouveau_fence *fence = NULL;
 733	int i, j, ret = 0;
 734	bool do_reloc = false, sync = false;
 735
 736	if (unlikely(!abi16))
 737		return -ENOMEM;
 738
 739	list_for_each_entry(temp, &abi16->channels, head) {
 740		if (temp->chan->chid == req->channel) {
 741			chan = temp->chan;
 742			break;
 743		}
 744	}
 745
 746	if (!chan)
 747		return nouveau_abi16_put(abi16, -ENOENT);
 748	if (unlikely(atomic_read(&chan->killed)))
 749		return nouveau_abi16_put(abi16, -ENODEV);
 750
 751	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
 752
 753	req->vram_available = drm->gem.vram_available;
 754	req->gart_available = drm->gem.gart_available;
 755	if (unlikely(req->nr_push == 0))
 756		goto out_next;
 757
 758	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 759		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
 760			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 761		return nouveau_abi16_put(abi16, -EINVAL);
 762	}
 763
 764	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 765		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
 766			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
 767		return nouveau_abi16_put(abi16, -EINVAL);
 768	}
 769
 770	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 771		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
 772			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 773		return nouveau_abi16_put(abi16, -EINVAL);
 774	}
 775
 776	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
 777	if (IS_ERR(push))
 778		return nouveau_abi16_put(abi16, PTR_ERR(push));
 779
 780	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 781	if (IS_ERR(bo)) {
 782		u_free(push);
 783		return nouveau_abi16_put(abi16, PTR_ERR(bo));
 784	}
 785
 786	/* Ensure all push buffers are on validate list */
 787	for (i = 0; i < req->nr_push; i++) {
 788		if (push[i].bo_index >= req->nr_buffers) {
 789			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
 790			ret = -EINVAL;
 791			goto out_prevalid;
 792		}
 793	}
 794
 795	/* Validate buffer list */
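	/* Validation may run twice: if relocations turn out to be needed,
	 * all reservations are dropped, the reloc array is copied in from
	 * userspace, and control jumps back to revalidate below.
	 */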
 796revalidate:
 797	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
 798					   req->nr_buffers, &op, &do_reloc);
 799	if (ret) {
 800		if (ret != -ERESTARTSYS)
 801			NV_PRINTK(err, cli, "validate: %d\n", ret);
 802		goto out_prevalid;
 803	}
 804
 805	/* Apply any relocations that are required */
 806	if (do_reloc) {
 807		if (!reloc) {
 808			validate_fini(&op, chan, NULL, bo);
 809			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 810			if (IS_ERR(reloc)) {
 811				ret = PTR_ERR(reloc);
 812				goto out_prevalid;
 813			}
 814
 815			goto revalidate;
 816		}
 817
 818		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
 819		if (ret) {
 820			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
 821			goto out;
 822		}
 823	}
 824
 825	if (chan->dma.ib_max) {
 826		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 827		if (ret) {
 828			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
 829			goto out;
 830		}
 831
 832		for (i = 0; i < req->nr_push; i++) {
 833			struct nouveau_vma *vma = (void *)(unsigned long)
 834				bo[push[i].bo_index].user_priv;
 835
 836			nv50_dma_push(chan, vma->addr + push[i].offset,
 837				      push[i].length);
 838		}
 839	} else
 840	if (drm->client.device.info.chipset >= 0x25) {
 841		ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
 842		if (ret) {
 843			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
 844			goto out;
 845		}
 846
 847		for (i = 0; i < req->nr_push; i++) {
 848			struct nouveau_bo *nvbo = (void *)(unsigned long)
 849				bo[push[i].bo_index].user_priv;
 850
 851			PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
 852			PUSH_DATA(chan->chan.push, 0);
 853		}
 854	} else {
 855		ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 856		if (ret) {
 857			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
 858			goto out;
 859		}
 860
 861		for (i = 0; i < req->nr_push; i++) {
 862			struct nouveau_bo *nvbo = (void *)(unsigned long)
 863				bo[push[i].bo_index].user_priv;
 864			uint32_t cmd;
 865
 866			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
 867			cmd |= 0x20000000;
 868			if (unlikely(cmd != req->suffix0)) {
 869				if (!nvbo->kmap.virtual) {
 870					ret = ttm_bo_kmap(&nvbo->bo, 0,
 871							  PFN_UP(nvbo->bo.base.size),
 872							  &nvbo->kmap);
 873					if (ret) {
 874						WIND_RING(chan);
 875						goto out;
 876					}
 877					nvbo->validate_mapped = true;
 878				}
 879
 880				nouveau_bo_wr32(nvbo, (push[i].offset +
 881						push[i].length - 8) / 4, cmd);
 882			}
 883
 884			PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
 885			PUSH_DATA(chan->chan.push, 0);
 886			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
 887				PUSH_DATA(chan->chan.push, 0);
 888		}
 889	}
 890
 891	ret = nouveau_fence_new(chan, false, &fence);
 892	if (ret) {
 893		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
 894		WIND_RING(chan);
 895		goto out;
 896	}
 897
 898	if (sync) {
 899		if (!(ret = nouveau_fence_wait(fence, false, false))) {
 900			if ((ret = dma_fence_get_status(&fence->base)) == 1)
 901				ret = 0;
 902		}
 903	}
 904
 905out:
 906	validate_fini(&op, chan, fence, bo);
 907	nouveau_fence_unref(&fence);
 908
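	/* Copy updated presumed offsets back to userspace so subsequent
	 * submissions of these buffers can skip relocations.
	 */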
 909	if (do_reloc) {
 910		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 911			u64_to_user_ptr(req->buffers);
 912
 913		for (i = 0; i < req->nr_buffers; i++) {
 914			if (bo[i].presumed.valid)
 915				continue;
 916
 917			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
 918					 sizeof(bo[i].presumed))) {
 919				ret = -EFAULT;
 920				break;
 921			}
 922		}
 923	}
 924out_prevalid:
 925	if (!IS_ERR(reloc))
 926		u_free(reloc);
 927	u_free(bo);
 928	u_free(push);
 929
 930out_next:
 931	if (chan->dma.ib_max) {
 932		req->suffix0 = 0x00000000;
 933		req->suffix1 = 0x00000000;
 934	} else
 935	if (drm->client.device.info.chipset >= 0x25) {
 936		req->suffix0 = 0x00020000;
 937		req->suffix1 = 0x00000000;
 938	} else {
 939		req->suffix0 = 0x20000000 |
 940			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
 941		req->suffix1 = 0x00000000;
 942	}
 943
 944	return nouveau_abi16_put(abi16, ret);
 945}
 946
 947int
 948nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 949			   struct drm_file *file_priv)
 950{
 951	struct drm_nouveau_gem_cpu_prep *req = data;
 952	struct drm_gem_object *gem;
 953	struct nouveau_bo *nvbo;
 954	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
 955	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
 956	long lret;
 957	int ret;
 958
 959	gem = drm_gem_object_lookup(file_priv, req->handle);
 960	if (!gem)
 961		return -ENOENT;
 962	nvbo = nouveau_gem_object(gem);
 963
 964	lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
 965				     dma_resv_usage_rw(write), true,
 966				     no_wait ? 0 : 30 * HZ);
 967	if (!lret)
 968		ret = -EBUSY;
 969	else if (lret > 0)
 970		ret = 0;
 971	else
 972		ret = lret;
 973
 974	nouveau_bo_sync_for_cpu(nvbo);
 975	drm_gem_object_put(gem);
 976
 977	return ret;
 978}
 979
 980int
 981nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 982			   struct drm_file *file_priv)
 983{
 984	struct drm_nouveau_gem_cpu_fini *req = data;
 985	struct drm_gem_object *gem;
 986	struct nouveau_bo *nvbo;
 987
 988	gem = drm_gem_object_lookup(file_priv, req->handle);
 989	if (!gem)
 990		return -ENOENT;
 991	nvbo = nouveau_gem_object(gem);
 992
 993	nouveau_bo_sync_for_device(nvbo);
 994	drm_gem_object_put(gem);
 995	return 0;
 996}
 997
 998int
 999nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
1000		       struct drm_file *file_priv)
1001{
1002	struct drm_nouveau_gem_info *req = data;
1003	struct drm_gem_object *gem;
1004	int ret;
1005
1006	gem = drm_gem_object_lookup(file_priv, req->handle);
1007	if (!gem)
1008		return -ENOENT;
1009
1010	ret = nouveau_gem_info(file_priv, gem, req);
1011	drm_gem_object_put(gem);
1012	return ret;
1013}
1014