v4.6
  1/*
  2 * Copyright (C) 2008 Ben Skeggs.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining
  6 * a copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sublicense, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * The above copyright notice and this permission notice (including the
 14 * next paragraph) shall be included in all copies or substantial
 15 * portions of the Software.
 16 *
 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 24 *
 25 */
 26
 27#include "nouveau_drm.h"
 28#include "nouveau_dma.h"
 29#include "nouveau_fence.h"
 30#include "nouveau_abi16.h"
 31
 32#include "nouveau_ttm.h"
 33#include "nouveau_gem.h"
 34
 35void
 36nouveau_gem_object_del(struct drm_gem_object *gem)
 37{
 38	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 39	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 40	struct ttm_buffer_object *bo = &nvbo->bo;
 41	struct device *dev = drm->dev->dev;
 42	int ret;
 43
 44	ret = pm_runtime_get_sync(dev);
 45	if (WARN_ON(ret < 0 && ret != -EACCES))
 46		return;
 47
 48	if (gem->import_attach)
 49		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 50
 51	drm_gem_object_release(gem);
 52
 53	/* reset filp so nouveau_bo_del_ttm() can test for it */
 54	gem->filp = NULL;
 55	ttm_bo_unref(&bo);
 56
 57	pm_runtime_mark_last_busy(dev);
 58	pm_runtime_put_autosuspend(dev);
 59}
 60
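/* GEM open hook: if the client has a VM, take a reference on this
 * BO's VMA in that VM, allocating and mapping a new one (with the
 * device runtime-resumed) on first use.
 */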
 61int
 62nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 63{
 64	struct nouveau_cli *cli = nouveau_cli(file_priv);
 65	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 66	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 67	struct nvkm_vma *vma;
 68	struct device *dev = drm->dev->dev;
 69	int ret;
 70
 71	if (!cli->vm)
 72		return 0;
 73
 74	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 75	if (ret)
 76		return ret;
 77
 78	vma = nouveau_bo_vma_find(nvbo, cli->vm);
 79	if (!vma) {
 80		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 81		if (!vma) {
 82			ret = -ENOMEM;
 83			goto out;
 84		}
 85
 86		ret = pm_runtime_get_sync(dev);
 87		if (ret < 0 && ret != -EACCES) {
 88			kfree(vma);
 89			goto out;
 90		}
 91
 92		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
 93		if (ret)
 94			kfree(vma);
 95
 96		pm_runtime_mark_last_busy(dev);
 97		pm_runtime_put_autosuspend(dev);
 98	} else {
 99		vma->refcount++;
100	}
101
102out:
103	ttm_bo_unreserve(&nvbo->bo);
104	return ret;
105}
106
107static void
108nouveau_gem_object_delete(void *data)
109{
110	struct nvkm_vma *vma = data;
111	nvkm_vm_unmap(vma);
112	nvkm_vm_put(vma);
113	kfree(vma);
114}
115
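/* Tear down a VMA once the GPU is done with it: pick the relevant
 * fence from the reservation object and, if the BO is resident and a
 * fence is pending, defer the unmap/put/free to fence completion via
 * nouveau_fence_work(); with more than one shared fence the code
 * waits synchronously instead.
 */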
116static void
117nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
118{
119	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
120	struct reservation_object *resv = nvbo->bo.resv;
121	struct reservation_object_list *fobj;
122	struct fence *fence = NULL;
123
124	fobj = reservation_object_get_list(resv);
125
126	list_del(&vma->head);
127
128	if (fobj && fobj->shared_count > 1)
129		ttm_bo_wait(&nvbo->bo, true, false, false);
130	else if (fobj && fobj->shared_count == 1)
131		fence = rcu_dereference_protected(fobj->shared[0],
132						reservation_object_held(resv));
133	else
134		fence = reservation_object_get_excl(nvbo->bo.resv);
135
136	if (fence && mapped) {
137		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
138	} else {
139		if (mapped)
140			nvkm_vm_unmap(vma);
141		nvkm_vm_put(vma);
142		kfree(vma);
143	}
144}
145
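/* GEM close hook: drop the client's reference on its VMA; the last
 * reference unmaps the BO from the client's VM under a runtime-PM
 * reference.
 */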
146void
147nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
148{
149	struct nouveau_cli *cli = nouveau_cli(file_priv);
150	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
151	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
152	struct device *dev = drm->dev->dev;
153	struct nvkm_vma *vma;
154	int ret;
155
156	if (!cli->vm)
157		return;
158
159	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
160	if (ret)
161		return;
162
163	vma = nouveau_bo_vma_find(nvbo, cli->vm);
164	if (vma) {
165		if (--vma->refcount == 0) {
166			ret = pm_runtime_get_sync(dev);
167			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
168				nouveau_gem_object_unmap(nvbo, vma);
169				pm_runtime_mark_last_busy(dev);
170				pm_runtime_put_autosuspend(dev);
171			}
172		}
173	}
174	ttm_bo_unreserve(&nvbo->bo);
175}
176
177int
178nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
179		uint32_t tile_mode, uint32_t tile_flags,
180		struct nouveau_bo **pnvbo)
181{
182	struct nouveau_drm *drm = nouveau_drm(dev);
183	struct nouveau_bo *nvbo;
184	u32 flags = 0;
185	int ret;
186
187	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
188		flags |= TTM_PL_FLAG_VRAM;
189	if (domain & NOUVEAU_GEM_DOMAIN_GART)
190		flags |= TTM_PL_FLAG_TT;
191	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
192		flags |= TTM_PL_FLAG_SYSTEM;
193
194	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
195		flags |= TTM_PL_FLAG_UNCACHED;
196
197	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
198			     tile_flags, NULL, NULL, pnvbo);
199	if (ret)
200		return ret;
201	nvbo = *pnvbo;
202
203	/* we restrict allowed domains on nv50+ to only the types
 204	 * that were requested at creation time.  not possible on
205	 * earlier chips without busting the ABI.
206	 */
207	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
208			      NOUVEAU_GEM_DOMAIN_GART;
209	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
210		nvbo->valid_domains &= domain;
211
212	/* Initialize the embedded gem-object. We return a single gem-reference
213	 * to the caller, instead of a normal nouveau_bo ttm reference. */
214	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
215	if (ret) {
216		nouveau_bo_ref(NULL, pnvbo);
217		return -ENOMEM;
218	}
219
220	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
221	return 0;
222}
223
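/* Fill a drm_nouveau_gem_info reply: domain, GPU offset (the per-VM
 * VMA offset when the client has a VM), size, mmap handle and tiling
 * state.
 */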
224static int
225nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
226		 struct drm_nouveau_gem_info *rep)
227{
228	struct nouveau_cli *cli = nouveau_cli(file_priv);
229	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
230	struct nvkm_vma *vma;
231
232	if (is_power_of_2(nvbo->valid_domains))
233		rep->domain = nvbo->valid_domains;
234	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
235		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
236	else
237		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
238	rep->offset = nvbo->bo.offset;
239	if (cli->vm) {
240		vma = nouveau_bo_vma_find(nvbo, cli->vm);
241		if (!vma)
242			return -EINVAL;
243
244		rep->offset = vma->offset;
245	}
246
247	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
248	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
249	rep->tile_mode = nvbo->tile_mode;
250	rep->tile_flags = nvbo->tile_flags;
251	return 0;
252}
253
254int
255nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
256		      struct drm_file *file_priv)
257{
258	struct nouveau_drm *drm = nouveau_drm(dev);
259	struct nouveau_cli *cli = nouveau_cli(file_priv);
260	struct nvkm_fb *fb = nvxx_fb(&drm->device);
261	struct drm_nouveau_gem_new *req = data;
262	struct nouveau_bo *nvbo = NULL;
263	int ret = 0;
264
265	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
266		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
267		return -EINVAL;
268	}
269
270	ret = nouveau_gem_new(dev, req->info.size, req->align,
271			      req->info.domain, req->info.tile_mode,
272			      req->info.tile_flags, &nvbo);
273	if (ret)
274		return ret;
275
276	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
277	if (ret == 0) {
278		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
279		if (ret)
280			drm_gem_handle_delete(file_priv, req->info.handle);
281	}
282
283	/* drop reference from allocate - handle holds it now */
284	drm_gem_object_unreference_unlocked(&nvbo->gem);
285	return ret;
286}
287
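/* Turn the requested read/write domains into TTM placement flags,
 * preferring wherever the BO currently resides to avoid needless
 * migration.
 */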
288static int
289nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
290		       uint32_t write_domains, uint32_t valid_domains)
291{
292	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
293	struct ttm_buffer_object *bo = &nvbo->bo;
294	uint32_t domains = valid_domains & nvbo->valid_domains &
295		(write_domains ? write_domains : read_domains);
296	uint32_t pref_flags = 0, valid_flags = 0;
297
298	if (!domains)
299		return -EINVAL;
300
301	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
302		valid_flags |= TTM_PL_FLAG_VRAM;
303
304	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
305		valid_flags |= TTM_PL_FLAG_TT;
306
307	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
308	    bo->mem.mem_type == TTM_PL_VRAM)
309		pref_flags |= TTM_PL_FLAG_VRAM;
310
311	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
312		 bo->mem.mem_type == TTM_PL_TT)
313		pref_flags |= TTM_PL_FLAG_TT;
314
315	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
316		pref_flags |= TTM_PL_FLAG_VRAM;
317
318	else
319		pref_flags |= TTM_PL_FLAG_TT;
320
321	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
322
323	return 0;
324}
325
326struct validate_op {
327	struct list_head list;
328	struct ww_acquire_ctx ticket;
329};
330
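/* Undo validate_init() for every BO still on the list: attach the new
 * fence (exclusively for writers), drop any kmap taken for relocs,
 * unreserve the BO and release its GEM reference.
 */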
331static void
332validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
333			struct drm_nouveau_gem_pushbuf_bo *pbbo)
334{
335	struct nouveau_bo *nvbo;
336	struct drm_nouveau_gem_pushbuf_bo *b;
337
338	while (!list_empty(&op->list)) {
339		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
340		b = &pbbo[nvbo->pbbo_index];
341
342		if (likely(fence))
343			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
344
345		if (unlikely(nvbo->validate_mapped)) {
346			ttm_bo_kunmap(&nvbo->kmap);
347			nvbo->validate_mapped = false;
348		}
349
350		list_del(&nvbo->entry);
351		nvbo->reserved_by = NULL;
352		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
353		drm_gem_object_unreference_unlocked(&nvbo->gem);
354	}
355}
356
357static void
358validate_fini(struct validate_op *op, struct nouveau_fence *fence,
359	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
360{
361	validate_fini_no_ticket(op, fence, pbbo);
362	ww_acquire_fini(&op->ticket);
363}
364
365static int
366validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
367	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
368	      int nr_buffers, struct validate_op *op)
369{
370	struct nouveau_cli *cli = nouveau_cli(file_priv);
371	struct drm_device *dev = chan->drm->dev;
372	int trycnt = 0;
373	int ret, i;
374	struct nouveau_bo *res_bo = NULL;
375	LIST_HEAD(gart_list);
376	LIST_HEAD(vram_list);
377	LIST_HEAD(both_list);
378
379	ww_acquire_init(&op->ticket, &reservation_ww_class);
380retry:
381	if (++trycnt > 100000) {
382		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
383		return -EINVAL;
384	}
385
386	for (i = 0; i < nr_buffers; i++) {
387		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
388		struct drm_gem_object *gem;
389		struct nouveau_bo *nvbo;
390
391		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
392		if (!gem) {
393			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
394			ret = -ENOENT;
395			break;
396		}
397		nvbo = nouveau_gem_object(gem);
398		if (nvbo == res_bo) {
399			res_bo = NULL;
400			drm_gem_object_unreference_unlocked(gem);
401			continue;
402		}
403
404		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
405			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
406				      "validation list\n", b->handle);
407			drm_gem_object_unreference_unlocked(gem);
408			ret = -EINVAL;
409			break;
410		}
411
412		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
413		if (ret) {
414			list_splice_tail_init(&vram_list, &op->list);
415			list_splice_tail_init(&gart_list, &op->list);
416			list_splice_tail_init(&both_list, &op->list);
417			validate_fini_no_ticket(op, NULL, NULL);
418			if (unlikely(ret == -EDEADLK)) {
419				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
420							      &op->ticket);
421				if (!ret)
422					res_bo = nvbo;
423			}
424			if (unlikely(ret)) {
425				if (ret != -ERESTARTSYS)
426					NV_PRINTK(err, cli, "fail reserve\n");
427				break;
428			}
429		}
430
431		b->user_priv = (uint64_t)(unsigned long)nvbo;
432		nvbo->reserved_by = file_priv;
433		nvbo->pbbo_index = i;
434		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
435		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
436			list_add_tail(&nvbo->entry, &both_list);
437		else
438		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
439			list_add_tail(&nvbo->entry, &vram_list);
440		else
441		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
442			list_add_tail(&nvbo->entry, &gart_list);
443		else {
444			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
445				 b->valid_domains);
446			list_add_tail(&nvbo->entry, &both_list);
447			ret = -EINVAL;
448			break;
449		}
450		if (nvbo == res_bo)
451			goto retry;
452	}
453
454	ww_acquire_done(&op->ticket);
455	list_splice_tail(&vram_list, &op->list);
456	list_splice_tail(&gart_list, &op->list);
457	list_splice_tail(&both_list, &op->list);
458	if (ret)
459		validate_fini(op, NULL, NULL);
460	return ret;
461
462}
463
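/* For each reserved BO: apply the requested domains, let TTM validate
 * the placement, synchronize with the channel, and on pre-Tesla chips
 * copy refreshed "presumed" offsets back to userspace.  Returns the
 * number of buffers whose presumed state changed (i.e. how many
 * relocations must be applied), or a negative error code.
 */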
464static int
465validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
466	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
467	      uint64_t user_pbbo_ptr)
468{
469	struct nouveau_drm *drm = chan->drm;
470	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
471				(void __force __user *)(uintptr_t)user_pbbo_ptr;
472	struct nouveau_bo *nvbo;
473	int ret, relocs = 0;
474
475	list_for_each_entry(nvbo, list, entry) {
476		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
477
478		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
479					     b->write_domains,
480					     b->valid_domains);
481		if (unlikely(ret)) {
482			NV_PRINTK(err, cli, "fail set_domain\n");
483			return ret;
484		}
485
486		ret = nouveau_bo_validate(nvbo, true, false);
487		if (unlikely(ret)) {
488			if (ret != -ERESTARTSYS)
489				NV_PRINTK(err, cli, "fail ttm_validate\n");
490			return ret;
491		}
492
493		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
494		if (unlikely(ret)) {
495			if (ret != -ERESTARTSYS)
496				NV_PRINTK(err, cli, "fail post-validate sync\n");
497			return ret;
498		}
499
500		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
501			if (nvbo->bo.offset == b->presumed.offset &&
502			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
503			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
504			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
505			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
506				continue;
507
508			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
509				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
510			else
511				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
512			b->presumed.offset = nvbo->bo.offset;
513			b->presumed.valid = 0;
514			relocs++;
515
516			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
517					     &b->presumed, sizeof(b->presumed)))
518				return -EFAULT;
519		}
520	}
521
522	return relocs;
523}
524
525static int
526nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
527			     struct drm_file *file_priv,
528			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
529			     uint64_t user_buffers, int nr_buffers,
530			     struct validate_op *op, int *apply_relocs)
531{
532	struct nouveau_cli *cli = nouveau_cli(file_priv);
533	int ret;
534
535	INIT_LIST_HEAD(&op->list);
536
537	if (nr_buffers == 0)
538		return 0;
539
540	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
541	if (unlikely(ret)) {
542		if (ret != -ERESTARTSYS)
543			NV_PRINTK(err, cli, "validate_init\n");
544		return ret;
545	}
546
547	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
548	if (unlikely(ret < 0)) {
549		if (ret != -ERESTARTSYS)
550			NV_PRINTK(err, cli, "validating bo list\n");
551		validate_fini(op, NULL, NULL);
552		return ret;
553	}
554	*apply_relocs = ret;
555	return 0;
556}
557
558static inline void
559u_free(void *addr)
560{
561	kvfree(addr);
562}
563
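/* Copy a userspace array into kernel memory, trying kmalloc first and
 * falling back to vmalloc.  The nmemb * size multiplication is not
 * overflow-checked here; callers bound both operands against the
 * NOUVEAU_GEM_MAX_* limits first.
 */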
564static inline void *
565u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
566{
567	void *mem;
568	void __user *userptr = (void __force __user *)(uintptr_t)user;
569
570	size *= nmemb;
571
572	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
573	if (!mem)
574		mem = vmalloc(size);
575	if (!mem)
576		return ERR_PTR(-ENOMEM);
577
578	if (copy_from_user(mem, userptr, size)) {
579		u_free(mem);
580		return ERR_PTR(-EFAULT);
581	}
582
583	return mem;
584}
585
586static int
587nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
588				struct drm_nouveau_gem_pushbuf *req,
589				struct drm_nouveau_gem_pushbuf_bo *bo)
590{
591	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
592	int ret = 0;
593	unsigned i;
594
595	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
596	if (IS_ERR(reloc))
597		return PTR_ERR(reloc);
598
599	for (i = 0; i < req->nr_relocs; i++) {
600		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
601		struct drm_nouveau_gem_pushbuf_bo *b;
602		struct nouveau_bo *nvbo;
603		uint32_t data;
604
 605		if (unlikely(r->bo_index >= req->nr_buffers)) {
606			NV_PRINTK(err, cli, "reloc bo index invalid\n");
607			ret = -EINVAL;
608			break;
609		}
610
611		b = &bo[r->bo_index];
612		if (b->presumed.valid)
613			continue;
614
 615		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
616			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
617			ret = -EINVAL;
618			break;
619		}
620		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
621
622		if (unlikely(r->reloc_bo_offset + 4 >
623			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
624			NV_PRINTK(err, cli, "reloc outside of bo\n");
625			ret = -EINVAL;
626			break;
627		}
628
629		if (!nvbo->kmap.virtual) {
630			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
631					  &nvbo->kmap);
632			if (ret) {
633				NV_PRINTK(err, cli, "failed kmap for reloc\n");
634				break;
635			}
636			nvbo->validate_mapped = true;
637		}
638
639		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
640			data = b->presumed.offset + r->data;
641		else
642		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
643			data = (b->presumed.offset + r->data) >> 32;
644		else
645			data = r->data;
646
647		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
648			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
649				data |= r->tor;
650			else
651				data |= r->vor;
652		}
653
654		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
655		if (ret) {
656			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
657			break;
658		}
659
660		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
661	}
662
663	u_free(reloc);
664	return ret;
665}
666
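/* The main submission ioctl: look up the target channel, copy in the
 * push and buffer arrays, reserve and validate every BO, apply any
 * relocations, then emit the pushbufs -- via the IB ring when
 * available, CALL on nv25+, or JUMP on older chips -- and fence the
 * whole batch.
 */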
667int
668nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
669			  struct drm_file *file_priv)
670{
671	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
672	struct nouveau_cli *cli = nouveau_cli(file_priv);
673	struct nouveau_abi16_chan *temp;
674	struct nouveau_drm *drm = nouveau_drm(dev);
675	struct drm_nouveau_gem_pushbuf *req = data;
676	struct drm_nouveau_gem_pushbuf_push *push;
677	struct drm_nouveau_gem_pushbuf_bo *bo;
678	struct nouveau_channel *chan = NULL;
679	struct validate_op op;
680	struct nouveau_fence *fence = NULL;
681	int i, j, ret = 0, do_reloc = 0;
682
683	if (unlikely(!abi16))
684		return -ENOMEM;
685
686	list_for_each_entry(temp, &abi16->channels, head) {
687		if (temp->chan->chid == req->channel) {
688			chan = temp->chan;
689			break;
690		}
691	}
692
693	if (!chan)
694		return nouveau_abi16_put(abi16, -ENOENT);
695
696	req->vram_available = drm->gem.vram_available;
697	req->gart_available = drm->gem.gart_available;
698	if (unlikely(req->nr_push == 0))
699		goto out_next;
700
701	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
702		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
703			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
704		return nouveau_abi16_put(abi16, -EINVAL);
705	}
706
707	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
708		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
709			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
710		return nouveau_abi16_put(abi16, -EINVAL);
711	}
712
713	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
714		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
715			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
716		return nouveau_abi16_put(abi16, -EINVAL);
717	}
718
719	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
720	if (IS_ERR(push))
721		return nouveau_abi16_put(abi16, PTR_ERR(push));
722
723	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
724	if (IS_ERR(bo)) {
725		u_free(push);
726		return nouveau_abi16_put(abi16, PTR_ERR(bo));
727	}
728
729	/* Ensure all push buffers are on validate list */
730	for (i = 0; i < req->nr_push; i++) {
731		if (push[i].bo_index >= req->nr_buffers) {
732			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
733			ret = -EINVAL;
734			goto out_prevalid;
735		}
736	}
737
738	/* Validate buffer list */
739	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
740					   req->nr_buffers, &op, &do_reloc);
741	if (ret) {
742		if (ret != -ERESTARTSYS)
743			NV_PRINTK(err, cli, "validate: %d\n", ret);
744		goto out_prevalid;
745	}
746
747	/* Apply any relocations that are required */
748	if (do_reloc) {
749		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
750		if (ret) {
751			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
752			goto out;
753		}
754	}
755
756	if (chan->dma.ib_max) {
757		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
758		if (ret) {
759			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
760			goto out;
761		}
762
763		for (i = 0; i < req->nr_push; i++) {
764			struct nouveau_bo *nvbo = (void *)(unsigned long)
765				bo[push[i].bo_index].user_priv;
766
767			nv50_dma_push(chan, nvbo, push[i].offset,
768				      push[i].length);
769		}
770	} else
771	if (drm->device.info.chipset >= 0x25) {
772		ret = RING_SPACE(chan, req->nr_push * 2);
773		if (ret) {
774			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
775			goto out;
776		}
777
778		for (i = 0; i < req->nr_push; i++) {
779			struct nouveau_bo *nvbo = (void *)(unsigned long)
780				bo[push[i].bo_index].user_priv;
781
782			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
783			OUT_RING(chan, 0);
784		}
785	} else {
786		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
787		if (ret) {
788			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
789			goto out;
790		}
791
792		for (i = 0; i < req->nr_push; i++) {
793			struct nouveau_bo *nvbo = (void *)(unsigned long)
794				bo[push[i].bo_index].user_priv;
795			uint32_t cmd;
796
797			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
798			cmd |= 0x20000000;
799			if (unlikely(cmd != req->suffix0)) {
800				if (!nvbo->kmap.virtual) {
801					ret = ttm_bo_kmap(&nvbo->bo, 0,
802							  nvbo->bo.mem.
803							  num_pages,
804							  &nvbo->kmap);
805					if (ret) {
806						WIND_RING(chan);
807						goto out;
808					}
809					nvbo->validate_mapped = true;
810				}
811
812				nouveau_bo_wr32(nvbo, (push[i].offset +
813						push[i].length - 8) / 4, cmd);
814			}
815
816			OUT_RING(chan, 0x20000000 |
817				      (nvbo->bo.offset + push[i].offset));
818			OUT_RING(chan, 0);
819			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
820				OUT_RING(chan, 0);
821		}
822	}
823
824	ret = nouveau_fence_new(chan, false, &fence);
825	if (ret) {
826		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
827		WIND_RING(chan);
828		goto out;
829	}
830
831out:
832	validate_fini(&op, fence, bo);
833	nouveau_fence_unref(&fence);
834
835out_prevalid:
836	u_free(bo);
837	u_free(push);
838
839out_next:
840	if (chan->dma.ib_max) {
841		req->suffix0 = 0x00000000;
842		req->suffix1 = 0x00000000;
843	} else
844	if (drm->device.info.chipset >= 0x25) {
845		req->suffix0 = 0x00020000;
846		req->suffix1 = 0x00000000;
847	} else {
848		req->suffix0 = 0x20000000 |
849			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
850		req->suffix1 = 0x00000000;
851	}
852
853	return nouveau_abi16_put(abi16, ret);
854}
855
856int
857nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
858			   struct drm_file *file_priv)
859{
860	struct drm_nouveau_gem_cpu_prep *req = data;
861	struct drm_gem_object *gem;
862	struct nouveau_bo *nvbo;
863	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
864	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
865	int ret;
866
867	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
868	if (!gem)
869		return -ENOENT;
870	nvbo = nouveau_gem_object(gem);
871
872	if (no_wait)
873		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
874	else {
875		long lret;
876
877		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
878		if (!lret)
879			ret = -EBUSY;
880		else if (lret > 0)
881			ret = 0;
882		else
883			ret = lret;
884	}
885	nouveau_bo_sync_for_cpu(nvbo);
886	drm_gem_object_unreference_unlocked(gem);
887
888	return ret;
889}
890
891int
892nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
893			   struct drm_file *file_priv)
894{
895	struct drm_nouveau_gem_cpu_fini *req = data;
896	struct drm_gem_object *gem;
897	struct nouveau_bo *nvbo;
898
899	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
900	if (!gem)
901		return -ENOENT;
902	nvbo = nouveau_gem_object(gem);
903
904	nouveau_bo_sync_for_device(nvbo);
905	drm_gem_object_unreference_unlocked(gem);
906	return 0;
907}
908
909int
910nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
911		       struct drm_file *file_priv)
912{
913	struct drm_nouveau_gem_info *req = data;
914	struct drm_gem_object *gem;
915	int ret;
916
917	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
918	if (!gem)
919		return -ENOENT;
920
921	ret = nouveau_gem_info(file_priv, gem, req);
922	drm_gem_object_unreference_unlocked(gem);
923	return ret;
924}
925
v6.13.7
   1/*
   2 * Copyright (C) 2008 Ben Skeggs.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining
   6 * a copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sublicense, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial
  15 * portions of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 *
  25 */
  26
  27#include <drm/drm_gem_ttm_helper.h>
  28
  29#include "nouveau_drv.h"
  30#include "nouveau_dma.h"
  31#include "nouveau_fence.h"
  32#include "nouveau_abi16.h"
  33
  34#include "nouveau_ttm.h"
  35#include "nouveau_gem.h"
  36#include "nouveau_mem.h"
  37#include "nouveau_vmm.h"
  38
  39#include <nvif/class.h>
  40#include <nvif/push206e.h>
  41
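/* Fault handler for GEM mmap()s: reserve the BO, let the driver
 * migrate it if the current placement isn't CPU-mappable, then insert
 * the PTEs while the BO is temporarily off the io-reserve LRU.
 */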
  42static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
  43{
  44	struct vm_area_struct *vma = vmf->vma;
  45	struct ttm_buffer_object *bo = vma->vm_private_data;
  46	pgprot_t prot;
  47	vm_fault_t ret;
  48
  49	ret = ttm_bo_vm_reserve(bo, vmf);
  50	if (ret)
  51		return ret;
  52
  53	ret = nouveau_ttm_fault_reserve_notify(bo);
  54	if (ret)
  55		goto error_unlock;
  56
  57	nouveau_bo_del_io_reserve_lru(bo);
  58	prot = vm_get_page_prot(vma->vm_flags);
  59	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
  60	nouveau_bo_add_io_reserve_lru(bo);
  61	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
  62		return ret;
  63
  64error_unlock:
  65	dma_resv_unlock(bo->base.resv);
  66	return ret;
  67}
  68
  69static const struct vm_operations_struct nouveau_ttm_vm_ops = {
  70	.fault = nouveau_ttm_fault,
  71	.open = ttm_bo_vm_open,
  72	.close = ttm_bo_vm_close,
  73	.access = ttm_bo_vm_access
  74};
  75
  76void
  77nouveau_gem_object_del(struct drm_gem_object *gem)
  78{
  79	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
  80	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
  81	struct device *dev = drm->dev->dev;
  82	int ret;
  83
  84	ret = pm_runtime_get_sync(dev);
  85	if (WARN_ON(ret < 0 && ret != -EACCES)) {
  86		pm_runtime_put_autosuspend(dev);
  87		return;
  88	}
  89
  90	if (gem->import_attach)
  91		drm_prime_gem_destroy(gem, nvbo->bo.sg);
  92
  93	ttm_bo_put(&nvbo->bo);
  94
  95	pm_runtime_mark_last_busy(dev);
  96	pm_runtime_put_autosuspend(dev);
  97}
  98
  99int
 100nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 101{
 102	struct nouveau_cli *cli = nouveau_cli(file_priv);
 103	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 104	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 105	struct device *dev = drm->dev->dev;
 106	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
 107	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
 108	struct nouveau_vma *vma;
 109	int ret;
 110
 111	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 112		return 0;
 113
 114	if (nvbo->no_share && uvmm &&
 115	    drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
 116		return -EPERM;
 117
 118	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 119	if (ret)
 120		return ret;
 121
 122	ret = pm_runtime_get_sync(dev);
 123	if (ret < 0 && ret != -EACCES) {
 124		pm_runtime_put_autosuspend(dev);
 125		goto out;
 126	}
 127
 128	/* only create a VMA on binding */
 129	if (!nouveau_cli_uvmm(cli))
 130		ret = nouveau_vma_new(nvbo, vmm, &vma);
 131	else
 132		ret = 0;
 133	pm_runtime_mark_last_busy(dev);
 134	pm_runtime_put_autosuspend(dev);
 135out:
 136	ttm_bo_unreserve(&nvbo->bo);
 137	return ret;
 138}
 139
 140struct nouveau_gem_object_unmap {
 141	struct nouveau_cli_work work;
 142	struct nouveau_vma *vma;
 143};
 144
 145static void
 146nouveau_gem_object_delete(struct nouveau_vma *vma)
 147{
 148	nouveau_fence_unref(&vma->fence);
 149	nouveau_vma_del(&vma);
 150}
 151
 152static void
 153nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
 154{
 155	struct nouveau_gem_object_unmap *work =
 156		container_of(w, typeof(*work), work);
 157	nouveau_gem_object_delete(work->vma);
 158	kfree(work);
 159}
 160
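/* Tear down a VMA once the GPU is done with it: if the VMA has a
 * pending fence, queue the deletion as client work that runs on fence
 * completion; if the work item can't be allocated, wait (up to two
 * seconds) and delete inline.
 */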
 161static void
 162nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 163{
 164	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
 165	struct nouveau_gem_object_unmap *work;
 166
 167	list_del_init(&vma->head);
 168
 169	if (!fence) {
 170		nouveau_gem_object_delete(vma);
 171		return;
 172	}
 173
 174	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
 175		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
 176		nouveau_gem_object_delete(vma);
 177		return;
 178	}
 179
 180	work->work.func = nouveau_gem_object_delete_work;
 181	work->vma = vma;
 182	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
 183}
 184
 185void
 186nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 187{
 188	struct nouveau_cli *cli = nouveau_cli(file_priv);
 189	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 190	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 191	struct device *dev = drm->dev->dev;
 192	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
 193	struct nouveau_vma *vma;
 194	int ret;
 195
 196	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 197		return;
 198
 199	if (nouveau_cli_uvmm(cli))
 200		return;
 201
 202	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 203	if (ret)
 204		return;
 205
 206	vma = nouveau_vma_find(nvbo, vmm);
 207	if (vma) {
 208		if (--vma->refs == 0) {
 209			ret = pm_runtime_get_sync(dev);
 210			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
 211				nouveau_gem_object_unmap(nvbo, vma);
 212				pm_runtime_mark_last_busy(dev);
 213			}
 214			pm_runtime_put_autosuspend(dev);
 215		}
 216	}
 217	ttm_bo_unreserve(&nvbo->bo);
 218}
 219
 220const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
 221	.free = nouveau_gem_object_del,
 222	.open = nouveau_gem_object_open,
 223	.close = nouveau_gem_object_close,
 224	.export = nouveau_gem_prime_export,
 225	.pin = nouveau_gem_prime_pin,
 226	.unpin = nouveau_gem_prime_unpin,
 227	.get_sg_table = nouveau_gem_prime_get_sg_table,
 228	.vmap = drm_gem_ttm_vmap,
 229	.vunmap = drm_gem_ttm_vunmap,
 230	.mmap = drm_gem_ttm_mmap,
 231	.vm_ops = &nouveau_ttm_vm_ops,
 232};
 233
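/* Allocate a nouveau_bo with an embedded GEM object and return a
 * single GEM reference to the caller.  NO_SHARE objects are tied to
 * the client's GPUVM: they use (and hold a reference to) the GPUVM's
 * shared reservation object.
 */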
 234int
 235nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 236		uint32_t tile_mode, uint32_t tile_flags,
 237		struct nouveau_bo **pnvbo)
 238{
 239	struct nouveau_drm *drm = cli->drm;
 240	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
 241	struct dma_resv *resv = NULL;
 242	struct nouveau_bo *nvbo;
 243	int ret;
 244
 245	if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
 246		if (unlikely(!uvmm))
 247			return -EINVAL;
 248
 249		resv = drm_gpuvm_resv(&uvmm->base);
 250	}
 251
 252	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
 253		domain |= NOUVEAU_GEM_DOMAIN_CPU;
 254
 255	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
 256				tile_flags, false);
 257	if (IS_ERR(nvbo))
 258		return PTR_ERR(nvbo);
 259
 260	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
 261	nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;
 262
 263	/* Initialize the embedded gem-object. We return a single gem-reference
 264	 * to the caller, instead of a normal nouveau_bo ttm reference. */
 265	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
 266	if (ret) {
 267		drm_gem_object_release(&nvbo->bo.base);
 268		kfree(nvbo);
 269		return ret;
 270	}
 271
 272	if (resv)
 273		dma_resv_lock(resv, NULL);
 274
 275	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
 276
 277	if (resv)
 278		dma_resv_unlock(resv);
 279
 280	if (ret)
 281		return ret;
 282
 283	/* we restrict allowed domains on nv50+ to only the types
 284	 * that were requested at creation time.  not possible on
 285	 * earlier chips without busting the ABI.
 286	 */
 287	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 288			      NOUVEAU_GEM_DOMAIN_GART;
 289	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 290		nvbo->valid_domains &= domain;
 291
 292	if (nvbo->no_share) {
 293		nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
 294		drm_gem_object_get(nvbo->r_obj);
 295	}
 296
 297	*pnvbo = nvbo;
 298	return 0;
 299}
 300
 301static int
 302nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 303		 struct drm_nouveau_gem_info *rep)
 304{
 305	struct nouveau_cli *cli = nouveau_cli(file_priv);
 306	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 307	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
 308	struct nouveau_vma *vma;
 309
 310	if (is_power_of_2(nvbo->valid_domains))
 311		rep->domain = nvbo->valid_domains;
 312	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 313		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 314	else
 315		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 316	rep->offset = nvbo->offset;
 317	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
 318	    !nouveau_cli_uvmm(cli)) {
 319		vma = nouveau_vma_find(nvbo, vmm);
 320		if (!vma)
 321			return -EINVAL;
 322
 323		rep->offset = vma->addr;
 324	} else
 325		rep->offset = 0;
 326
 327	rep->size = nvbo->bo.base.size;
 328	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
 329	rep->tile_mode = nvbo->mode;
 330	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
 331	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
 332		rep->tile_flags |= nvbo->kind << 8;
 333	else
 334	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 335		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
 336	else
 337		rep->tile_flags |= nvbo->zeta;
 338	return 0;
 339}
 340
 341int
 342nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 343		      struct drm_file *file_priv)
 344{
 345	struct nouveau_cli *cli = nouveau_cli(file_priv);
 346	struct drm_nouveau_gem_new *req = data;
 347	struct nouveau_bo *nvbo = NULL;
 348	int ret = 0;
 349
 350	/* If uvmm wasn't initialized until now disable it completely to prevent
 351	 * userspace from mixing up UAPIs.
 352	 */
 353	nouveau_cli_disable_uvmm_noinit(cli);
 354
 355	ret = nouveau_gem_new(cli, req->info.size, req->align,
 356			      req->info.domain, req->info.tile_mode,
 357			      req->info.tile_flags, &nvbo);
 358	if (ret)
 359		return ret;
 360
 361	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
 362				    &req->info.handle);
 363	if (ret == 0) {
 364		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
 365		if (ret)
 366			drm_gem_handle_delete(file_priv, req->info.handle);
 367	}
 368
 369	/* drop reference from allocate - handle holds it now */
 370	drm_gem_object_put(&nvbo->bo.base);
 371	return ret;
 372}
 373
 374static int
 375nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 376		       uint32_t write_domains, uint32_t valid_domains)
 377{
 378	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 379	struct ttm_buffer_object *bo = &nvbo->bo;
 380	uint32_t domains = valid_domains & nvbo->valid_domains &
 381		(write_domains ? write_domains : read_domains);
 382	uint32_t pref_domains = 0;
 383
 384	if (!domains)
 385		return -EINVAL;
 386
 387	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 388
 389	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 390	    bo->resource->mem_type == TTM_PL_VRAM)
 391		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 392
 393	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
 394		 bo->resource->mem_type == TTM_PL_TT)
 395		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 396
 397	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
 398		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 399
 400	else
 401		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 402
 403	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
 404
 405	return 0;
 406}
 407
 408struct validate_op {
 409	struct list_head list;
 410	struct ww_acquire_ctx ticket;
 411};
 412
 413static void
 414validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
 415			struct nouveau_fence *fence,
 416			struct drm_nouveau_gem_pushbuf_bo *pbbo)
 417{
 418	struct nouveau_bo *nvbo;
 419	struct drm_nouveau_gem_pushbuf_bo *b;
 420
 421	while (!list_empty(&op->list)) {
 422		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
 423		b = &pbbo[nvbo->pbbo_index];
 424
 425		if (likely(fence)) {
 426			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 427
 428			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 429				struct nouveau_vma *vma =
 430					(void *)(unsigned long)b->user_priv;
 431				nouveau_fence_unref(&vma->fence);
 432				dma_fence_get(&fence->base);
 433				vma->fence = fence;
 434			}
 435		}
 436
 437		if (unlikely(nvbo->validate_mapped)) {
 438			ttm_bo_kunmap(&nvbo->kmap);
 439			nvbo->validate_mapped = false;
 440		}
 441
 442		list_del(&nvbo->entry);
 443		nvbo->reserved_by = NULL;
 444		ttm_bo_unreserve(&nvbo->bo);
 445		drm_gem_object_put(&nvbo->bo.base);
 446	}
 447}
 448
 449static void
 450validate_fini(struct validate_op *op, struct nouveau_channel *chan,
 451	      struct nouveau_fence *fence,
 452	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
 453{
 454	validate_fini_no_ticket(op, chan, fence, pbbo);
 455	ww_acquire_fini(&op->ticket);
 456}
 457
 458static int
 459validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 460	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
 461	      int nr_buffers, struct validate_op *op)
 462{
 463	struct nouveau_cli *cli = nouveau_cli(file_priv);
 464	int trycnt = 0;
 465	int ret = -EINVAL, i;
 466	struct nouveau_bo *res_bo = NULL;
 467	LIST_HEAD(gart_list);
 468	LIST_HEAD(vram_list);
 469	LIST_HEAD(both_list);
 470
 471	ww_acquire_init(&op->ticket, &reservation_ww_class);
 472retry:
 473	if (++trycnt > 100000) {
 474		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
 475		return -EINVAL;
 476	}
 477
 478	for (i = 0; i < nr_buffers; i++) {
 479		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
 480		struct drm_gem_object *gem;
 481		struct nouveau_bo *nvbo;
 482
 483		gem = drm_gem_object_lookup(file_priv, b->handle);
 484		if (!gem) {
 485			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
 486			ret = -ENOENT;
 487			break;
 488		}
 489		nvbo = nouveau_gem_object(gem);
 490		if (nvbo == res_bo) {
 491			res_bo = NULL;
 492			drm_gem_object_put(gem);
 493			continue;
 494		}
 495
 496		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
 497			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
 498				      "validation list\n", b->handle);
 499			drm_gem_object_put(gem);
 500			ret = -EINVAL;
 501			break;
 502		}
 503
 504		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
 505		if (ret) {
 506			list_splice_tail_init(&vram_list, &op->list);
 507			list_splice_tail_init(&gart_list, &op->list);
 508			list_splice_tail_init(&both_list, &op->list);
 509			validate_fini_no_ticket(op, chan, NULL, NULL);
 510			if (unlikely(ret == -EDEADLK)) {
 511				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
 512							      &op->ticket);
 513				if (!ret)
 514					res_bo = nvbo;
 515			}
 516			if (unlikely(ret)) {
 517				if (ret != -ERESTARTSYS)
 518					NV_PRINTK(err, cli, "fail reserve\n");
 519				break;
 520			}
 521		}
 522
 523		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
 524			struct nouveau_vmm *vmm = chan->vmm;
 525			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
 526			if (!vma) {
 527				NV_PRINTK(err, cli, "vma not found!\n");
 528				ret = -EINVAL;
 529				break;
 530			}
 531
 532			b->user_priv = (uint64_t)(unsigned long)vma;
 533		} else {
 534			b->user_priv = (uint64_t)(unsigned long)nvbo;
 535		}
 536
 537		nvbo->reserved_by = file_priv;
 538		nvbo->pbbo_index = i;
 539		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
 540		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
 541			list_add_tail(&nvbo->entry, &both_list);
 542		else
 543		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
 544			list_add_tail(&nvbo->entry, &vram_list);
 545		else
 546		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 547			list_add_tail(&nvbo->entry, &gart_list);
 548		else {
 549			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
 550				 b->valid_domains);
 551			list_add_tail(&nvbo->entry, &both_list);
 552			ret = -EINVAL;
 553			break;
 554		}
 555		if (nvbo == res_bo)
 556			goto retry;
 557	}
 558
 559	ww_acquire_done(&op->ticket);
 560	list_splice_tail(&vram_list, &op->list);
 561	list_splice_tail(&gart_list, &op->list);
 562	list_splice_tail(&both_list, &op->list);
 563	if (ret)
 564		validate_fini(op, chan, NULL, NULL);
 565	return ret;
 566
 567}
 568
 569static int
 570validate_list(struct nouveau_channel *chan,
 571	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
 572{
 573	struct nouveau_cli *cli = chan->cli;
 574	struct nouveau_drm *drm = cli->drm;
 575	struct nouveau_bo *nvbo;
 576	int ret, relocs = 0;
 577
 578	list_for_each_entry(nvbo, list, entry) {
 579		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 580
 581		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
 582					     b->write_domains,
 583					     b->valid_domains);
 584		if (unlikely(ret)) {
 585			NV_PRINTK(err, cli, "fail set_domain\n");
 586			return ret;
 587		}
 588
 589		ret = nouveau_bo_validate(nvbo, true, false);
 590		if (unlikely(ret)) {
 591			if (ret != -ERESTARTSYS)
 592				NV_PRINTK(err, cli, "fail ttm_validate\n");
 593			return ret;
 594		}
 595
 596		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 597		if (unlikely(ret)) {
 598			if (ret != -ERESTARTSYS)
 599				NV_PRINTK(err, cli, "fail post-validate sync\n");
 600			return ret;
 601		}
 602
 603		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 604			if (nvbo->offset == b->presumed.offset &&
 605			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 606			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 607			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
 608			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 609				continue;
 610
 611			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 612				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 613			else
 614				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 615			b->presumed.offset = nvbo->offset;
 616			b->presumed.valid = 0;
 617			relocs++;
 618		}
 619	}
 620
 621	return relocs;
 622}
 623
 624static int
 625nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 626			     struct drm_file *file_priv,
 627			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
 628			     int nr_buffers,
 629			     struct validate_op *op, bool *apply_relocs)
 630{
 631	struct nouveau_cli *cli = nouveau_cli(file_priv);
 632	int ret;
 633
 634	INIT_LIST_HEAD(&op->list);
 635
 636	if (nr_buffers == 0)
 637		return 0;
 638
 639	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 640	if (unlikely(ret)) {
 641		if (ret != -ERESTARTSYS)
 642			NV_PRINTK(err, cli, "validate_init\n");
 643		return ret;
 644	}
 645
 646	ret = validate_list(chan, &op->list, pbbo);
 647	if (unlikely(ret < 0)) {
 648		if (ret != -ERESTARTSYS)
 649			NV_PRINTK(err, cli, "validating bo list\n");
 650		validate_fini(op, chan, NULL, NULL);
 651		return ret;
 652	} else if (ret > 0) {
 653		*apply_relocs = true;
 654	}
 655
 656	return 0;
 657}
 658
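/* Patch relocations into the (kmapped) target BOs: each reloc
 * rewrites one 32-bit word computed from the referenced buffer's
 * presumed offset, optionally OR-ing in a placement-dependent value,
 * after waiting for pending work on the BO being written.
 */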
 659static int
 660nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 661				struct drm_nouveau_gem_pushbuf *req,
 662				struct drm_nouveau_gem_pushbuf_reloc *reloc,
 663				struct drm_nouveau_gem_pushbuf_bo *bo)
 664{
 665	int ret = 0;
 666	unsigned i;
 667
 668	for (i = 0; i < req->nr_relocs; i++) {
 669		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 670		struct drm_nouveau_gem_pushbuf_bo *b;
 671		struct nouveau_bo *nvbo;
 672		uint32_t data;
 673		long lret;
 674
 675		if (unlikely(r->bo_index >= req->nr_buffers)) {
 676			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 677			ret = -EINVAL;
 678			break;
 679		}
 680
 681		b = &bo[r->bo_index];
 682		if (b->presumed.valid)
 683			continue;
 684
 685		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
 686			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 687			ret = -EINVAL;
 688			break;
 689		}
 690		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
 691
 692		if (unlikely(r->reloc_bo_offset + 4 >
 693			     nvbo->bo.base.size)) {
 694			NV_PRINTK(err, cli, "reloc outside of bo\n");
 695			ret = -EINVAL;
 696			break;
 697		}
 698
 699		if (!nvbo->kmap.virtual) {
 700			ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
 701					  &nvbo->kmap);
 702			if (ret) {
 703				NV_PRINTK(err, cli, "failed kmap for reloc\n");
 704				break;
 705			}
 706			nvbo->validate_mapped = true;
 707		}
 708
 709		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
 710			data = b->presumed.offset + r->data;
 711		else
 712		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
 713			data = (b->presumed.offset + r->data) >> 32;
 714		else
 715			data = r->data;
 716
 717		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
 718			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 719				data |= r->tor;
 720			else
 721				data |= r->vor;
 722		}
 723
 724		lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
 725					     DMA_RESV_USAGE_BOOKKEEP,
 726					     false, 15 * HZ);
 727		if (!lret)
 728			ret = -EBUSY;
 729		else if (lret > 0)
 730			ret = 0;
 731		else
 732			ret = lret;
 733
 734		if (ret) {
 735			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
 736				  ret);
 737			break;
 738		}
 739
 740		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 741	}
 742
 743	return ret;
 744}
 745
 746int
 747nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 748			  struct drm_file *file_priv)
 749{
 750	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
 751	struct nouveau_cli *cli = nouveau_cli(file_priv);
 752	struct nouveau_abi16_chan *temp;
 753	struct nouveau_drm *drm = nouveau_drm(dev);
 754	struct drm_nouveau_gem_pushbuf *req = data;
 755	struct drm_nouveau_gem_pushbuf_push *push;
 756	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 757	struct drm_nouveau_gem_pushbuf_bo *bo;
 758	struct nouveau_channel *chan = NULL;
 759	struct validate_op op;
 760	struct nouveau_fence *fence = NULL;
 761	int i, j, ret = 0;
 762	bool do_reloc = false, sync = false;
 763
 764	if (unlikely(!abi16))
 765		return -ENOMEM;
 766
 767	if (unlikely(nouveau_cli_uvmm(cli)))
 768		return nouveau_abi16_put(abi16, -ENOSYS);
 769
 770	list_for_each_entry(temp, &abi16->channels, head) {
 771		if (temp->chan->chid == req->channel) {
 772			chan = temp->chan;
 773			break;
 774		}
 775	}
 776
 777	if (!chan)
 778		return nouveau_abi16_put(abi16, -ENOENT);
 779	if (unlikely(atomic_read(&chan->killed)))
 780		return nouveau_abi16_put(abi16, -ENODEV);
 781
 782	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;
 783
 784	req->vram_available = drm->gem.vram_available;
 785	req->gart_available = drm->gem.gart_available;
 786	if (unlikely(req->nr_push == 0))
 787		goto out_next;
 788
 789	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 790		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
 791			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 792		return nouveau_abi16_put(abi16, -EINVAL);
 793	}
 794
 795	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 796		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
 797			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
 798		return nouveau_abi16_put(abi16, -EINVAL);
 799	}
 800
 801	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 802		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
 803			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 804		return nouveau_abi16_put(abi16, -EINVAL);
 805	}
 806
 807	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
 808	if (IS_ERR(push))
 809		return nouveau_abi16_put(abi16, PTR_ERR(push));
 810
 811	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 812	if (IS_ERR(bo)) {
 813		u_free(push);
 814		return nouveau_abi16_put(abi16, PTR_ERR(bo));
 815	}
 816
 817	/* Ensure all push buffers are on validate list */
 818	for (i = 0; i < req->nr_push; i++) {
 819		if (push[i].bo_index >= req->nr_buffers) {
 820			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
 821			ret = -EINVAL;
 822			goto out_prevalid;
 823		}
 824	}
 825
 826	/* Validate buffer list */
 827revalidate:
 828	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
 829					   req->nr_buffers, &op, &do_reloc);
 830	if (ret) {
 831		if (ret != -ERESTARTSYS)
 832			NV_PRINTK(err, cli, "validate: %d\n", ret);
 833		goto out_prevalid;
 834	}
 835
 836	/* Apply any relocations that are required */
 837	if (do_reloc) {
 838		if (!reloc) {
 839			validate_fini(&op, chan, NULL, bo);
 840			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 841			if (IS_ERR(reloc)) {
 842				ret = PTR_ERR(reloc);
 843				goto out_prevalid;
 844			}
 845
 846			goto revalidate;
 847		}
 848
 849		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
 850		if (ret) {
 851			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
 852			goto out;
 853		}
 854	}
 855
 856	if (chan->dma.ib_max) {
 857		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 858		if (ret) {
 859			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
 860			goto out;
 861		}
 862
 863		for (i = 0; i < req->nr_push; i++) {
 864			struct nouveau_vma *vma = (void *)(unsigned long)
 865				bo[push[i].bo_index].user_priv;
 866			u64 addr = vma->addr + push[i].offset;
 867			u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
 868			bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
 869
 870			nv50_dma_push(chan, addr, length, no_prefetch);
 871		}
 872	} else
 873	if (drm->client.device.info.chipset >= 0x25) {
 874		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
 875		if (ret) {
 876			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
 877			goto out;
 878		}
 879
 880		for (i = 0; i < req->nr_push; i++) {
 881			struct nouveau_bo *nvbo = (void *)(unsigned long)
 882				bo[push[i].bo_index].user_priv;
 883
 884			PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset);
 885			PUSH_DATA(&chan->chan.push, 0);
 886		}
 887	} else {
 888		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 889		if (ret) {
 890			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
 891			goto out;
 892		}
 893
 894		for (i = 0; i < req->nr_push; i++) {
 895			struct nouveau_bo *nvbo = (void *)(unsigned long)
 896				bo[push[i].bo_index].user_priv;
 897			uint32_t cmd;
 898
 899			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
 900			cmd |= 0x20000000;
 901			if (unlikely(cmd != req->suffix0)) {
 902				if (!nvbo->kmap.virtual) {
 903					ret = ttm_bo_kmap(&nvbo->bo, 0,
 904							  PFN_UP(nvbo->bo.base.size),
 905							  &nvbo->kmap);
 906					if (ret) {
 907						WIND_RING(chan);
 908						goto out;
 909					}
 910					nvbo->validate_mapped = true;
 911				}
 912
 913				nouveau_bo_wr32(nvbo, (push[i].offset +
 914						push[i].length - 8) / 4, cmd);
 915			}
 916
 917			PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset);
 918			PUSH_DATA(&chan->chan.push, 0);
 919			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
 920				PUSH_DATA(&chan->chan.push, 0);
 921		}
 922	}
 923
 924	ret = nouveau_fence_new(&fence, chan);
 925	if (ret) {
 926		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
 927		WIND_RING(chan);
 928		goto out;
 929	}
 930
 931	if (sync) {
 932		if (!(ret = nouveau_fence_wait(fence, false, false))) {
 933			if ((ret = dma_fence_get_status(&fence->base)) == 1)
 934				ret = 0;
 935		}
 936	}
 937
 938out:
 939	validate_fini(&op, chan, fence, bo);
 940	nouveau_fence_unref(&fence);
 941
 942	if (do_reloc) {
 943		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 944			u64_to_user_ptr(req->buffers);
 945
 946		for (i = 0; i < req->nr_buffers; i++) {
 947			if (bo[i].presumed.valid)
 948				continue;
 949
 950			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
 951					 sizeof(bo[i].presumed))) {
 952				ret = -EFAULT;
 953				break;
 954			}
 955		}
 956	}
 957out_prevalid:
 958	if (!IS_ERR(reloc))
 959		u_free(reloc);
 960	u_free(bo);
 961	u_free(push);
 962
 963out_next:
 964	if (chan->dma.ib_max) {
 965		req->suffix0 = 0x00000000;
 966		req->suffix1 = 0x00000000;
 967	} else
 968	if (drm->client.device.info.chipset >= 0x25) {
 969		req->suffix0 = 0x00020000;
 970		req->suffix1 = 0x00000000;
 971	} else {
 972		req->suffix0 = 0x20000000 |
 973			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
 974		req->suffix1 = 0x00000000;
 975	}
 976
 977	return nouveau_abi16_put(abi16, ret);
 978}
 979
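/* cpu_prep ioctl: wait (or poll, with NOWAIT) for pending GPU access
 * to the BO, then make its backing storage coherent for CPU access.
 */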
 980int
 981nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 982			   struct drm_file *file_priv)
 983{
 984	struct drm_nouveau_gem_cpu_prep *req = data;
 985	struct drm_gem_object *gem;
 986	struct nouveau_bo *nvbo;
 987	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
 988	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
 989	long lret;
 990	int ret;
 991
 992	gem = drm_gem_object_lookup(file_priv, req->handle);
 993	if (!gem)
 994		return -ENOENT;
 995	nvbo = nouveau_gem_object(gem);
 996
 997	lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
 998				     dma_resv_usage_rw(write), true,
 999				     no_wait ? 0 : 30 * HZ);
1000	if (!lret)
1001		ret = -EBUSY;
1002	else if (lret > 0)
1003		ret = 0;
1004	else
1005		ret = lret;
1006
1007	nouveau_bo_sync_for_cpu(nvbo);
1008	drm_gem_object_put(gem);
1009
1010	return ret;
1011}
1012
1013int
1014nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
1015			   struct drm_file *file_priv)
1016{
1017	struct drm_nouveau_gem_cpu_fini *req = data;
1018	struct drm_gem_object *gem;
1019	struct nouveau_bo *nvbo;
1020
1021	gem = drm_gem_object_lookup(file_priv, req->handle);
1022	if (!gem)
1023		return -ENOENT;
1024	nvbo = nouveau_gem_object(gem);
1025
1026	nouveau_bo_sync_for_device(nvbo);
1027	drm_gem_object_put(gem);
1028	return 0;
1029}
1030
1031int
1032nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
1033		       struct drm_file *file_priv)
1034{
1035	struct drm_nouveau_gem_info *req = data;
1036	struct drm_gem_object *gem;
1037	int ret;
1038
1039	gem = drm_gem_object_lookup(file_priv, req->handle);
1040	if (!gem)
1041		return -ENOENT;
1042
1043	ret = nouveau_gem_info(file_priv, gem, req);
1044	drm_gem_object_put(gem);
1045	return ret;
1046}
1047