Linux v6.13.7: drivers/gpu/drm/nouveau/nouveau_gem.c
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_gem_ttm_helper.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

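/* Page-fault handler for mmap()ed GEM objects.  The BO is reserved, given a
 * chance to migrate somewhere CPU-accessible, and temporarily pulled off the
 * io-reserve LRU so its I/O mapping isn't evicted while the common TTM
 * helper fills in the PTEs.
 */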
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	ret = nouveau_ttm_fault_reserve_notify(bo);
	if (ret)
		goto error_unlock;

	nouveau_bo_del_io_reserve_lru(bo);
	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	nouveau_bo_add_io_reserve_lru(bo);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

error_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
	.fault = nouveau_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

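/* Final GEM free callback.  The device is held awake across teardown;
 * imported dma-buf objects release their attachment before the underlying
 * TTM object reference is dropped.
 */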
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES)) {
		pm_runtime_put_autosuspend(dev);
		return;
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

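/* Called whenever a client obtains a handle to the object.  On NV50+ each
 * client has its own virtual address space, so a per-client VMA is created
 * here -- unless the client uses the new VM_BIND (uvmm) API, in which case
 * mappings are only created through explicit bind requests.
 */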
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	if (nvbo->no_share && uvmm &&
	    drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
		return -EPERM;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);
		goto out;
	}

	/* only create a VMA on binding */
	if (!nouveau_cli_uvmm(cli))
		ret = nouveau_vma_new(nvbo, vmm, &vma);
	else
		ret = 0;
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

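/* VMA teardown.  If GPU work may still reference the mapping, deletion is
 * deferred to client workqueue context and triggered by the VMA's last
 * fence; when the deferral can't be allocated, fall back to a synchronous
 * (bounded) wait before deleting.
 */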
static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

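/* Handle-close counterpart of nouveau_gem_object_open(): drop the client's
 * VMA reference and unmap on the last one, with the device held awake.
 */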
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	if (nouveau_cli_uvmm(cli))
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
	.free = nouveau_gem_object_del,
	.open = nouveau_gem_object_open,
	.close = nouveau_gem_object_close,
	.export = nouveau_gem_prime_export,
	.pin = nouveau_gem_prime_pin,
	.unpin = nouveau_gem_prime_unpin,
	.get_sg_table = nouveau_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.vm_ops = &nouveau_ttm_vm_ops,
};

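/* Allocate a new buffer object with an embedded GEM object.  NO_SHARE
 * buffers are tied to the client's GPUVM and share its reservation object,
 * which is why the gpuvm resv is locked around nouveau_bo_init() below.
 */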
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
	struct dma_resv *resv = NULL;
	struct nouveau_bo *nvbo;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
		if (unlikely(!uvmm))
			return -EINVAL;

		resv = drm_gpuvm_resv(&uvmm->base);
	}

	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
		domain |= NOUVEAU_GEM_DOMAIN_CPU;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags, false);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
	nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		drm_gem_object_release(&nvbo->bo.base);
		kfree(nvbo);
		return ret;
	}

	if (resv)
		dma_resv_lock(resv, NULL);

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);

	if (resv)
		dma_resv_unlock(resv);

	if (ret)
		return ret;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	if (nvbo->no_share) {
		nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
		drm_gem_object_get(nvbo->r_obj);
	}

	*pnvbo = nvbo;
	return 0;
}

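/* Fill in the userspace-visible drm_nouveau_gem_info block: current domain,
 * GPU virtual address (per-client VMA) where one exists, mmap offset, and
 * the family-specific tile mode/flags encoding.
 */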
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
	    !nouveau_cli_uvmm(cli)) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	} else
		rep->offset = 0;

	rep->size = nvbo->bo.base.size;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

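/* Userspace entry point for buffer creation: allocate the BO, create a GEM
 * handle for the caller, and return the info block describing it.
 */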
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	/* If uvmm wasn't initialized until now, disable it completely to
	 * prevent userspace from mixing up UAPIs.
	 */
	nouveau_cli_disable_uvmm_noinit(cli);

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&nvbo->bo.base);
	return ret;
}

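/* Compute a TTM placement from the domains userspace will accept, preferring
 * whichever valid domain the buffer already resides in so validation doesn't
 * migrate it needlessly.
 */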
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_domains = 0;

	if (!domains)
		return -EINVAL;

	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->resource->mem_type == TTM_PL_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->resource->mem_type == TTM_PL_TT)
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

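/* Unwind a validation pass: fence each still-reserved buffer (and its VMA on
 * NV50+), drop any kmap left behind by relocation patching, then unreserve
 * and release the GEM references taken during lookup.
 */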
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

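/* Look up and reserve every buffer in the pushbuf under one ww_acquire
 * ticket.  On -EDEADLK all reservations are backed off, the contended BO is
 * re-reserved via the slowpath, and the whole loop restarts: the standard
 * wound/wait deadlock-avoidance dance.
 */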
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;

}

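/* Validate every reserved buffer: apply the requested placement, let TTM
 * migrate it, synchronise against outstanding fences, and on pre-Tesla
 * chips record any change to the "presumed" offset/domain so the matching
 * relocations get patched.  Returns the number of stale presumed entries.
 */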
static int
validate_list(struct nouveau_channel *chan,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_cli *cli = chan->cli;
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->offset == b->presumed.offset &&
			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}

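/* Reserve-and-validate wrapper used by the pushbuf ioctl; sets *apply_relocs
 * when validate_list() reported stale presumed addresses.
 */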
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	} else if (ret > 0) {
		*apply_relocs = true;
	}

	return 0;
}

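/* Patch presumed GPU addresses into kmap()ed push buffers.  Each reloc
 * writes one 32-bit word (low half, high half, or OR-mask form) after
 * waiting for the target buffer to go idle.
 */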
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;
		long lret;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.base.size)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
					     DMA_RESV_USAGE_BOOKKEEP,
					     false, 15 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;

		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
				  ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	return ret;
}

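/* The main submission ioctl: copy in the push/buffer/reloc arrays, validate
 * and relocate every buffer, emit the pushes on the channel -- via the IB
 * ring on NV50+, CALL methods on >=NV25, or JUMPs on older chips -- then
 * attach a fence and hand updated presumed addresses back to userspace.
 */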
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (unlikely(nouveau_cli_uvmm(cli)))
		return nouveau_abi16_put(abi16, -ENOSYS);

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			u64 addr = vma->addr + push[i].offset;
			u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
			bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;

			nv50_dma_push(chan, addr, length, no_prefetch);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(&chan->chan.push, 0);
		}
	} else {
		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  PFN_UP(nvbo->bo.base.size),
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(&chan->chan.push, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				PUSH_DATA(&chan->chan.push, 0);
		}
	}

	ret = nouveau_fence_new(&fence, chan);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
	}
out_prevalid:
	if (!IS_ERR(reloc))
		u_free(reloc);
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

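/* Wait (up to 30 s, or not at all with NOWAIT) for pending GPU access to the
 * buffer, then make it coherent for CPU access.
 */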
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
				     dma_resv_usage_rw(write), true,
				     no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put(gem);
	return ret;
}