v5.4
   1/*
   2 * Copyright 2007 Dave Airlied
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 */
  24/*
  25 * Authors: Dave Airlied <airlied@linux.ie>
  26 *	    Ben Skeggs   <darktama@iinet.net.au>
  27 *	    Jeremy Kolb  <jkolb@brandeis.edu>
  28 */
  29
  30#include <linux/dma-mapping.h>
  31#include <linux/swiotlb.h>
  32
  33#include "nouveau_drv.h"
  34#include "nouveau_dma.h"
  35#include "nouveau_fence.h"
  36
  37#include "nouveau_bo.h"
  38#include "nouveau_ttm.h"
  39#include "nouveau_gem.h"
  40#include "nouveau_mem.h"
  41#include "nouveau_vmm.h"
  42
  43#include <nvif/class.h>
  44#include <nvif/if500b.h>
  45#include <nvif/if900b.h>
  46
  47/*
  48 * NV10-NV40 tiling helpers
  49 */
  50
  51static void
  52nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
  53			   u32 addr, u32 size, u32 pitch, u32 flags)
  54{
  55	struct nouveau_drm *drm = nouveau_drm(dev);
  56	int i = reg - drm->tile.reg;
   57	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
  58	struct nvkm_fb_tile *tile = &fb->tile.region[i];
  59
  60	nouveau_fence_unref(&reg->fence);
  61
  62	if (tile->pitch)
  63		nvkm_fb_tile_fini(fb, i, tile);
  64
  65	if (pitch)
  66		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
  67
  68	nvkm_fb_tile_prog(fb, i, tile);
  69}
  70
  71static struct nouveau_drm_tile *
  72nv10_bo_get_tile_region(struct drm_device *dev, int i)
  73{
  74	struct nouveau_drm *drm = nouveau_drm(dev);
  75	struct nouveau_drm_tile *tile = &drm->tile.reg[i];
  76
  77	spin_lock(&drm->tile.lock);
  78
  79	if (!tile->used &&
  80	    (!tile->fence || nouveau_fence_done(tile->fence)))
  81		tile->used = true;
  82	else
  83		tile = NULL;
  84
  85	spin_unlock(&drm->tile.lock);
  86	return tile;
  87}
  88
  89static void
  90nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
  91			struct dma_fence *fence)
  92{
  93	struct nouveau_drm *drm = nouveau_drm(dev);
  94
  95	if (tile) {
  96		spin_lock(&drm->tile.lock);
  97		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
  98		tile->used = false;
  99		spin_unlock(&drm->tile.lock);
 100	}
 101}
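/*
 * Illustrative lifecycle sketch (not part of the original file): a tile
 * region is claimed, reprogrammed, and later released with an optional
 * fence so the region is not recycled while the GPU may still be using it.
 * Roughly:
 *
 *	struct nouveau_drm_tile *tile = nv10_bo_get_tile_region(dev, i);
 *	if (tile)
 *		nv10_bo_update_tile_region(dev, tile, addr, size, pitch, 0);
 *	...
 *	nv10_bo_put_tile_region(dev, tile, fence);
 *
 * nv10_bo_set_tiling() below performs this scan across all of the
 * hardware's tile regions.
 */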
 102
 103static struct nouveau_drm_tile *
 104nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 105		   u32 size, u32 pitch, u32 zeta)
 106{
 107	struct nouveau_drm *drm = nouveau_drm(dev);
 108	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
 109	struct nouveau_drm_tile *tile, *found = NULL;
 110	int i;
 111
 112	for (i = 0; i < fb->tile.regions; i++) {
 113		tile = nv10_bo_get_tile_region(dev, i);
 114
 115		if (pitch && !found) {
 116			found = tile;
 117			continue;
 118
 119		} else if (tile && fb->tile.region[i].pitch) {
 120			/* Kill an unused tile region. */
 121			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
 122		}
 123
 124		nv10_bo_put_tile_region(dev, tile, NULL);
 125	}
 126
 127	if (found)
  128		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
 129	return found;
 130}
 131
 132static void
 133nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 134{
 135	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 136	struct drm_device *dev = drm->dev;
  137	struct nouveau_bo *nvbo = nouveau_bo(bo);
  138
 139	WARN_ON(nvbo->pin_refcnt > 0);
 140	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 141
 142	/*
 143	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
 144	 * initialized, so don't attempt to release it.
 145	 */
 146	if (bo->base.dev)
 147		drm_gem_object_release(&bo->base);
 148
 149	kfree(nvbo);
 150}
 151
 152static inline u64
 153roundup_64(u64 x, u32 y)
 154{
 155	x += y - 1;
 156	do_div(x, y);
 157	return x * y;
 158}
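/*
 * Worked example (not part of the original file): roundup_64() rounds x up
 * to the next multiple of y, using do_div() so the division stays correct
 * for 64-bit sizes on 32-bit hosts:
 *
 *	roundup_64(0x12345, 0x1000):
 *		x += 0xfff;          x == 0x13344
 *		do_div(x, 0x1000);   x == 0x13 (quotient)
 *		return 0x13 * 0x1000 == 0x13000
 */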
 159
 160static void
 161nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 162		       int *align, u64 *size)
 163{
 164	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 165	struct nvif_device *device = &drm->client.device;
 166
 167	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
 168		if (nvbo->mode) {
 169			if (device->info.chipset >= 0x40) {
 170				*align = 65536;
 171				*size = roundup_64(*size, 64 * nvbo->mode);
 172
 173			} else if (device->info.chipset >= 0x30) {
 174				*align = 32768;
 175				*size = roundup_64(*size, 64 * nvbo->mode);
 176
 177			} else if (device->info.chipset >= 0x20) {
 178				*align = 16384;
 179				*size = roundup_64(*size, 64 * nvbo->mode);
 180
 181			} else if (device->info.chipset >= 0x10) {
 182				*align = 16384;
 183				*size = roundup_64(*size, 32 * nvbo->mode);
 184			}
 185		}
 186	} else {
 187		*size = roundup_64(*size, (1 << nvbo->page));
 188		*align = max((1 <<  nvbo->page), *align);
 189	}
 190
 191	*size = roundup_64(*size, PAGE_SIZE);
 192}
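/*
 * Worked example (not part of the original file): on a Tesla or newer GPU
 * where nouveau_bo_alloc() picked nvbo->page == 16 (64 KiB GPU pages), a
 * request of *size = 100000 becomes roundup_64(100000, 65536) == 131072,
 * and *align is raised to at least 65536.  On pre-Tesla chips the rounding
 * depends on the tile mode instead, e.g. chipset >= 0x40 with mode m aligns
 * to 64 KiB and rounds the size up to a multiple of 64 * m.
 */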
 193
 194struct nouveau_bo *
 195nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
  196		 u32 tile_mode, u32 tile_flags)
 197{
 198	struct nouveau_drm *drm = cli->drm;
 199	struct nouveau_bo *nvbo;
 200	struct nvif_mmu *mmu = &cli->mmu;
 201	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
 202	int i, pi = -1;
 203
 204	if (!*size) {
 205		NV_WARN(drm, "skipped size %016llx\n", *size);
  206		return ERR_PTR(-EINVAL);
  207	}
  208
 209	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 210	if (!nvbo)
 211		return ERR_PTR(-ENOMEM);
 212	INIT_LIST_HEAD(&nvbo->head);
 213	INIT_LIST_HEAD(&nvbo->entry);
  214	INIT_LIST_HEAD(&nvbo->vma_list);
 215	nvbo->bo.bdev = &drm->ttm.bdev;
 216
 217	/* This is confusing, and doesn't actually mean we want an uncached
 218	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
 219	 * into in nouveau_gem_new().
 220	 */
 221	if (flags & TTM_PL_FLAG_UNCACHED) {
 222		/* Determine if we can get a cache-coherent map, forcing
 223		 * uncached mapping if we can't.
 224		 */
 225		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
 226			nvbo->force_coherent = true;
 227	}
 228
 229	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
 230		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
 231		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
 232			kfree(nvbo);
 233			return ERR_PTR(-EINVAL);
 234		}
 235
 236		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
 237	} else
 238	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 239		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
 240		nvbo->comp = (tile_flags & 0x00030000) >> 16;
 241		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
 242			kfree(nvbo);
 243			return ERR_PTR(-EINVAL);
 244		}
 245	} else {
 246		nvbo->zeta = (tile_flags & 0x00000007);
 247	}
 248	nvbo->mode = tile_mode;
 249	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
 250
 251	/* Determine the desirable target GPU page size for the buffer. */
 252	for (i = 0; i < vmm->page_nr; i++) {
 253		/* Because we cannot currently allow VMM maps to fail
 254		 * during buffer migration, we need to determine page
 255		 * size for the buffer up-front, and pre-allocate its
 256		 * page tables.
 257		 *
 258		 * Skip page sizes that can't support needed domains.
 259		 */
 260		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
 261		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
 262			continue;
 263		if ((flags & TTM_PL_FLAG_TT) &&
 264		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
 265			continue;
 266
 267		/* Select this page size if it's the first that supports
 268		 * the potential memory domains, or when it's compatible
 269		 * with the requested compression settings.
 270		 */
 271		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
 272			pi = i;
 273
 274		/* Stop once the buffer is larger than the current page size. */
 275		if (*size >= 1ULL << vmm->page[i].shift)
 276			break;
 277	}
 278
 279	if (WARN_ON(pi < 0))
 280		return ERR_PTR(-EINVAL);
 281
 282	/* Disable compression if suitable settings couldn't be found. */
 283	if (nvbo->comp && !vmm->page[pi].comp) {
 284		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
 285			nvbo->kind = mmu->kind[nvbo->kind];
 286		nvbo->comp = 0;
 287	}
 288	nvbo->page = vmm->page[pi].shift;
 289
 290	nouveau_bo_fixup_align(nvbo, flags, align, size);
 291
 292	return nvbo;
 293}
 294
 295int
 296nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
 297		struct sg_table *sg, struct dma_resv *robj)
 298{
 299	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
 300	size_t acc_size;
 301	int ret;
 302
  303	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
  304
 305	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 306	nouveau_bo_placement_set(nvbo, flags, 0);
 307
 308	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
 309			  &nvbo->placement, align >> PAGE_SHIFT, false,
  310			  acc_size, sg, robj, nouveau_bo_del_ttm);
 311	if (ret) {
 312		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 313		return ret;
 314	}
 315
 316	return 0;
 317}
 318
 319int
 320nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 321	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
 322	       struct sg_table *sg, struct dma_resv *robj,
 323	       struct nouveau_bo **pnvbo)
 324{
 325	struct nouveau_bo *nvbo;
 326	int ret;
 327
 328	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
 329				tile_flags);
 330	if (IS_ERR(nvbo))
 331		return PTR_ERR(nvbo);
 332
 333	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
 334	if (ret)
 335		return ret;
 336
 337	*pnvbo = nvbo;
 338	return 0;
 339}
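/*
 * Usage sketch (not part of the original file; the surrounding values are
 * illustrative): a typical in-driver allocation of a page-aligned 64 KiB
 * VRAM buffer with no tiling looks like
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(cli, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0, NULL, NULL, &nvbo);
 *	if (ret)
 *		return ret;
 *
 * nouveau_bo_alloc() may grow both size and align before nouveau_bo_init()
 * hands the object to TTM, which is why they are passed by value here but
 * by pointer internally.
 */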
 340
 341static void
 342set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
 343{
 344	*n = 0;
 345
 346	if (type & TTM_PL_FLAG_VRAM)
 347		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
 348	if (type & TTM_PL_FLAG_TT)
 349		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
 350	if (type & TTM_PL_FLAG_SYSTEM)
 351		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
 352}
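/*
 * Example (not part of the original file): for
 * type == TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT this emits two entries,
 *
 *	pl[0].flags = TTM_PL_FLAG_VRAM | flags;
 *	pl[1].flags = TTM_PL_FLAG_TT   | flags;
 *	*n = 2;
 *
 * so TTM tries VRAM first, then GART, then (if requested) system memory.
 */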
 353
 354static void
 355set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 356{
 357	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 358	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
 359	unsigned i, fpfn, lpfn;
 360
 361	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
 362	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
 363	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 364		/*
 365		 * Make sure that the color and depth buffers are handled
 366		 * by independent memory controller units. Up to a 9x
 367		 * speed up when alpha-blending and depth-test are enabled
 368		 * at the same time.
 369		 */
 370		if (nvbo->zeta) {
 371			fpfn = vram_pages / 2;
 372			lpfn = ~0;
 373		} else {
 374			fpfn = 0;
 375			lpfn = vram_pages / 2;
 376		}
 377		for (i = 0; i < nvbo->placement.num_placement; ++i) {
 378			nvbo->placements[i].fpfn = fpfn;
 379			nvbo->placements[i].lpfn = lpfn;
 380		}
 381		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
 382			nvbo->busy_placements[i].fpfn = fpfn;
 383			nvbo->busy_placements[i].lpfn = lpfn;
 384		}
 385	}
 386}
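/*
 * Worked example (not part of the original file): on a Celsius board with
 * 256 MiB of VRAM and 4 KiB pages, vram_pages == 65536.  A small tiled
 * depth buffer (nvbo->zeta != 0) is restricted to the upper half of VRAM
 * (fpfn = 32768, lpfn = ~0), while a colour buffer is restricted to the
 * lower half (fpfn = 0, lpfn = 32768), keeping the two render streams on
 * different memory controller units.
 */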
 387
 388void
 389nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 390{
 391	struct ttm_placement *pl = &nvbo->placement;
 392	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
 393						 TTM_PL_MASK_CACHING) |
 394			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 395
 396	pl->placement = nvbo->placements;
 397	set_placement_list(nvbo->placements, &pl->num_placement,
 398			   type, flags);
 399
 400	pl->busy_placement = nvbo->busy_placements;
 401	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
 402			   type | busy, flags);
 403
 404	set_placement_range(nvbo, type);
 405}
 406
 407int
 408nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 409{
 410	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 411	struct ttm_buffer_object *bo = &nvbo->bo;
 412	bool force = false, evict = false;
 413	int ret;
 414
 415	ret = ttm_bo_reserve(bo, false, false, NULL);
 416	if (ret)
 417		return ret;
 418
 419	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
 420	    memtype == TTM_PL_FLAG_VRAM && contig) {
 421		if (!nvbo->contig) {
  422			nvbo->contig = true;
 423			force = true;
 424			evict = true;
 425		}
 426	}
 427
 428	if (nvbo->pin_refcnt) {
 429		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
 430			NV_ERROR(drm, "bo %p pinned elsewhere: "
 431				      "0x%08x vs 0x%08x\n", bo,
 432				 1 << bo->mem.mem_type, memtype);
 433			ret = -EBUSY;
 434		}
 435		nvbo->pin_refcnt++;
 436		goto out;
 437	}
 438
 439	if (evict) {
 440		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
 441		ret = nouveau_bo_validate(nvbo, false, false);
 442		if (ret)
 443			goto out;
 444	}
 445
 446	nvbo->pin_refcnt++;
 447	nouveau_bo_placement_set(nvbo, memtype, 0);
 448
 449	/* drop pin_refcnt temporarily, so we don't trip the assertion
 450	 * in nouveau_bo_move() that makes sure we're not trying to
 451	 * move a pinned buffer
 452	 */
 453	nvbo->pin_refcnt--;
 454	ret = nouveau_bo_validate(nvbo, false, false);
 455	if (ret)
 456		goto out;
 457	nvbo->pin_refcnt++;
 458
 459	switch (bo->mem.mem_type) {
 460	case TTM_PL_VRAM:
 461		drm->gem.vram_available -= bo->mem.size;
 462		break;
 463	case TTM_PL_TT:
 464		drm->gem.gart_available -= bo->mem.size;
 465		break;
 466	default:
 467		break;
 468	}
 469
 470out:
 471	if (force && ret)
 472		nvbo->contig = false;
 473	ttm_bo_unreserve(bo);
 474	return ret;
 475}
 476
 477int
 478nouveau_bo_unpin(struct nouveau_bo *nvbo)
 479{
 480	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 481	struct ttm_buffer_object *bo = &nvbo->bo;
 482	int ret, ref;
 483
 484	ret = ttm_bo_reserve(bo, false, false, NULL);
 485	if (ret)
 486		return ret;
 487
 488	ref = --nvbo->pin_refcnt;
 489	WARN_ON_ONCE(ref < 0);
 490	if (ref)
 491		goto out;
 492
 493	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 494
 495	ret = nouveau_bo_validate(nvbo, false, false);
 496	if (ret == 0) {
 497		switch (bo->mem.mem_type) {
 498		case TTM_PL_VRAM:
 499			drm->gem.vram_available += bo->mem.size;
 500			break;
 501		case TTM_PL_TT:
 502			drm->gem.gart_available += bo->mem.size;
 503			break;
 504		default:
 505			break;
 506		}
 507	}
 508
 509out:
 510	ttm_bo_unreserve(bo);
 511	return ret;
 512}
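/*
 * Usage sketch (not part of the original file): pinning is refcounted, so
 * every nouveau_bo_pin() is paired with a nouveau_bo_unpin(), e.g. for a
 * scanout buffer that must stay in contiguous VRAM:
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
 *	if (ret)
 *		return ret;
 *	...
 *	nouveau_bo_unpin(nvbo);
 *
 * Only the final unpin re-validates the buffer without TTM_PL_FLAG_NO_EVICT
 * and credits its size back to the vram/gart accounting above.
 */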
 513
 514int
 515nouveau_bo_map(struct nouveau_bo *nvbo)
 516{
 517	int ret;
 518
 519	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 520	if (ret)
 521		return ret;
 522
 523	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
 524
 525	ttm_bo_unreserve(&nvbo->bo);
 526	return ret;
 527}
 528
 529void
 530nouveau_bo_unmap(struct nouveau_bo *nvbo)
 531{
 532	if (!nvbo)
 533		return;
 534
 535	ttm_bo_kunmap(&nvbo->kmap);
 536}
 537
 538void
 539nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 540{
  541	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 542	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 543	int i;
 544
 545	if (!ttm_dma)
 546		return;
 547
 548	/* Don't waste time looping if the object is coherent */
 549	if (nvbo->force_coherent)
 550		return;
 551
 552	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
 553		dma_sync_single_for_device(drm->dev->dev,
 554					   ttm_dma->dma_address[i],
 555					   PAGE_SIZE, DMA_TO_DEVICE);
 556}
 557
 558void
 559nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 560{
  561	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 562	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 563	int i;
 564
 565	if (!ttm_dma)
 566		return;
 567
 568	/* Don't waste time looping if the object is coherent */
 569	if (nvbo->force_coherent)
 570		return;
 571
 572	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
 573		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
 574					PAGE_SIZE, DMA_FROM_DEVICE);
 575}
 576
 577int
 578nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 579		    bool no_wait_gpu)
 580{
 581	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
 582	int ret;
 583
  584	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
 585	if (ret)
 586		return ret;
 587
 588	nouveau_bo_sync_for_device(nvbo);
 589
 590	return 0;
 591}
 592
 593void
 594nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
 595{
 596	bool is_iomem;
 597	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 598
 599	mem += index;
 600
 601	if (is_iomem)
 602		iowrite16_native(val, (void __force __iomem *)mem);
 603	else
 604		*mem = val;
 605}
 606
 607u32
 608nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
 609{
 610	bool is_iomem;
 611	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 612
 613	mem += index;
 614
 615	if (is_iomem)
 616		return ioread32_native((void __force __iomem *)mem);
 617	else
 618		return *mem;
 619}
 620
 621void
 622nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 623{
 624	bool is_iomem;
 625	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 626
 627	mem += index;
 628
 629	if (is_iomem)
 630		iowrite32_native(val, (void __force __iomem *)mem);
 631	else
 632		*mem = val;
 633}
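/*
 * Usage sketch (not part of the original file): the rd/wr helpers above
 * operate on a buffer previously mapped with nouveau_bo_map(), switching to
 * ioread/iowrite accessors when the kernel mapping points at I/O memory:
 *
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		val = nouveau_bo_rd32(nvbo, 0);
 *		nouveau_bo_unmap(nvbo);
 *	}
 *
 * Note that "index" counts 16- or 32-bit words, not bytes.
 */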
 634
 635static struct ttm_tt *
  636nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 637{
 638#if IS_ENABLED(CONFIG_AGP)
 639	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 640
 641	if (drm->agp.bridge) {
  642		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
 643	}
 644#endif
 645
 646	return nouveau_sgdma_create_ttm(bo, page_flags);
 647}
 648
 649static int
 650nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 651{
 652	/* We'll do this from user space. */
 653	return 0;
 654}
 655
 656static int
 657nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 658			 struct ttm_mem_type_manager *man)
 659{
 660	struct nouveau_drm *drm = nouveau_bdev(bdev);
 661	struct nvif_mmu *mmu = &drm->client.mmu;
 662
 663	switch (type) {
 664	case TTM_PL_SYSTEM:
 665		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 666		man->available_caching = TTM_PL_MASK_CACHING;
 667		man->default_caching = TTM_PL_FLAG_CACHED;
 668		break;
 669	case TTM_PL_VRAM:
 670		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 671			     TTM_MEMTYPE_FLAG_MAPPABLE;
 672		man->available_caching = TTM_PL_FLAG_UNCACHED |
 673					 TTM_PL_FLAG_WC;
 674		man->default_caching = TTM_PL_FLAG_WC;
 675
 676		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 677			/* Some BARs do not support being ioremapped WC */
 678			const u8 type = mmu->type[drm->ttm.type_vram].type;
 679			if (type & NVIF_MEM_UNCACHED) {
 680				man->available_caching = TTM_PL_FLAG_UNCACHED;
 681				man->default_caching = TTM_PL_FLAG_UNCACHED;
 682			}
 683
 684			man->func = &nouveau_vram_manager;
 685			man->io_reserve_fastpath = false;
 686			man->use_io_reserve_lru = true;
 687		} else {
 688			man->func = &ttm_bo_manager_func;
 689		}
 690		break;
 691	case TTM_PL_TT:
 692		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 693			man->func = &nouveau_gart_manager;
 694		else
 695		if (!drm->agp.bridge)
 696			man->func = &nv04_gart_manager;
 697		else
 698			man->func = &ttm_bo_manager_func;
 699
 700		if (drm->agp.bridge) {
 701			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 702			man->available_caching = TTM_PL_FLAG_UNCACHED |
 703				TTM_PL_FLAG_WC;
 704			man->default_caching = TTM_PL_FLAG_WC;
 705		} else {
 706			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 707				     TTM_MEMTYPE_FLAG_CMA;
 708			man->available_caching = TTM_PL_MASK_CACHING;
 709			man->default_caching = TTM_PL_FLAG_CACHED;
 710		}
 711
 712		break;
 713	default:
 714		return -EINVAL;
 715	}
 716	return 0;
 717}
 718
 719static void
 720nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 721{
 722	struct nouveau_bo *nvbo = nouveau_bo(bo);
 723
 724	switch (bo->mem.mem_type) {
 725	case TTM_PL_VRAM:
 726		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
 727					 TTM_PL_FLAG_SYSTEM);
 728		break;
 729	default:
 730		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
 731		break;
 732	}
 733
 734	*pl = nvbo->placement;
 735}
 736
 737
 738static int
 739nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 740{
 741	int ret = RING_SPACE(chan, 2);
 742	if (ret == 0) {
 743		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
 744		OUT_RING  (chan, handle & 0x0000ffff);
 745		FIRE_RING (chan);
 746	}
 747	return ret;
 748}
 749
 750static int
 751nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 752		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 753{
 754	struct nouveau_mem *mem = nouveau_mem(old_reg);
 755	int ret = RING_SPACE(chan, 10);
 756	if (ret == 0) {
 757		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
 758		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
 759		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
 760		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
 761		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 762		OUT_RING  (chan, PAGE_SIZE);
 763		OUT_RING  (chan, PAGE_SIZE);
 764		OUT_RING  (chan, PAGE_SIZE);
 765		OUT_RING  (chan, new_reg->num_pages);
 766		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
 767	}
 768	return ret;
 769}
 770
 771static int
 772nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 773{
 774	int ret = RING_SPACE(chan, 2);
 775	if (ret == 0) {
 776		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
 777		OUT_RING  (chan, handle);
 778	}
 779	return ret;
 780}
 781
 782static int
 783nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 784		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 785{
 786	struct nouveau_mem *mem = nouveau_mem(old_reg);
 787	u64 src_offset = mem->vma[0].addr;
 788	u64 dst_offset = mem->vma[1].addr;
 789	u32 page_count = new_reg->num_pages;
 790	int ret;
 791
 792	page_count = new_reg->num_pages;
 793	while (page_count) {
 794		int line_count = (page_count > 8191) ? 8191 : page_count;
 795
 796		ret = RING_SPACE(chan, 11);
 797		if (ret)
 798			return ret;
 799
 800		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
 801		OUT_RING  (chan, upper_32_bits(src_offset));
 802		OUT_RING  (chan, lower_32_bits(src_offset));
 803		OUT_RING  (chan, upper_32_bits(dst_offset));
 804		OUT_RING  (chan, lower_32_bits(dst_offset));
 805		OUT_RING  (chan, PAGE_SIZE);
 806		OUT_RING  (chan, PAGE_SIZE);
 807		OUT_RING  (chan, PAGE_SIZE);
 808		OUT_RING  (chan, line_count);
 809		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
 810		OUT_RING  (chan, 0x00000110);
 811
 812		page_count -= line_count;
 813		src_offset += (PAGE_SIZE * line_count);
 814		dst_offset += (PAGE_SIZE * line_count);
 815	}
 816
 817	return 0;
 818}
 819
 820static int
 821nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 822		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 823{
 824	struct nouveau_mem *mem = nouveau_mem(old_reg);
 825	u64 src_offset = mem->vma[0].addr;
 826	u64 dst_offset = mem->vma[1].addr;
 827	u32 page_count = new_reg->num_pages;
 828	int ret;
 829
 830	page_count = new_reg->num_pages;
 831	while (page_count) {
 832		int line_count = (page_count > 2047) ? 2047 : page_count;
 833
 834		ret = RING_SPACE(chan, 12);
 835		if (ret)
 836			return ret;
 837
 838		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
 839		OUT_RING  (chan, upper_32_bits(dst_offset));
 840		OUT_RING  (chan, lower_32_bits(dst_offset));
 841		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
 842		OUT_RING  (chan, upper_32_bits(src_offset));
 843		OUT_RING  (chan, lower_32_bits(src_offset));
 844		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
 845		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
 846		OUT_RING  (chan, PAGE_SIZE); /* line_length */
 847		OUT_RING  (chan, line_count);
 848		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
 849		OUT_RING  (chan, 0x00100110);
 850
 851		page_count -= line_count;
 852		src_offset += (PAGE_SIZE * line_count);
 853		dst_offset += (PAGE_SIZE * line_count);
 854	}
 855
 856	return 0;
 857}
 858
 859static int
 860nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 861		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 862{
 863	struct nouveau_mem *mem = nouveau_mem(old_reg);
 864	u64 src_offset = mem->vma[0].addr;
 865	u64 dst_offset = mem->vma[1].addr;
 866	u32 page_count = new_reg->num_pages;
 867	int ret;
 868
 869	page_count = new_reg->num_pages;
 870	while (page_count) {
 871		int line_count = (page_count > 8191) ? 8191 : page_count;
 872
 873		ret = RING_SPACE(chan, 11);
 874		if (ret)
 875			return ret;
 876
 877		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
 878		OUT_RING  (chan, upper_32_bits(src_offset));
 879		OUT_RING  (chan, lower_32_bits(src_offset));
 880		OUT_RING  (chan, upper_32_bits(dst_offset));
 881		OUT_RING  (chan, lower_32_bits(dst_offset));
 882		OUT_RING  (chan, PAGE_SIZE);
 883		OUT_RING  (chan, PAGE_SIZE);
 884		OUT_RING  (chan, PAGE_SIZE);
 885		OUT_RING  (chan, line_count);
 886		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
 887		OUT_RING  (chan, 0x00000110);
 888
 889		page_count -= line_count;
 890		src_offset += (PAGE_SIZE * line_count);
 891		dst_offset += (PAGE_SIZE * line_count);
 892	}
 893
 894	return 0;
 895}
 896
 897static int
 898nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 899		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 900{
 901	struct nouveau_mem *mem = nouveau_mem(old_reg);
 902	int ret = RING_SPACE(chan, 7);
 903	if (ret == 0) {
 904		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
 905		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
 906		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
 907		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
 908		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 909		OUT_RING  (chan, 0x00000000 /* COPY */);
 910		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
 911	}
 912	return ret;
 913}
 914
 915static int
 916nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 917		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 918{
 919	struct nouveau_mem *mem = nouveau_mem(old_reg);
 920	int ret = RING_SPACE(chan, 7);
 921	if (ret == 0) {
 922		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
 923		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
 924		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
 925		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
 926		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
 927		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
 928		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
 929	}
 930	return ret;
 931}
 932
 933static int
 934nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
 935{
 936	int ret = RING_SPACE(chan, 6);
 937	if (ret == 0) {
 938		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
 939		OUT_RING  (chan, handle);
 940		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
 941		OUT_RING  (chan, chan->drm->ntfy.handle);
 942		OUT_RING  (chan, chan->vram.handle);
 943		OUT_RING  (chan, chan->vram.handle);
 944	}
 945
 946	return ret;
 947}
 948
 949static int
 950nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 951		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 952{
 953	struct nouveau_mem *mem = nouveau_mem(old_reg);
 954	u64 length = (new_reg->num_pages << PAGE_SHIFT);
 955	u64 src_offset = mem->vma[0].addr;
 956	u64 dst_offset = mem->vma[1].addr;
 957	int src_tiled = !!mem->kind;
 958	int dst_tiled = !!nouveau_mem(new_reg)->kind;
 959	int ret;
 960
 961	while (length) {
 962		u32 amount, stride, height;
 963
 964		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
 965		if (ret)
 966			return ret;
 967
 968		amount  = min(length, (u64)(4 * 1024 * 1024));
 969		stride  = 16 * 4;
 970		height  = amount / stride;
 971
 972		if (src_tiled) {
 973			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
 974			OUT_RING  (chan, 0);
 975			OUT_RING  (chan, 0);
 976			OUT_RING  (chan, stride);
 977			OUT_RING  (chan, height);
 978			OUT_RING  (chan, 1);
 979			OUT_RING  (chan, 0);
 980			OUT_RING  (chan, 0);
 981		} else {
 982			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
 983			OUT_RING  (chan, 1);
 984		}
 985		if (dst_tiled) {
 986			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
 987			OUT_RING  (chan, 0);
 988			OUT_RING  (chan, 0);
 989			OUT_RING  (chan, stride);
 990			OUT_RING  (chan, height);
 991			OUT_RING  (chan, 1);
 992			OUT_RING  (chan, 0);
 993			OUT_RING  (chan, 0);
 994		} else {
 995			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
 996			OUT_RING  (chan, 1);
 997		}
 998
 999		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
1000		OUT_RING  (chan, upper_32_bits(src_offset));
1001		OUT_RING  (chan, upper_32_bits(dst_offset));
1002		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
1003		OUT_RING  (chan, lower_32_bits(src_offset));
1004		OUT_RING  (chan, lower_32_bits(dst_offset));
1005		OUT_RING  (chan, stride);
1006		OUT_RING  (chan, stride);
1007		OUT_RING  (chan, stride);
1008		OUT_RING  (chan, height);
1009		OUT_RING  (chan, 0x00000101);
1010		OUT_RING  (chan, 0x00000000);
1011		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1012		OUT_RING  (chan, 0);
1013
1014		length -= amount;
1015		src_offset += amount;
1016		dst_offset += amount;
1017	}
1018
1019	return 0;
1020}
1021
1022static int
1023nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
1024{
1025	int ret = RING_SPACE(chan, 4);
1026	if (ret == 0) {
1027		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
1028		OUT_RING  (chan, handle);
1029		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
1030		OUT_RING  (chan, chan->drm->ntfy.handle);
1031	}
1032
1033	return ret;
1034}
1035
1036static inline uint32_t
1037nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
1038		      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
1039{
1040	if (reg->mem_type == TTM_PL_TT)
1041		return NvDmaTT;
1042	return chan->vram.handle;
1043}
1044
1045static int
1046nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
1047		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
1048{
1049	u32 src_offset = old_reg->start << PAGE_SHIFT;
1050	u32 dst_offset = new_reg->start << PAGE_SHIFT;
1051	u32 page_count = new_reg->num_pages;
1052	int ret;
1053
1054	ret = RING_SPACE(chan, 3);
1055	if (ret)
1056		return ret;
1057
1058	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
1059	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
1060	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
1061
1062	page_count = new_reg->num_pages;
1063	while (page_count) {
1064		int line_count = (page_count > 2047) ? 2047 : page_count;
1065
1066		ret = RING_SPACE(chan, 11);
1067		if (ret)
1068			return ret;
1069
1070		BEGIN_NV04(chan, NvSubCopy,
1071				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
1072		OUT_RING  (chan, src_offset);
1073		OUT_RING  (chan, dst_offset);
1074		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
1075		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
1076		OUT_RING  (chan, PAGE_SIZE); /* line_length */
1077		OUT_RING  (chan, line_count);
1078		OUT_RING  (chan, 0x00000101);
1079		OUT_RING  (chan, 0x00000000);
1080		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1081		OUT_RING  (chan, 0);
1082
1083		page_count -= line_count;
1084		src_offset += (PAGE_SIZE * line_count);
1085		dst_offset += (PAGE_SIZE * line_count);
1086	}
1087
1088	return 0;
1089}
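/*
 * Worked example (not part of the original file): the M2MF path transfers
 * at most 2047 PAGE_SIZE-sized lines per submission, so copying a 16 MiB
 * buffer (4096 pages with 4 KiB pages) takes three loop iterations with
 * line_count = 2047, 2047 and 2, advancing src_offset and dst_offset by
 * PAGE_SIZE * line_count each time.  The newer copy-engine paths above use
 * the same structure with an 8191-line limit.
 */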
1090
1091static int
1092nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
1093		     struct ttm_mem_reg *reg)
1094{
1095	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
1096	struct nouveau_mem *new_mem = nouveau_mem(reg);
1097	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
1098	int ret;
1099
1100	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
1101			   old_mem->mem.size, &old_mem->vma[0]);
1102	if (ret)
1103		return ret;
1104
1105	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
1106			   new_mem->mem.size, &old_mem->vma[1]);
1107	if (ret)
1108		goto done;
1109
1110	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
1111	if (ret)
1112		goto done;
1113
1114	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
1115done:
1116	if (ret) {
1117		nvif_vmm_put(vmm, &old_mem->vma[1]);
1118		nvif_vmm_put(vmm, &old_mem->vma[0]);
 1119	}
 1120	return ret;
1121}
1122
1123static int
1124nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1125		     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1126{
1127	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1128	struct nouveau_channel *chan = drm->ttm.chan;
1129	struct nouveau_cli *cli = (void *)chan->user.client;
1130	struct nouveau_fence *fence;
1131	int ret;
1132
1133	/* create temporary vmas for the transfer and attach them to the
1134	 * old nvkm_mem node, these will get cleaned up after ttm has
1135	 * destroyed the ttm_mem_reg
1136	 */
1137	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1138		ret = nouveau_bo_move_prep(drm, bo, new_reg);
1139		if (ret)
1140			return ret;
1141	}
1142
1143	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1144	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1145	if (ret == 0) {
1146		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
1147		if (ret == 0) {
1148			ret = nouveau_fence_new(chan, false, &fence);
1149			if (ret == 0) {
1150				ret = ttm_bo_move_accel_cleanup(bo,
1151								&fence->base,
1152								evict,
1153								new_reg);
1154				nouveau_fence_unref(&fence);
1155			}
1156		}
1157	}
1158	mutex_unlock(&cli->mutex);
1159	return ret;
1160}
1161
1162void
1163nouveau_bo_move_init(struct nouveau_drm *drm)
1164{
1165	static const struct {
1166		const char *name;
1167		int engine;
1168		s32 oclass;
1169		int (*exec)(struct nouveau_channel *,
1170			    struct ttm_buffer_object *,
1171			    struct ttm_mem_reg *, struct ttm_mem_reg *);
1172		int (*init)(struct nouveau_channel *, u32 handle);
1173	} _methods[] = {
1174		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
1175		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
1176		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
1177		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
1178		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1179		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1180		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
1181		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1182		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1183		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1184		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1185		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1186		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1187		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1188		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
 1189		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
 1190		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
 1191		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
 1192		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
 1193		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
 1194		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
 1195		{},
1196	const char *name = "CPU";
1197	int ret;
1198
1199	do {
1200		struct nouveau_channel *chan;
1201
1202		if (mthd->engine)
1203			chan = drm->cechan;
1204		else
1205			chan = drm->channel;
1206		if (chan == NULL)
1207			continue;
1208
1209		ret = nvif_object_init(&chan->user,
1210				       mthd->oclass | (mthd->engine << 16),
1211				       mthd->oclass, NULL, 0,
1212				       &drm->ttm.copy);
1213		if (ret == 0) {
1214			ret = mthd->init(chan, drm->ttm.copy.handle);
1215			if (ret) {
1216				nvif_object_fini(&drm->ttm.copy);
1217				continue;
1218			}
1219
1220			drm->ttm.move = mthd->exec;
1221			drm->ttm.chan = chan;
1222			name = mthd->name;
1223			break;
1224		}
1225	} while ((++mthd)->exec);
1226
1227	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1228}
1229
1230static int
1231nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1232		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1233{
1234	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
1235	struct ttm_place placement_memtype = {
1236		.fpfn = 0,
1237		.lpfn = 0,
1238		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1239	};
1240	struct ttm_placement placement;
1241	struct ttm_mem_reg tmp_reg;
1242	int ret;
1243
1244	placement.num_placement = placement.num_busy_placement = 1;
1245	placement.placement = placement.busy_placement = &placement_memtype;
1246
1247	tmp_reg = *new_reg;
1248	tmp_reg.mm_node = NULL;
1249	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
1250	if (ret)
1251		return ret;
1252
1253	ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
1254	if (ret)
1255		goto out;
1256
1257	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
1258	if (ret)
1259		goto out;
1260
1261	ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
1262out:
1263	ttm_bo_mem_put(bo, &tmp_reg);
1264	return ret;
1265}
1266
1267static int
1268nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1269		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1270{
1271	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
1272	struct ttm_place placement_memtype = {
1273		.fpfn = 0,
1274		.lpfn = 0,
1275		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1276	};
1277	struct ttm_placement placement;
1278	struct ttm_mem_reg tmp_reg;
1279	int ret;
1280
1281	placement.num_placement = placement.num_busy_placement = 1;
1282	placement.placement = placement.busy_placement = &placement_memtype;
1283
1284	tmp_reg = *new_reg;
1285	tmp_reg.mm_node = NULL;
1286	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
1287	if (ret)
1288		return ret;
1289
1290	ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
1291	if (ret)
1292		goto out;
1293
1294	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
1295	if (ret)
1296		goto out;
1297
1298out:
1299	ttm_bo_mem_put(bo, &tmp_reg);
1300	return ret;
1301}
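/*
 * Note (not part of the original file): the two "flip" helpers above exist
 * because the copy engines can only reach memory that is GPU-mappable.
 * nouveau_bo_move_flipd() handles VRAM -> SYSTEM by first doing an
 * accelerated copy into a temporary GART-bound placement and then letting
 * ttm_bo_move_ttm() unbind it into system pages; nouveau_bo_move_flips()
 * is the mirror image for SYSTEM -> VRAM, binding the pages into GART
 * before the engine copy.
 */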
1302
1303static void
1304nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
1305		     struct ttm_mem_reg *new_reg)
1306{
1307	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
1308	struct nouveau_bo *nvbo = nouveau_bo(bo);
1309	struct nouveau_vma *vma;
1310
1311	/* ttm can now (stupidly) pass the driver bos it didn't create... */
1312	if (bo->destroy != nouveau_bo_del_ttm)
1313		return;
1314
1315	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
1316	    mem->mem.page == nvbo->page) {
1317		list_for_each_entry(vma, &nvbo->vma_list, head) {
1318			nouveau_vma_map(vma, mem);
1319		}
1320	} else {
1321		list_for_each_entry(vma, &nvbo->vma_list, head) {
1322			WARN_ON(ttm_bo_wait(bo, false, false));
1323			nouveau_vma_unmap(vma);
1324		}
1325	}
1326}
1327
1328static int
1329nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
1330		   struct nouveau_drm_tile **new_tile)
1331{
1332	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1333	struct drm_device *dev = drm->dev;
1334	struct nouveau_bo *nvbo = nouveau_bo(bo);
1335	u64 offset = new_reg->start << PAGE_SHIFT;
1336
1337	*new_tile = NULL;
1338	if (new_reg->mem_type != TTM_PL_VRAM)
1339		return 0;
1340
1341	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1342		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
 1343					       nvbo->mode, nvbo->zeta);
1344	}
1345
1346	return 0;
1347}
1348
1349static void
1350nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1351		      struct nouveau_drm_tile *new_tile,
1352		      struct nouveau_drm_tile **old_tile)
1353{
1354	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1355	struct drm_device *dev = drm->dev;
1356	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
1357
1358	nv10_bo_put_tile_region(dev, *old_tile, fence);
1359	*old_tile = new_tile;
1360}
1361
1362static int
1363nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
1364		struct ttm_operation_ctx *ctx,
1365		struct ttm_mem_reg *new_reg)
1366{
1367	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1368	struct nouveau_bo *nvbo = nouveau_bo(bo);
1369	struct ttm_mem_reg *old_reg = &bo->mem;
1370	struct nouveau_drm_tile *new_tile = NULL;
1371	int ret = 0;
1372
1373	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
1374	if (ret)
1375		return ret;
1376
1377	if (nvbo->pin_refcnt)
1378		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1379
1380	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1381		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
1382		if (ret)
1383			return ret;
1384	}
1385
1386	/* Fake bo copy. */
1387	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1388		BUG_ON(bo->mem.mm_node != NULL);
1389		bo->mem = *new_reg;
1390		new_reg->mm_node = NULL;
1391		goto out;
1392	}
1393
1394	/* Hardware assisted copy. */
1395	if (drm->ttm.move) {
1396		if (new_reg->mem_type == TTM_PL_SYSTEM)
1397			ret = nouveau_bo_move_flipd(bo, evict,
1398						    ctx->interruptible,
1399						    ctx->no_wait_gpu, new_reg);
1400		else if (old_reg->mem_type == TTM_PL_SYSTEM)
1401			ret = nouveau_bo_move_flips(bo, evict,
1402						    ctx->interruptible,
1403						    ctx->no_wait_gpu, new_reg);
1404		else
1405			ret = nouveau_bo_move_m2mf(bo, evict,
1406						   ctx->interruptible,
1407						   ctx->no_wait_gpu, new_reg);
1408		if (!ret)
1409			goto out;
1410	}
1411
1412	/* Fallback to software copy. */
1413	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
1414	if (ret == 0)
1415		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
1416
1417out:
1418	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1419		if (ret)
1420			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1421		else
1422			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1423	}
1424
1425	return ret;
1426}
1427
1428static int
1429nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1430{
1431	struct nouveau_bo *nvbo = nouveau_bo(bo);
1432
1433	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
1434					  filp->private_data);
1435}
1436
1437static int
1438nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1439{
1440	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
1441	struct nouveau_drm *drm = nouveau_bdev(bdev);
1442	struct nvkm_device *device = nvxx_device(&drm->client.device);
 1443	struct nouveau_mem *mem = nouveau_mem(reg);
 1444
1445	reg->bus.addr = NULL;
1446	reg->bus.offset = 0;
1447	reg->bus.size = reg->num_pages << PAGE_SHIFT;
1448	reg->bus.base = 0;
1449	reg->bus.is_iomem = false;
1450	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1451		return -EINVAL;
1452	switch (reg->mem_type) {
1453	case TTM_PL_SYSTEM:
1454		/* System memory */
1455		return 0;
1456	case TTM_PL_TT:
1457#if IS_ENABLED(CONFIG_AGP)
1458		if (drm->agp.bridge) {
1459			reg->bus.offset = reg->start << PAGE_SHIFT;
1460			reg->bus.base = drm->agp.base;
1461			reg->bus.is_iomem = !drm->agp.cma;
1462		}
1463#endif
1464		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
1465			/* untiled */
1466			break;
1467		/* fall through - tiled memory */
1468	case TTM_PL_VRAM:
1469		reg->bus.offset = reg->start << PAGE_SHIFT;
1470		reg->bus.base = device->func->resource_addr(device, 1);
1471		reg->bus.is_iomem = true;
1472		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1473			union {
1474				struct nv50_mem_map_v0 nv50;
1475				struct gf100_mem_map_v0 gf100;
1476			} args;
1477			u64 handle, length;
1478			u32 argc = 0;
1479			int ret;
1480
1481			switch (mem->mem.object.oclass) {
1482			case NVIF_CLASS_MEM_NV50:
1483				args.nv50.version = 0;
1484				args.nv50.ro = 0;
1485				args.nv50.kind = mem->kind;
1486				args.nv50.comp = mem->comp;
1487				argc = sizeof(args.nv50);
1488				break;
1489			case NVIF_CLASS_MEM_GF100:
1490				args.gf100.version = 0;
1491				args.gf100.ro = 0;
1492				args.gf100.kind = mem->kind;
1493				argc = sizeof(args.gf100);
1494				break;
1495			default:
1496				WARN_ON(1);
1497				break;
1498			}
1499
1500			ret = nvif_object_map_handle(&mem->mem.object,
1501						     &args, argc,
1502						     &handle, &length);
1503			if (ret != 1)
1504				return ret ? ret : -EINVAL;
1505
1506			reg->bus.base = 0;
1507			reg->bus.offset = handle;
1508		}
1509		break;
1510	default:
1511		return -EINVAL;
1512	}
1513	return 0;
1514}
1515
1516static void
1517nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1518{
1519	struct nouveau_drm *drm = nouveau_bdev(bdev);
1520	struct nouveau_mem *mem = nouveau_mem(reg);
1521
1522	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1523		switch (reg->mem_type) {
1524		case TTM_PL_TT:
1525			if (mem->kind)
1526				nvif_object_unmap_handle(&mem->mem.object);
1527			break;
1528		case TTM_PL_VRAM:
1529			nvif_object_unmap_handle(&mem->mem.object);
1530			break;
1531		default:
1532			break;
1533		}
1534	}
1535}
1536
1537static int
1538nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1539{
1540	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1541	struct nouveau_bo *nvbo = nouveau_bo(bo);
1542	struct nvkm_device *device = nvxx_device(&drm->client.device);
1543	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1544	int i, ret;
1545
1546	/* as long as the bo isn't in vram, and isn't tiled, we've got
1547	 * nothing to do here.
1548	 */
1549	if (bo->mem.mem_type != TTM_PL_VRAM) {
1550		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1551		    !nvbo->kind)
1552			return 0;
1553
1554		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1555			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1556
1557			ret = nouveau_bo_validate(nvbo, false, false);
1558			if (ret)
1559				return ret;
1560		}
1561		return 0;
1562	}
1563
1564	/* make sure bo is in mappable vram */
1565	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1566	    bo->mem.start + bo->mem.num_pages < mappable)
1567		return 0;
1568
1569	for (i = 0; i < nvbo->placement.num_placement; ++i) {
1570		nvbo->placements[i].fpfn = 0;
1571		nvbo->placements[i].lpfn = mappable;
1572	}
1573
1574	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1575		nvbo->busy_placements[i].fpfn = 0;
1576		nvbo->busy_placements[i].lpfn = mappable;
1577	}
1578
1579	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1580	return nouveau_bo_validate(nvbo, false, false);
1581}
1582
1583static int
1584nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
1585{
1586	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1587	struct nouveau_drm *drm;
 1588	struct device *dev;
1589	unsigned i;
1590	int r;
1591	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1592
1593	if (ttm->state != tt_unpopulated)
1594		return 0;
1595
1596	if (slave && ttm->sg) {
1597		/* make userspace faulting work */
1598		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1599						 ttm_dma->dma_address, ttm->num_pages);
1600		ttm->state = tt_unbound;
1601		return 0;
1602	}
1603
1604	drm = nouveau_bdev(ttm->bdev);
 1605	dev = drm->dev->dev;
1606
1607#if IS_ENABLED(CONFIG_AGP)
1608	if (drm->agp.bridge) {
1609		return ttm_agp_tt_populate(ttm, ctx);
1610	}
1611#endif
1612
1613#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1614	if (swiotlb_nr_tbl()) {
1615		return ttm_dma_populate((void *)ttm, dev, ctx);
1616	}
1617#endif
1618
1619	r = ttm_pool_populate(ttm, ctx);
1620	if (r) {
1621		return r;
1622	}
1623
1624	for (i = 0; i < ttm->num_pages; i++) {
1625		dma_addr_t addr;
1626
1627		addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
1628				    DMA_BIDIRECTIONAL);
1629
1630		if (dma_mapping_error(dev, addr)) {
1631			while (i--) {
1632				dma_unmap_page(dev, ttm_dma->dma_address[i],
1633					       PAGE_SIZE, DMA_BIDIRECTIONAL);
1634				ttm_dma->dma_address[i] = 0;
1635			}
1636			ttm_pool_unpopulate(ttm);
1637			return -EFAULT;
1638		}
1639
1640		ttm_dma->dma_address[i] = addr;
1641	}
1642	return 0;
1643}
1644
1645static void
1646nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1647{
1648	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1649	struct nouveau_drm *drm;
 1650	struct device *dev;
1651	unsigned i;
1652	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1653
1654	if (slave)
1655		return;
1656
1657	drm = nouveau_bdev(ttm->bdev);
 1658	dev = drm->dev->dev;
1659
1660#if IS_ENABLED(CONFIG_AGP)
1661	if (drm->agp.bridge) {
1662		ttm_agp_tt_unpopulate(ttm);
1663		return;
1664	}
1665#endif
1666
1667#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1668	if (swiotlb_nr_tbl()) {
1669		ttm_dma_unpopulate((void *)ttm, dev);
1670		return;
1671	}
1672#endif
1673
1674	for (i = 0; i < ttm->num_pages; i++) {
1675		if (ttm_dma->dma_address[i]) {
1676			dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
1677				       DMA_BIDIRECTIONAL);
1678		}
1679	}
1680
1681	ttm_pool_unpopulate(ttm);
1682}
1683
1684void
1685nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1686{
1687	struct dma_resv *resv = nvbo->bo.base.resv;
1688
1689	if (exclusive)
1690		dma_resv_add_excl_fence(resv, &fence->base);
1691	else if (fence)
1692		dma_resv_add_shared_fence(resv, &fence->base);
1693}
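/*
 * Usage sketch (not part of the original file): after submitting GPU work,
 * writers attach their fence exclusively and readers attach it shared:
 *
 *	nouveau_bo_fence(nvbo, fence, true);	// GPU writes the buffer
 *	nouveau_bo_fence(nvbo, fence, false);	// GPU only reads it
 *
 * TTM and dma_resv then use these fences to order later moves, evictions
 * and CPU access against the pending work.
 */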
1694
1695struct ttm_bo_driver nouveau_bo_driver = {
1696	.ttm_tt_create = &nouveau_ttm_tt_create,
1697	.ttm_tt_populate = &nouveau_ttm_tt_populate,
1698	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1699	.invalidate_caches = nouveau_bo_invalidate_caches,
1700	.init_mem_type = nouveau_bo_init_mem_type,
1701	.eviction_valuable = ttm_bo_eviction_valuable,
1702	.evict_flags = nouveau_bo_evict_flags,
1703	.move_notify = nouveau_bo_move_ntfy,
1704	.move = nouveau_bo_move,
1705	.verify_access = nouveau_bo_verify_access,
1706	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1707	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 1708	.io_mem_free = &nouveau_ttm_io_mem_free,
1709};
v4.10.11
   1/*
   2 * Copyright 2007 Dave Airlied
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 */
  24/*
  25 * Authors: Dave Airlied <airlied@linux.ie>
  26 *	    Ben Skeggs   <darktama@iinet.net.au>
  27 *	    Jeremy Kolb  <jkolb@brandeis.edu>
  28 */
  29
  30#include <linux/dma-mapping.h>
  31#include <linux/swiotlb.h>
  32
  33#include "nouveau_drv.h"
  34#include "nouveau_dma.h"
  35#include "nouveau_fence.h"
  36
  37#include "nouveau_bo.h"
  38#include "nouveau_ttm.h"
  39#include "nouveau_gem.h"
 
 
 
 
 
 
  40
  41/*
  42 * NV10-NV40 tiling helpers
  43 */
  44
  45static void
  46nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
  47			   u32 addr, u32 size, u32 pitch, u32 flags)
  48{
  49	struct nouveau_drm *drm = nouveau_drm(dev);
  50	int i = reg - drm->tile.reg;
  51	struct nvkm_device *device = nvxx_device(&drm->device);
  52	struct nvkm_fb *fb = device->fb;
  53	struct nvkm_fb_tile *tile = &fb->tile.region[i];
  54
  55	nouveau_fence_unref(&reg->fence);
  56
  57	if (tile->pitch)
  58		nvkm_fb_tile_fini(fb, i, tile);
  59
  60	if (pitch)
  61		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
  62
  63	nvkm_fb_tile_prog(fb, i, tile);
  64}
  65
  66static struct nouveau_drm_tile *
  67nv10_bo_get_tile_region(struct drm_device *dev, int i)
  68{
  69	struct nouveau_drm *drm = nouveau_drm(dev);
  70	struct nouveau_drm_tile *tile = &drm->tile.reg[i];
  71
  72	spin_lock(&drm->tile.lock);
  73
  74	if (!tile->used &&
  75	    (!tile->fence || nouveau_fence_done(tile->fence)))
  76		tile->used = true;
  77	else
  78		tile = NULL;
  79
  80	spin_unlock(&drm->tile.lock);
  81	return tile;
  82}
  83
  84static void
  85nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
  86			struct dma_fence *fence)
  87{
  88	struct nouveau_drm *drm = nouveau_drm(dev);
  89
  90	if (tile) {
  91		spin_lock(&drm->tile.lock);
  92		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
  93		tile->used = false;
  94		spin_unlock(&drm->tile.lock);
  95	}
  96}
  97
  98static struct nouveau_drm_tile *
  99nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 100		   u32 size, u32 pitch, u32 flags)
 101{
 102	struct nouveau_drm *drm = nouveau_drm(dev);
 103	struct nvkm_fb *fb = nvxx_fb(&drm->device);
 104	struct nouveau_drm_tile *tile, *found = NULL;
 105	int i;
 106
 107	for (i = 0; i < fb->tile.regions; i++) {
 108		tile = nv10_bo_get_tile_region(dev, i);
 109
 110		if (pitch && !found) {
 111			found = tile;
 112			continue;
 113
 114		} else if (tile && fb->tile.region[i].pitch) {
 115			/* Kill an unused tile region. */
 116			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
 117		}
 118
 119		nv10_bo_put_tile_region(dev, tile, NULL);
 120	}
 121
 122	if (found)
 123		nv10_bo_update_tile_region(dev, found, addr, size,
 124					    pitch, flags);
 125	return found;
 126}
 127
 128static void
 129nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 130{
 131	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 132	struct drm_device *dev = drm->dev;
 133	struct nouveau_bo *nvbo = nouveau_bo(bo);
 134
 135	if (unlikely(nvbo->gem.filp))
 136		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 137	WARN_ON(nvbo->pin_refcnt > 0);
 138	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 
 
 
 
 
 
 
 
 139	kfree(nvbo);
 140}
 141
 
 
 
 
 
 
 
 
 142static void
 143nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 144		       int *align, int *size)
 145{
 146	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 147	struct nvif_device *device = &drm->device;
 148
 149	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
 150		if (nvbo->tile_mode) {
 151			if (device->info.chipset >= 0x40) {
 152				*align = 65536;
 153				*size = roundup(*size, 64 * nvbo->tile_mode);
 154
 155			} else if (device->info.chipset >= 0x30) {
 156				*align = 32768;
 157				*size = roundup(*size, 64 * nvbo->tile_mode);
 158
 159			} else if (device->info.chipset >= 0x20) {
 160				*align = 16384;
 161				*size = roundup(*size, 64 * nvbo->tile_mode);
 162
 163			} else if (device->info.chipset >= 0x10) {
 164				*align = 16384;
 165				*size = roundup(*size, 32 * nvbo->tile_mode);
 166			}
 167		}
 168	} else {
 169		*size = roundup(*size, (1 << nvbo->page_shift));
 170		*align = max((1 <<  nvbo->page_shift), *align);
 171	}
 172
 173	*size = roundup(*size, PAGE_SIZE);
 174}
 175
 176int
 177nouveau_bo_new(struct drm_device *dev, int size, int align,
 178	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
 179	       struct sg_table *sg, struct reservation_object *robj,
 180	       struct nouveau_bo **pnvbo)
 181{
 182	struct nouveau_drm *drm = nouveau_drm(dev);
 183	struct nouveau_bo *nvbo;
 184	size_t acc_size;
 185	int ret;
 186	int type = ttm_bo_type_device;
 187	int lpg_shift = 12;
 188	int max_size;
 189
 190	if (drm->client.vm)
 191		lpg_shift = drm->client.vm->mmu->lpg_shift;
 192	max_size = INT_MAX & ~((1 << lpg_shift) - 1);
 193
 194	if (size <= 0 || size > max_size) {
 195		NV_WARN(drm, "skipped size %x\n", (u32)size);
 196		return -EINVAL;
 197	}
 198
 199	if (sg)
 200		type = ttm_bo_type_sg;
 201
 202	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 203	if (!nvbo)
 204		return -ENOMEM;
 205	INIT_LIST_HEAD(&nvbo->head);
 206	INIT_LIST_HEAD(&nvbo->entry);
 207	INIT_LIST_HEAD(&nvbo->vma_list);
 208	nvbo->tile_mode = tile_mode;
 209	nvbo->tile_flags = tile_flags;
 210	nvbo->bo.bdev = &drm->ttm.bdev;
 211
 212	if (!nvxx_device(&drm->device)->func->cpu_coherent)
 213		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 214
 215	nvbo->page_shift = 12;
 216	if (drm->client.vm) {
 217		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
 218			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
 219	}
 220
 221	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 222	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 223	nouveau_bo_placement_set(nvbo, flags, 0);
 224
 225	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
 226				       sizeof(struct nouveau_bo));
 227
 228	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
 229			  type, &nvbo->placement,
 230			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
 231			  robj, nouveau_bo_del_ttm);
 232	if (ret) {
  233		/* ttm will call nouveau_bo_del_ttm if it fails */
 234		return ret;
 235	}
 236
 237	*pnvbo = nvbo;
 238	return 0;
 239}
 240
 241static void
 242set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
 243{
 244	*n = 0;
 245
 246	if (type & TTM_PL_FLAG_VRAM)
 247		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
 248	if (type & TTM_PL_FLAG_TT)
 249		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
 250	if (type & TTM_PL_FLAG_SYSTEM)
 251		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
 252}
 253
 254static void
 255set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 256{
 257	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 258	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
 259	unsigned i, fpfn, lpfn;
 260
 261	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
 262	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
 263	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 264		/*
 265		 * Make sure that the color and depth buffers are handled
 266		 * by independent memory controller units. Up to a 9x
 267		 * speed up when alpha-blending and depth-test are enabled
 268		 * at the same time.
 269		 */
 270		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
 271			fpfn = vram_pages / 2;
 272			lpfn = ~0;
 273		} else {
 274			fpfn = 0;
 275			lpfn = vram_pages / 2;
 276		}
 277		for (i = 0; i < nvbo->placement.num_placement; ++i) {
 278			nvbo->placements[i].fpfn = fpfn;
 279			nvbo->placements[i].lpfn = lpfn;
 280		}
 281		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
 282			nvbo->busy_placements[i].fpfn = fpfn;
 283			nvbo->busy_placements[i].lpfn = lpfn;
 284		}
 285	}
 286}
 287
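/*
 * Build the TTM placement and busy-placement lists from a domain mask,
 * applying caching and no-evict-while-pinned flags, then apply the
 * NV10 VRAM-half heuristic implemented above.
 */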
 288void
 289nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 290{
 291	struct ttm_placement *pl = &nvbo->placement;
 292	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
 293						 TTM_PL_MASK_CACHING) |
 294			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 295
 296	pl->placement = nvbo->placements;
 297	set_placement_list(nvbo->placements, &pl->num_placement,
 298			   type, flags);
 299
 300	pl->busy_placement = nvbo->busy_placements;
 301	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
 302			   type | busy, flags);
 303
 304	set_placement_range(nvbo, type);
 305}
 306
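/*
 * Pin a bo into the requested memory type.  Pinning is refcounted; a
 * contiguous-VRAM request may first evict a multi-region allocation to
 * GART so that revalidation can produce a single contiguous block.
 */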
 307int
 308nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 309{
 310	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 311	struct ttm_buffer_object *bo = &nvbo->bo;
 312	bool force = false, evict = false;
 313	int ret;
 314
 315	ret = ttm_bo_reserve(bo, false, false, NULL);
 316	if (ret)
 317		return ret;
 318
 319	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
 320	    memtype == TTM_PL_FLAG_VRAM && contig) {
 321		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
 322			if (bo->mem.mem_type == TTM_PL_VRAM) {
 323				struct nvkm_mem *mem = bo->mem.mm_node;
 324				if (!list_is_singular(&mem->regions))
 325					evict = true;
 326			}
 327			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
 328			force = true;
 329		}
 330	}
 331
 332	if (nvbo->pin_refcnt) {
 333		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
 334			NV_ERROR(drm, "bo %p pinned elsewhere: "
 335				      "0x%08x vs 0x%08x\n", bo,
 336				 1 << bo->mem.mem_type, memtype);
 337			ret = -EBUSY;
 338		}
 339		nvbo->pin_refcnt++;
 340		goto out;
 341	}
 342
 343	if (evict) {
 344		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
 345		ret = nouveau_bo_validate(nvbo, false, false);
 346		if (ret)
 347			goto out;
 348	}
 349
 350	nvbo->pin_refcnt++;
 351	nouveau_bo_placement_set(nvbo, memtype, 0);
 352
 353	/* drop pin_refcnt temporarily, so we don't trip the assertion
 354	 * in nouveau_bo_move() that makes sure we're not trying to
 355	 * move a pinned buffer
 356	 */
 357	nvbo->pin_refcnt--;
 358	ret = nouveau_bo_validate(nvbo, false, false);
 359	if (ret)
 360		goto out;
 361	nvbo->pin_refcnt++;
 362
 363	switch (bo->mem.mem_type) {
 364	case TTM_PL_VRAM:
 365		drm->gem.vram_available -= bo->mem.size;
 366		break;
 367	case TTM_PL_TT:
 368		drm->gem.gart_available -= bo->mem.size;
 369		break;
 370	default:
 371		break;
 372	}
 373
 374out:
 375	if (force && ret)
 376		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
 377	ttm_bo_unreserve(bo);
 378	return ret;
 379}
 380
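/*
 * Drop one pin reference; on the last unpin the bo becomes evictable
 * again and its VRAM/GART accounting is returned.
 */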
 381int
 382nouveau_bo_unpin(struct nouveau_bo *nvbo)
 383{
 384	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 385	struct ttm_buffer_object *bo = &nvbo->bo;
 386	int ret, ref;
 387
 388	ret = ttm_bo_reserve(bo, false, false, NULL);
 389	if (ret)
 390		return ret;
 391
 392	ref = --nvbo->pin_refcnt;
 393	WARN_ON_ONCE(ref < 0);
 394	if (ref)
 395		goto out;
 396
 397	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 398
 399	ret = nouveau_bo_validate(nvbo, false, false);
 400	if (ret == 0) {
 401		switch (bo->mem.mem_type) {
 402		case TTM_PL_VRAM:
 403			drm->gem.vram_available += bo->mem.size;
 404			break;
 405		case TTM_PL_TT:
 406			drm->gem.gart_available += bo->mem.size;
 407			break;
 408		default:
 409			break;
 410		}
 411	}
 412
 413out:
 414	ttm_bo_unreserve(bo);
 415	return ret;
 416}
 417
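/*
 * Kernel-map the whole bo through TTM's kmap interface.  The mapping
 * is cached in nvbo->kmap for the rd/wr accessors below.
 */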
 418int
 419nouveau_bo_map(struct nouveau_bo *nvbo)
 420{
 421	int ret;
 422
 423	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 424	if (ret)
 425		return ret;
 426
 427	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
 428
 429	ttm_bo_unreserve(&nvbo->bo);
 430	return ret;
 431}
 432
 433void
 434nouveau_bo_unmap(struct nouveau_bo *nvbo)
 435{
 436	if (!nvbo)
 437		return;
 438
 439	ttm_bo_kunmap(&nvbo->kmap);
 440}
 441
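/*
 * Streaming-DMA bookkeeping: flush CPU writes out to the device (or,
 * in the _for_cpu variant below, make device writes visible before CPU
 * reads).  Coherent objects skip the per-page sync entirely.
 */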
 442void
 443nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 444{
 445	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 446	struct nvkm_device *device = nvxx_device(&drm->device);
 447	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 448	int i;
 449
 450	if (!ttm_dma)
 451		return;
 452
 453	/* Don't waste time looping if the object is coherent */
 454	if (nvbo->force_coherent)
 455		return;
 456
 457	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
 458		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
 459					   PAGE_SIZE, DMA_TO_DEVICE);
 460}
 461
 462void
 463nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 464{
 465	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 466	struct nvkm_device *device = nvxx_device(&drm->device);
 467	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 468	int i;
 469
 470	if (!ttm_dma)
 471		return;
 472
 473	/* Don't waste time looping if the object is coherent */
 474	if (nvbo->force_coherent)
 475		return;
 476
 477	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
 478		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
 479					PAGE_SIZE, DMA_FROM_DEVICE);
 480}
 481
 482int
 483nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 484		    bool no_wait_gpu)
 485{
 486	int ret;
 487
 488	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 489			      interruptible, no_wait_gpu);
 490	if (ret)
 491		return ret;
 492
 493	nouveau_bo_sync_for_device(nvbo);
 494
 495	return 0;
 496}
 497
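/*
 * Small helpers that read/write the kmap'd bo, using io accessors when
 * the mapping is iomem (VRAM) and plain loads/stores otherwise.
 */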
 498void
 499nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
 500{
 501	bool is_iomem;
 502	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 503
 504	mem += index;
 505
 506	if (is_iomem)
 507		iowrite16_native(val, (void __force __iomem *)mem);
 508	else
 509		*mem = val;
 510}
 511
 512u32
 513nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
 514{
 515	bool is_iomem;
 516	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 517
 518	mem += index;
 519
 520	if (is_iomem)
 521		return ioread32_native((void __force __iomem *)mem);
 522	else
 523		return *mem;
 524}
 525
 526void
 527nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 528{
 529	bool is_iomem;
 530	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 531
 532	mem += index;
 533
 534	if (is_iomem)
 535		iowrite32_native(val, (void __force __iomem *)mem);
 536	else
 537		*mem = val;
 538}
 539
 540static struct ttm_tt *
 541nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 542		      uint32_t page_flags, struct page *dummy_read)
 543{
 544#if IS_ENABLED(CONFIG_AGP)
 545	struct nouveau_drm *drm = nouveau_bdev(bdev);
 546
 547	if (drm->agp.bridge) {
 548		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
 549					 page_flags, dummy_read);
 550	}
 551#endif
 552
 553	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
 554}
 555
 556static int
 557nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 558{
 559	/* We'll do this from user space. */
 560	return 0;
 561}
 562
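/*
 * Describe each TTM memory type: available caching modes, whether it
 * is CPU-mappable, and which range manager backs it (per-generation
 * managers for VRAM/GART on Tesla and later).
 */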
 563static int
 564nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 565			 struct ttm_mem_type_manager *man)
 566{
 567	struct nouveau_drm *drm = nouveau_bdev(bdev);
 568
 569	switch (type) {
 570	case TTM_PL_SYSTEM:
 571		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 572		man->available_caching = TTM_PL_MASK_CACHING;
 573		man->default_caching = TTM_PL_FLAG_CACHED;
 574		break;
 575	case TTM_PL_VRAM:
 576		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 577			     TTM_MEMTYPE_FLAG_MAPPABLE;
 578		man->available_caching = TTM_PL_FLAG_UNCACHED |
 579					 TTM_PL_FLAG_WC;
 580		man->default_caching = TTM_PL_FLAG_WC;
 581
 582		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 583			/* Some BARs do not support being ioremapped WC */
 584			if (nvxx_bar(&drm->device)->iomap_uncached) {
 585				man->available_caching = TTM_PL_FLAG_UNCACHED;
 586				man->default_caching = TTM_PL_FLAG_UNCACHED;
 587			}
 588
 589			man->func = &nouveau_vram_manager;
 590			man->io_reserve_fastpath = false;
 591			man->use_io_reserve_lru = true;
 592		} else {
 593			man->func = &ttm_bo_manager_func;
 594		}
 595		break;
 596	case TTM_PL_TT:
 597		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 598			man->func = &nouveau_gart_manager;
 599		else
 600		if (!drm->agp.bridge)
 601			man->func = &nv04_gart_manager;
 602		else
 603			man->func = &ttm_bo_manager_func;
 604
 605		if (drm->agp.bridge) {
 606			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 607			man->available_caching = TTM_PL_FLAG_UNCACHED |
 608				TTM_PL_FLAG_WC;
 609			man->default_caching = TTM_PL_FLAG_WC;
 610		} else {
 611			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 612				     TTM_MEMTYPE_FLAG_CMA;
 613			man->available_caching = TTM_PL_MASK_CACHING;
 614			man->default_caching = TTM_PL_FLAG_CACHED;
 615		}
 616
 617		break;
 618	default:
 619		return -EINVAL;
 620	}
 621	return 0;
 622}
 623
 624static void
 625nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 626{
 627	struct nouveau_bo *nvbo = nouveau_bo(bo);
 628
 629	switch (bo->mem.mem_type) {
 630	case TTM_PL_VRAM:
 631		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
 632					 TTM_PL_FLAG_SYSTEM);
 633		break;
 634	default:
 635		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
 636		break;
 637	}
 638
 639	*pl = nvbo->placement;
 640}
 641
 642
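/*
 * Per-generation GPU copy paths used for bo migration.  Each *_init()
 * binds the copy class to a channel subchannel; each *_copy()/_m2mf()
 * emits the methods for one VRAM<->GART transfer.
 */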
 643static int
 644nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 645{
 646	int ret = RING_SPACE(chan, 2);
 647	if (ret == 0) {
 648		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
 649		OUT_RING  (chan, handle & 0x0000ffff);
 650		FIRE_RING (chan);
 651	}
 652	return ret;
 653}
 654
 655static int
 656nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 657		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 658{
 659	struct nvkm_mem *node = old_mem->mm_node;
 660	int ret = RING_SPACE(chan, 10);
 661	if (ret == 0) {
 662		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
 663		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
 664		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
 665		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
 666		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
 667		OUT_RING  (chan, PAGE_SIZE);
 668		OUT_RING  (chan, PAGE_SIZE);
 669		OUT_RING  (chan, PAGE_SIZE);
 670		OUT_RING  (chan, new_mem->num_pages);
 671		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
 672	}
 673	return ret;
 674}
 675
 676static int
 677nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 678{
 679	int ret = RING_SPACE(chan, 2);
 680	if (ret == 0) {
 681		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
 682		OUT_RING  (chan, handle);
 683	}
 684	return ret;
 685}
 686
 687static int
 688nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 689		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 690{
 691	struct nvkm_mem *node = old_mem->mm_node;
 692	u64 src_offset = node->vma[0].offset;
 693	u64 dst_offset = node->vma[1].offset;
 694	u32 page_count = new_mem->num_pages;
 695	int ret;
 696
 698	while (page_count) {
 699		int line_count = (page_count > 8191) ? 8191 : page_count;
 700
 701		ret = RING_SPACE(chan, 11);
 702		if (ret)
 703			return ret;
 704
 705		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
 706		OUT_RING  (chan, upper_32_bits(src_offset));
 707		OUT_RING  (chan, lower_32_bits(src_offset));
 708		OUT_RING  (chan, upper_32_bits(dst_offset));
 709		OUT_RING  (chan, lower_32_bits(dst_offset));
 710		OUT_RING  (chan, PAGE_SIZE);
 711		OUT_RING  (chan, PAGE_SIZE);
 712		OUT_RING  (chan, PAGE_SIZE);
 713		OUT_RING  (chan, line_count);
 714		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
 715		OUT_RING  (chan, 0x00000110);
 716
 717		page_count -= line_count;
 718		src_offset += (PAGE_SIZE * line_count);
 719		dst_offset += (PAGE_SIZE * line_count);
 720	}
 721
 722	return 0;
 723}
 724
 725static int
 726nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 727		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 728{
 729	struct nvkm_mem *node = old_mem->mm_node;
 730	u64 src_offset = node->vma[0].offset;
 731	u64 dst_offset = node->vma[1].offset;
 732	u32 page_count = new_mem->num_pages;
 733	int ret;
 734
 736	while (page_count) {
 737		int line_count = (page_count > 2047) ? 2047 : page_count;
 738
 739		ret = RING_SPACE(chan, 12);
 740		if (ret)
 741			return ret;
 742
 743		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
 744		OUT_RING  (chan, upper_32_bits(dst_offset));
 745		OUT_RING  (chan, lower_32_bits(dst_offset));
 746		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
 747		OUT_RING  (chan, upper_32_bits(src_offset));
 748		OUT_RING  (chan, lower_32_bits(src_offset));
 749		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
 750		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
 751		OUT_RING  (chan, PAGE_SIZE); /* line_length */
 752		OUT_RING  (chan, line_count);
 753		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
 754		OUT_RING  (chan, 0x00100110);
 755
 756		page_count -= line_count;
 757		src_offset += (PAGE_SIZE * line_count);
 758		dst_offset += (PAGE_SIZE * line_count);
 759	}
 760
 761	return 0;
 762}
 763
 764static int
 765nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 766		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 767{
 768	struct nvkm_mem *node = old_mem->mm_node;
 769	u64 src_offset = node->vma[0].offset;
 770	u64 dst_offset = node->vma[1].offset;
 771	u32 page_count = new_mem->num_pages;
 772	int ret;
 773
 775	while (page_count) {
 776		int line_count = (page_count > 8191) ? 8191 : page_count;
 777
 778		ret = RING_SPACE(chan, 11);
 779		if (ret)
 780			return ret;
 781
 782		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
 783		OUT_RING  (chan, upper_32_bits(src_offset));
 784		OUT_RING  (chan, lower_32_bits(src_offset));
 785		OUT_RING  (chan, upper_32_bits(dst_offset));
 786		OUT_RING  (chan, lower_32_bits(dst_offset));
 787		OUT_RING  (chan, PAGE_SIZE);
 788		OUT_RING  (chan, PAGE_SIZE);
 789		OUT_RING  (chan, PAGE_SIZE);
 790		OUT_RING  (chan, line_count);
 791		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
 792		OUT_RING  (chan, 0x00000110);
 793
 794		page_count -= line_count;
 795		src_offset += (PAGE_SIZE * line_count);
 796		dst_offset += (PAGE_SIZE * line_count);
 797	}
 798
 799	return 0;
 800}
 801
 802static int
 803nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 804		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 805{
 806	struct nvkm_mem *node = old_mem->mm_node;
 807	int ret = RING_SPACE(chan, 7);
 808	if (ret == 0) {
 809		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
 810		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
 811		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
 812		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
 813		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
 814		OUT_RING  (chan, 0x00000000 /* COPY */);
 815		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
 816	}
 817	return ret;
 818}
 819
 820static int
 821nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 822		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 823{
 824	struct nvkm_mem *node = old_mem->mm_node;
 825	int ret = RING_SPACE(chan, 7);
 826	if (ret == 0) {
 827		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
 828		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
 829		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
 830		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
 831		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
 832		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
 833		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
 834	}
 835	return ret;
 836}
 837
 838static int
 839nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
 840{
 841	int ret = RING_SPACE(chan, 6);
 842	if (ret == 0) {
 843		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
 844		OUT_RING  (chan, handle);
 845		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
 846		OUT_RING  (chan, chan->drm->ntfy.handle);
 847		OUT_RING  (chan, chan->vram.handle);
 848		OUT_RING  (chan, chan->vram.handle);
 849	}
 850
 851	return ret;
 852}
 853
 854static int
 855nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 856		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 857{
 858	struct nvkm_mem *node = old_mem->mm_node;
 859	u64 length = (new_mem->num_pages << PAGE_SHIFT);
 860	u64 src_offset = node->vma[0].offset;
 861	u64 dst_offset = node->vma[1].offset;
 862	int src_tiled = !!node->memtype;
 863	int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
 864	int ret;
 865
 866	while (length) {
 867		u32 amount, stride, height;
 868
 869		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
 870		if (ret)
 871			return ret;
 872
 873		amount  = min(length, (u64)(4 * 1024 * 1024));
 874		stride  = 16 * 4;
 875		height  = amount / stride;
 876
 877		if (src_tiled) {
 878			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
 879			OUT_RING  (chan, 0);
 880			OUT_RING  (chan, 0);
 881			OUT_RING  (chan, stride);
 882			OUT_RING  (chan, height);
 883			OUT_RING  (chan, 1);
 884			OUT_RING  (chan, 0);
 885			OUT_RING  (chan, 0);
 886		} else {
 887			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
 888			OUT_RING  (chan, 1);
 889		}
 890		if (dst_tiled) {
 891			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
 892			OUT_RING  (chan, 0);
 893			OUT_RING  (chan, 0);
 894			OUT_RING  (chan, stride);
 895			OUT_RING  (chan, height);
 896			OUT_RING  (chan, 1);
 897			OUT_RING  (chan, 0);
 898			OUT_RING  (chan, 0);
 899		} else {
 900			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
 901			OUT_RING  (chan, 1);
 902		}
 903
 904		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
 905		OUT_RING  (chan, upper_32_bits(src_offset));
 906		OUT_RING  (chan, upper_32_bits(dst_offset));
 907		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
 908		OUT_RING  (chan, lower_32_bits(src_offset));
 909		OUT_RING  (chan, lower_32_bits(dst_offset));
 910		OUT_RING  (chan, stride);
 911		OUT_RING  (chan, stride);
 912		OUT_RING  (chan, stride);
 913		OUT_RING  (chan, height);
 914		OUT_RING  (chan, 0x00000101);
 915		OUT_RING  (chan, 0x00000000);
 916		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 917		OUT_RING  (chan, 0);
 918
 919		length -= amount;
 920		src_offset += amount;
 921		dst_offset += amount;
 922	}
 923
 924	return 0;
 925}
 926
 927static int
 928nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
 929{
 930	int ret = RING_SPACE(chan, 4);
 931	if (ret == 0) {
 932		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
 933		OUT_RING  (chan, handle);
 934		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
 935		OUT_RING  (chan, chan->drm->ntfy.handle);
 936	}
 937
 938	return ret;
 939}
 940
 941static inline uint32_t
 942nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 943		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 944{
 945	if (mem->mem_type == TTM_PL_TT)
 946		return NvDmaTT;
 947	return chan->vram.handle;
 948}
 949
 950static int
 951nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 952		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 953{
 954	u32 src_offset = old_mem->start << PAGE_SHIFT;
 955	u32 dst_offset = new_mem->start << PAGE_SHIFT;
 956	u32 page_count = new_mem->num_pages;
 957	int ret;
 958
 959	ret = RING_SPACE(chan, 3);
 960	if (ret)
 961		return ret;
 962
 963	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
 964	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
 965	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
 966
 968	while (page_count) {
 969		int line_count = (page_count > 2047) ? 2047 : page_count;
 970
 971		ret = RING_SPACE(chan, 11);
 972		if (ret)
 973			return ret;
 974
 975		BEGIN_NV04(chan, NvSubCopy,
 976				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
 977		OUT_RING  (chan, src_offset);
 978		OUT_RING  (chan, dst_offset);
 979		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
 980		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
 981		OUT_RING  (chan, PAGE_SIZE); /* line_length */
 982		OUT_RING  (chan, line_count);
 983		OUT_RING  (chan, 0x00000101);
 984		OUT_RING  (chan, 0x00000000);
 985		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 986		OUT_RING  (chan, 0);
 987
 988		page_count -= line_count;
 989		src_offset += (PAGE_SIZE * line_count);
 990		dst_offset += (PAGE_SIZE * line_count);
 991	}
 992
 993	return 0;
 994}
 995
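/*
 * Map both the old and new backing store into the client VM so the
 * copy engine can address them.  Both temporary VMAs are attached to
 * the *old* node and are torn down with it once TTM destroys the
 * ttm_mem_reg (see nouveau_bo_move_m2mf()).
 */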
 996static int
 997nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 998		     struct ttm_mem_reg *mem)
 999{
1000	struct nvkm_mem *old_node = bo->mem.mm_node;
1001	struct nvkm_mem *new_node = mem->mm_node;
1002	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
1003	int ret;
1004
1005	ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
1006			  NV_MEM_ACCESS_RW, &old_node->vma[0]);
1007	if (ret)
1008		return ret;
1009
1010	ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
1011			  NV_MEM_ACCESS_RW, &old_node->vma[1]);
1012	if (ret) {
1013		nvkm_vm_put(&old_node->vma[0]);
1014		return ret;
1015	}
1016
1017	nvkm_vm_map(&old_node->vma[0], old_node);
1018	nvkm_vm_map(&old_node->vma[1], new_node);
1019	return 0;
1020}
1021
1022static int
1023nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1024		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1025{
1026	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1027	struct nouveau_channel *chan = drm->ttm.chan;
1028	struct nouveau_cli *cli = (void *)chan->user.client;
1029	struct nouveau_fence *fence;
1030	int ret;
1031
1032	/* create temporary vmas for the transfer and attach them to the
1033	 * old nvkm_mem node, these will get cleaned up after ttm has
1034	 * destroyed the ttm_mem_reg
1035	 */
1036	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1037		ret = nouveau_bo_move_prep(drm, bo, new_mem);
1038		if (ret)
1039			return ret;
1040	}
1041
1042	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1043	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1044	if (ret == 0) {
1045		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
1046		if (ret == 0) {
1047			ret = nouveau_fence_new(chan, false, &fence);
1048			if (ret == 0) {
1049				ret = ttm_bo_move_accel_cleanup(bo,
1050								&fence->base,
1051								evict,
1052								new_mem);
1053				nouveau_fence_unref(&fence);
1054			}
1055		}
1056	}
1057	mutex_unlock(&cli->mutex);
1058	return ret;
1059}
1060
1061void
1062nouveau_bo_move_init(struct nouveau_drm *drm)
1063{
1064	static const struct {
1065		const char *name;
1066		int engine;
1067		s32 oclass;
1068		int (*exec)(struct nouveau_channel *,
1069			    struct ttm_buffer_object *,
1070			    struct ttm_mem_reg *, struct ttm_mem_reg *);
1071		int (*init)(struct nouveau_channel *, u32 handle);
1072	} _methods[] = {
1073		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1074		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1075		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
1076		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1077		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1078		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1079		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1080		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1081		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1082		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1083		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
1084		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1085		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1086		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1087		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
1088		{},
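		/* the empty sentinel above terminates the probe loop, so
		 * the 0x88b4 fallback below is never reached; it appears
		 * to be intentionally disabled */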
1089		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
1090	}, *mthd = _methods;
1091	const char *name = "CPU";
1092	int ret;
1093
1094	do {
1095		struct nouveau_channel *chan;
1096
1097		if (mthd->engine)
1098			chan = drm->cechan;
1099		else
1100			chan = drm->channel;
1101		if (chan == NULL)
1102			continue;
1103
1104		ret = nvif_object_init(&chan->user,
1105				       mthd->oclass | (mthd->engine << 16),
1106				       mthd->oclass, NULL, 0,
1107				       &drm->ttm.copy);
1108		if (ret == 0) {
1109			ret = mthd->init(chan, drm->ttm.copy.handle);
1110			if (ret) {
1111				nvif_object_fini(&drm->ttm.copy);
1112				continue;
1113			}
1114
1115			drm->ttm.move = mthd->exec;
1116			drm->ttm.chan = chan;
1117			name = mthd->name;
1118			break;
1119		}
1120	} while ((++mthd)->exec);
1121
1122	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1123}
1124
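/*
 * VRAM -> SYSTEM move: bounce through a GART placement so the copy
 * engine can transfer into GART-bound pages, then let TTM finish the
 * move into system memory.
 */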
1125static int
1126nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1127		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1128{
1129	struct ttm_place placement_memtype = {
1130		.fpfn = 0,
1131		.lpfn = 0,
1132		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1133	};
1134	struct ttm_placement placement;
1135	struct ttm_mem_reg tmp_mem;
1136	int ret;
1137
1138	placement.num_placement = placement.num_busy_placement = 1;
1139	placement.placement = placement.busy_placement = &placement_memtype;
1140
1141	tmp_mem = *new_mem;
1142	tmp_mem.mm_node = NULL;
1143	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1144	if (ret)
1145		return ret;
1146
1147	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
1148	if (ret)
1149		goto out;
1150
1151	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
1152	if (ret)
1153		goto out;
1154
1155	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
1156out:
1157	ttm_bo_mem_put(bo, &tmp_mem);
1158	return ret;
1159}
1160
1161static int
1162nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1163		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1164{
1165	struct ttm_place placement_memtype = {
1166		.fpfn = 0,
1167		.lpfn = 0,
1168		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1169	};
1170	struct ttm_placement placement;
1171	struct ttm_mem_reg tmp_mem;
1172	int ret;
1173
1174	placement.num_placement = placement.num_busy_placement = 1;
1175	placement.placement = placement.busy_placement = &placement_memtype;
1176
1177	tmp_mem = *new_mem;
1178	tmp_mem.mm_node = NULL;
1179	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1180	if (ret)
1181		return ret;
1182
1183	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
1184	if (ret)
1185		goto out;
1186
1187	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
1191out:
1192	ttm_bo_mem_put(bo, &tmp_mem);
1193	return ret;
1194}
1195
1196static void
1197nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1198{
1199	struct nouveau_bo *nvbo = nouveau_bo(bo);
1200	struct nvkm_vma *vma;
1201
1202	/* ttm can now (stupidly) pass the driver bos it didn't create... */
1203	if (bo->destroy != nouveau_bo_del_ttm)
1204		return;
1205
1206	list_for_each_entry(vma, &nvbo->vma_list, head) {
1207		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
1208			      (new_mem->mem_type == TTM_PL_VRAM ||
1209			       nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
1210			nvkm_vm_map(vma, new_mem->mm_node);
1211		} else {
1212			WARN_ON(ttm_bo_wait(bo, false, false));
1213			nvkm_vm_unmap(vma);
1214		}
1215	}
1216}
1217
1218static int
1219nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1220		   struct nouveau_drm_tile **new_tile)
1221{
1222	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1223	struct drm_device *dev = drm->dev;
1224	struct nouveau_bo *nvbo = nouveau_bo(bo);
1225	u64 offset = new_mem->start << PAGE_SHIFT;
1226
1227	*new_tile = NULL;
1228	if (new_mem->mem_type != TTM_PL_VRAM)
1229		return 0;
1230
1231	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1232		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1233						nvbo->tile_mode,
1234						nvbo->tile_flags);
1235	}
1236
1237	return 0;
1238}
1239
1240static void
1241nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1242		      struct nouveau_drm_tile *new_tile,
1243		      struct nouveau_drm_tile **old_tile)
1244{
1245	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1246	struct drm_device *dev = drm->dev;
1247	struct dma_fence *fence = reservation_object_get_excl(bo->resv);
1248
1249	nv10_bo_put_tile_region(dev, *old_tile, fence);
1250	*old_tile = new_tile;
1251}
1252
1253static int
1254nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1255		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1256{
1257	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1258	struct nouveau_bo *nvbo = nouveau_bo(bo);
1259	struct ttm_mem_reg *old_mem = &bo->mem;
1260	struct nouveau_drm_tile *new_tile = NULL;
1261	int ret = 0;
1262
1263	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1264	if (ret)
1265		return ret;
1266
1267	if (nvbo->pin_refcnt)
1268		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1269
1270	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1271		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1272		if (ret)
1273			return ret;
1274	}
1275
1276	/* Fake bo copy. */
1277	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1278		BUG_ON(bo->mem.mm_node != NULL);
1279		bo->mem = *new_mem;
1280		new_mem->mm_node = NULL;
1281		goto out;
1282	}
1283
1284	/* Hardware assisted copy. */
1285	if (drm->ttm.move) {
1286		if (new_mem->mem_type == TTM_PL_SYSTEM)
1287			ret = nouveau_bo_move_flipd(bo, evict, intr,
1288						    no_wait_gpu, new_mem);
1289		else if (old_mem->mem_type == TTM_PL_SYSTEM)
1290			ret = nouveau_bo_move_flips(bo, evict, intr,
1291						    no_wait_gpu, new_mem);
1292		else
1293			ret = nouveau_bo_move_m2mf(bo, evict, intr,
1294						   no_wait_gpu, new_mem);
1295		if (!ret)
1296			goto out;
1297	}
1298
1299	/* Fallback to software copy. */
1300	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1301	if (ret == 0)
1302		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
1303
1304out:
1305	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1306		if (ret)
1307			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1308		else
1309			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1310	}
1311
1312	return ret;
1313}
1314
1315static int
1316nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1317{
1318	struct nouveau_bo *nvbo = nouveau_bo(bo);
1319
1320	return drm_vma_node_verify_access(&nvbo->gem.vma_node,
1321					  filp->private_data);
1322}
1323
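/*
 * Fill in bus placement so TTM can CPU-map the bo: system memory needs
 * nothing, AGP/GART uses the aperture, and tiled memory on Tesla and
 * later is mapped through a temporary BAR1 VMA.
 */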
1324static int
1325nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1326{
1327	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1328	struct nouveau_drm *drm = nouveau_bdev(bdev);
1329	struct nvkm_device *device = nvxx_device(&drm->device);
1330	struct nvkm_mem *node = mem->mm_node;
1331	int ret;
1332
1333	mem->bus.addr = NULL;
1334	mem->bus.offset = 0;
1335	mem->bus.size = mem->num_pages << PAGE_SHIFT;
1336	mem->bus.base = 0;
1337	mem->bus.is_iomem = false;
1338	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1339		return -EINVAL;
1340	switch (mem->mem_type) {
1341	case TTM_PL_SYSTEM:
1342		/* System memory */
1343		return 0;
1344	case TTM_PL_TT:
1345#if IS_ENABLED(CONFIG_AGP)
1346		if (drm->agp.bridge) {
1347			mem->bus.offset = mem->start << PAGE_SHIFT;
1348			mem->bus.base = drm->agp.base;
1349			mem->bus.is_iomem = !drm->agp.cma;
1350		}
1351#endif
1352		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
1353			/* untiled */
1354			break;
1355		/* fallthrough, tiled memory */
1356	case TTM_PL_VRAM:
1357		mem->bus.offset = mem->start << PAGE_SHIFT;
1358		mem->bus.base = device->func->resource_addr(device, 1);
1359		mem->bus.is_iomem = true;
1360		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1361			struct nvkm_bar *bar = nvxx_bar(&drm->device);
1362			int page_shift = 12;
1363			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
1364				page_shift = node->page_shift;
1365
1366			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
1367					    &node->bar_vma);
1368			if (ret)
1369				return ret;
1370
1371			nvkm_vm_map(&node->bar_vma, node);
1372			mem->bus.offset = node->bar_vma.offset;
1373		}
1374		break;
1375	default:
1376		return -EINVAL;
1377	}
1378	return 0;
1379}
1380
1381static void
1382nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1383{
1384	struct nvkm_mem *node = mem->mm_node;
1385
1386	if (!node->bar_vma.node)
1387		return;
1388
1389	nvkm_vm_unmap(&node->bar_vma);
1390	nvkm_vm_put(&node->bar_vma);
1391}
1392
1393static int
1394nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1395{
1396	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1397	struct nouveau_bo *nvbo = nouveau_bo(bo);
1398	struct nvkm_device *device = nvxx_device(&drm->device);
1399	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1400	int i, ret;
1401
1402	/* as long as the bo isn't in vram, and isn't tiled, we've got
1403	 * nothing to do here.
1404	 */
1405	if (bo->mem.mem_type != TTM_PL_VRAM) {
1406		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1407		    !nouveau_bo_tile_layout(nvbo))
1408			return 0;
1409
1410		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1411			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1412
1413			ret = nouveau_bo_validate(nvbo, false, false);
1414			if (ret)
1415				return ret;
1416		}
1417		return 0;
1418	}
1419
1420	/* make sure bo is in mappable vram */
1421	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1422	    bo->mem.start + bo->mem.num_pages < mappable)
1423		return 0;
1424
1425	for (i = 0; i < nvbo->placement.num_placement; ++i) {
1426		nvbo->placements[i].fpfn = 0;
1427		nvbo->placements[i].lpfn = mappable;
1428	}
1429
1430	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1431		nvbo->busy_placements[i].fpfn = 0;
1432		nvbo->busy_placements[i].lpfn = mappable;
1433	}
1434
1435	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1436	return nouveau_bo_validate(nvbo, false, false);
1437}
1438
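/*
 * Back a ttm_tt with pages and DMA-map them, taking the AGP or swiotlb
 * paths when those are in play; SG "slave" objects only need their
 * page/address arrays filled from the imported sg table.
 */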
1439static int
1440nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1441{
1442	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1443	struct nouveau_drm *drm;
1444	struct nvkm_device *device;
1445	struct drm_device *dev;
1446	struct device *pdev;
1447	unsigned i;
1448	int r;
1449	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1450
1451	if (ttm->state != tt_unpopulated)
1452		return 0;
1453
1454	if (slave && ttm->sg) {
1455		/* make userspace faulting work */
1456		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1457						 ttm_dma->dma_address, ttm->num_pages);
1458		ttm->state = tt_unbound;
1459		return 0;
1460	}
1461
1462	drm = nouveau_bdev(ttm->bdev);
1463	device = nvxx_device(&drm->device);
1464	dev = drm->dev;
1465	pdev = device->dev;
1466
1467#if IS_ENABLED(CONFIG_AGP)
1468	if (drm->agp.bridge) {
1469		return ttm_agp_tt_populate(ttm);
1470	}
1471#endif
1472
1473#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1474	if (swiotlb_nr_tbl()) {
1475		return ttm_dma_populate((void *)ttm, dev->dev);
1476	}
1477#endif
1478
1479	r = ttm_pool_populate(ttm);
1480	if (r) {
1481		return r;
1482	}
1483
1484	for (i = 0; i < ttm->num_pages; i++) {
1485		dma_addr_t addr;
1486
1487		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
1488				    DMA_BIDIRECTIONAL);
1489
1490		if (dma_mapping_error(pdev, addr)) {
1491			while (i--) {
1492				dma_unmap_page(pdev, ttm_dma->dma_address[i],
1493					       PAGE_SIZE, DMA_BIDIRECTIONAL);
1494				ttm_dma->dma_address[i] = 0;
1495			}
1496			ttm_pool_unpopulate(ttm);
1497			return -EFAULT;
1498		}
1499
1500		ttm_dma->dma_address[i] = addr;
1501	}
1502	return 0;
1503}
1504
1505static void
1506nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1507{
1508	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1509	struct nouveau_drm *drm;
1510	struct nvkm_device *device;
1511	struct drm_device *dev;
1512	struct device *pdev;
1513	unsigned i;
1514	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1515
1516	if (slave)
1517		return;
1518
1519	drm = nouveau_bdev(ttm->bdev);
1520	device = nvxx_device(&drm->device);
1521	dev = drm->dev;
1522	pdev = device->dev;
1523
1524#if IS_ENABLED(CONFIG_AGP)
1525	if (drm->agp.bridge) {
1526		ttm_agp_tt_unpopulate(ttm);
1527		return;
1528	}
1529#endif
1530
1531#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1532	if (swiotlb_nr_tbl()) {
1533		ttm_dma_unpopulate((void *)ttm, dev->dev);
1534		return;
1535	}
1536#endif
1537
1538	for (i = 0; i < ttm->num_pages; i++) {
1539		if (ttm_dma->dma_address[i]) {
1540			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
1541				       DMA_BIDIRECTIONAL);
1542		}
1543	}
1544
1545	ttm_pool_unpopulate(ttm);
1546}
1547
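/*
 * Attach a fence to the bo's reservation object, exclusively for
 * writes or shared for reads.
 */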
1548void
1549nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1550{
1551	struct reservation_object *resv = nvbo->bo.resv;
1552
1553	if (exclusive)
1554		reservation_object_add_excl_fence(resv, &fence->base);
1555	else if (fence)
1556		reservation_object_add_shared_fence(resv, &fence->base);
1557}
1558
1559struct ttm_bo_driver nouveau_bo_driver = {
1560	.ttm_tt_create = &nouveau_ttm_tt_create,
1561	.ttm_tt_populate = &nouveau_ttm_tt_populate,
1562	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1563	.invalidate_caches = nouveau_bo_invalidate_caches,
1564	.init_mem_type = nouveau_bo_init_mem_type,
1565	.eviction_valuable = ttm_bo_eviction_valuable,
1566	.evict_flags = nouveau_bo_evict_flags,
1567	.move_notify = nouveau_bo_move_ntfy,
1568	.move = nouveau_bo_move,
1569	.verify_access = nouveau_bo_verify_access,
1570	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1571	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1572	.io_mem_free = &nouveau_ttm_io_mem_free,
1573	.lru_tail = &ttm_bo_default_lru_tail,
1574	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
1575};
1576
1577struct nvkm_vma *
1578nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
1579{
1580	struct nvkm_vma *vma;
1581	list_for_each_entry(vma, &nvbo->vma_list, head) {
1582		if (vma->vm == vm)
1583			return vma;
1584	}
1585
1586	return NULL;
1587}
1588
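/*
 * Allocate a VMA for the bo in the given VM and map it immediately for
 * VRAM, or for GART allocations not using large pages.
 */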
1589int
1590nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
1591		   struct nvkm_vma *vma)
1592{
1593	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1594	int ret;
1595
1596	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
1597			     NV_MEM_ACCESS_RW, vma);
1598	if (ret)
1599		return ret;
1600
 1601	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
1602	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
1603	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
1604		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
1605
1606	list_add_tail(&vma->head, &nvbo->vma_list);
1607	vma->refcount = 1;
1608	return 0;
1609}
1610
1611void
1612nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
1613{
1614	if (vma->node) {
1615		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
1616			nvkm_vm_unmap(vma);
1617		nvkm_vm_put(vma);
1618		list_del(&vma->head);
1619	}
1620}