   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include "xe_migrate.h"
   7
   8#include <linux/bitfield.h>
   9#include <linux/sizes.h>
  10
  11#include <drm/drm_managed.h>
  12#include <drm/ttm/ttm_tt.h>
  13#include <drm/xe_drm.h>
  14
  15#include <generated/xe_wa_oob.h>
  16
  17#include "instructions/xe_mi_commands.h"
  18#include "regs/xe_gpu_commands.h"
  19#include "tests/xe_test.h"
  20#include "xe_assert.h"
  21#include "xe_bb.h"
  22#include "xe_bo.h"
  23#include "xe_exec_queue.h"
  24#include "xe_ggtt.h"
  25#include "xe_gt.h"
  26#include "xe_hw_engine.h"
  27#include "xe_lrc.h"
  28#include "xe_map.h"
  29#include "xe_mocs.h"
  30#include "xe_pt.h"
  31#include "xe_res_cursor.h"
  32#include "xe_sched_job.h"
  33#include "xe_sync.h"
  34#include "xe_trace.h"
  35#include "xe_vm.h"
  36
  37/**
  38 * struct xe_migrate - migrate context.
  39 */
  40struct xe_migrate {
  41	/** @q: Default exec queue used for migration */
  42	struct xe_exec_queue *q;
  43	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
  44	struct xe_tile *tile;
  45	/** @job_mutex: Timeline mutex for @q. */
  46	struct mutex job_mutex;
  47	/** @pt_bo: Page-table buffer object. */
  48	struct xe_bo *pt_bo;
  49	/** @batch_base_ofs: VM offset of the migration batch buffer */
  50	u64 batch_base_ofs;
  51	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
  52	u64 usm_batch_base_ofs;
  53	/** @cleared_mem_ofs: VM offset of the NULL mapping used as the cleared-memory source. */
  54	u64 cleared_mem_ofs;
  55	/**
  56	 * @fence: dma-fence representing the last migration job batch.
  57	 * Protected by @job_mutex.
  58	 */
  59	struct dma_fence *fence;
  60	/**
  61	 * @vm_update_sa: For integrated, used to suballocate page-tables
  62	 * out of the pt_bo.
  63	 */
  64	struct drm_suballoc_manager vm_update_sa;
  65	/** @min_chunk_size: For dgfx, Minimum chunk size */
  66	u64 min_chunk_size;
  67};
  68
  69#define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
  70#define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
  71#define NUM_KERNEL_PDE 17
  72#define NUM_PT_SLOTS 32
  73#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
  74#define MAX_NUM_PTE 512
  75
  76/*
   77 * Although MI_STORE_DATA_IMM's "length" field is 10 bits, 0x3FE is the largest
  78 * legal value accepted.  Since that instruction field is always stored in
  79 * (val-2) format, this translates to 0x400 dwords for the true maximum length
  80 * of the instruction.  Subtracting the instruction header (1 dword) and
  81 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
  82 */
  83#define MAX_PTE_PER_SDI 0x1FE
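/*
 * Worked version of the arithmetic above (editorial sketch; the macro
 * names below are hypothetical and not used by the driver):
 */
#if 0	/* illustrative only */
#define SDI_MAX_ENCODED_LEN	0x3FE				/* largest legal "length" value */
#define SDI_MAX_DW		(SDI_MAX_ENCODED_LEN + 2)	/* (val-2) format => 0x400 dwords */
#define SDI_PAYLOAD_DW		(SDI_MAX_DW - 1 - 2)		/* minus header + address = 0x3FD */
#define SDI_PAYLOAD_QW		(SDI_PAYLOAD_DW / 2)		/* 0x1FE qwords of PTEs */
#endif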
  84
  85/**
  86 * xe_tile_migrate_engine() - Get this tile's migrate engine.
  87 * @tile: The tile.
  88 *
  89 * Returns the default migrate engine of this tile.
  90 * TODO: Perhaps this function is slightly misplaced, and even unneeded?
  91 *
  92 * Return: The default migrate engine
  93 */
  94struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
  95{
  96	return tile->migrate->q;
  97}
  98
  99static void xe_migrate_fini(struct drm_device *dev, void *arg)
 100{
 101	struct xe_migrate *m = arg;
 102
 103	xe_vm_lock(m->q->vm, false);
 104	xe_bo_unpin(m->pt_bo);
 105	xe_vm_unlock(m->q->vm);
 106
 107	dma_fence_put(m->fence);
 108	xe_bo_put(m->pt_bo);
 109	drm_suballoc_manager_fini(&m->vm_update_sa);
 110	mutex_destroy(&m->job_mutex);
 111	xe_vm_close_and_put(m->q->vm);
 112	xe_exec_queue_put(m->q);
 113}
 114
 115static u64 xe_migrate_vm_addr(u64 slot, u32 level)
 116{
 117	XE_WARN_ON(slot >= NUM_PT_SLOTS);
 118
 119	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
 120	return (slot + 1ULL) << xe_pt_shift(level + 1);
 121}
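/*
 * Worked example (editorial note, assuming the usual 4 KiB / 512-entry
 * page-table layout where xe_pt_shift(1) == 21): slot 0 at level 0 maps
 * at (0 + 1) << 21 == 2 MiB, slot 1 at 4 MiB, and so on, leaving the
 * first 2 MiB of the VM for the page-table BO and batch-buffer mapping.
 */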
 122
 123static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
 124{
 125	/*
 126	 * Subtract the DPA base to get the correct offset into the identity
 127	 * table for the migrate offset.
 128	 */
 129	addr -= xe->mem.vram.dpa_base;
 130	return addr + (256ULL << xe_pt_shift(2));
 131}
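/*
 * Editorial note: assuming xe_pt_shift(2) == 30, the offset added above
 * is 256ULL << 30 == 256 GiB, i.e. the start of the VRAM identity map
 * that xe_migrate_prepare_vm() sets up below.
 */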
 132
 133static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 134				 struct xe_vm *vm)
 135{
 136	struct xe_device *xe = tile_to_xe(tile);
 137	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
 138	u8 id = tile->id;
 139	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
 140	u32 map_ofs, level, i;
 141	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
 142	u64 entry;
 143
 144	/* Can't bump NUM_PT_SLOTS too high */
 145	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
 146	/* Must be a multiple of 64K to support all platforms */
 147	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
 148	/* And one slot reserved for the 4KiB page table updates */
 149	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
 150
 151	/* Need to be sure everything fits in the first PT, or create more */
 152	xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
 153
 154	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
 155				  num_entries * XE_PAGE_SIZE,
 156				  ttm_bo_type_kernel,
 157				  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
 158				  XE_BO_CREATE_PINNED_BIT);
 159	if (IS_ERR(bo))
 160		return PTR_ERR(bo);
 161
 162	entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
 163	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
 164
 165	map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
 166
 167	/* Map the entire BO in our level 0 pt */
 168	for (i = 0, level = 0; i < num_entries; level++) {
 169		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
 170						  pat_index, 0);
 171
 172		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
 173
 174		if (vm->flags & XE_VM_FLAG_64K)
 175			i += 16;
 176		else
 177			i += 1;
 178	}
 179
 180	if (!IS_DGFX(xe)) {
 181		/* Write out batch too */
 182		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
 183		for (i = 0; i < batch->size;
 184		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
 185		     XE_PAGE_SIZE) {
 186			entry = vm->pt_ops->pte_encode_bo(batch, i,
 187							  pat_index, 0);
 188
 189			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
 190				  entry);
 191			level++;
 192		}
 193		if (xe->info.has_usm) {
 194			xe_tile_assert(tile, batch->size == SZ_1M);
 195
 196			batch = tile->primary_gt->usm.bb_pool->bo;
 197			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
 198			xe_tile_assert(tile, batch->size == SZ_512K);
 199
 200			for (i = 0; i < batch->size;
 201			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
 202			     XE_PAGE_SIZE) {
 203				entry = vm->pt_ops->pte_encode_bo(batch, i,
 204								  pat_index, 0);
 205
 206				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
 207					  entry);
 208				level++;
 209			}
 210		}
 211	} else {
 212		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
 213
 214		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
 215
 216		if (xe->info.has_usm) {
 217			batch = tile->primary_gt->usm.bb_pool->bo;
 218			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
 219			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
 220		}
 221	}
 222
 223	for (level = 1; level < num_level; level++) {
 224		u32 flags = 0;
 225
 226		if (vm->flags & XE_VM_FLAG_64K && level == 1)
 227			flags = XE_PDE_64K;
 228
 229		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
 230						  XE_PAGE_SIZE, pat_index);
 231		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
 232			  entry | flags);
 233	}
 234
 235	/* Write PDE's that point to our BO. */
 236	for (i = 0; i < num_entries - num_level; i++) {
 237		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
 238						  pat_index);
 239
 240		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
 241			  (i + 1) * 8, u64, entry);
 242	}
 243
 244	/* Set up a 1GiB NULL mapping at 255GiB offset. */
 245	level = 2;
 246	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
 247		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
 248		  | XE_PTE_NULL);
 249	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
 250
 251	/* Identity map the entire vram at 256GiB offset */
 252	if (IS_DGFX(xe)) {
 253		u64 pos, ofs, flags;
 254
 255		level = 2;
 256		ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
 257		flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
 258						    true, 0);
 259
 260		/*
 261		 * Use 1GB pages; it doesn't matter if the physical amount of
 262		 * vram is less, as long as we don't access it.
 263		 */
 264		for (pos = xe->mem.vram.dpa_base;
 265		     pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
 266		     pos += SZ_1G, ofs += 8)
 267			xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
 268	}
 269
 270	/*
 271	 * Example layout created above, with root level = 3:
 272 * [PT0...PT7]: kernel PT's for copy/clear; 64 KiB or 4 KiB PTE's
 273	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
 274	 * [PT9...PT28]: Userspace PT's for VM_BIND, 4 KiB PTE's
 275	 * [PT29 = PDE 0] [PT30 = PDE 1] [PT31 = PDE 2]
 276	 *
 277	 * This makes the lowest part of the VM point to the pagetables.
 278 * Hence the lowest 2M in the VM points to itself. With a few writes
 279 * and flushes, other parts of the VM can then be used for either copying
 280 * or clearing.
 281	 *
 282	 * For performance, the kernel reserves PDE's, so about 20 are left
 283	 * for async VM updates.
 284	 *
 285 * To make things easier, each scratch PT is put in slot (1 + PT #)
 286 * everywhere; this allows lockless updates to scratch pages by using
 287 * the different addresses in the VM.
 288	 */
 289#define NUM_VMUSA_UNIT_PER_PAGE	32
 290#define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
 291#define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
 292	drm_suballoc_manager_init(&m->vm_update_sa,
 293				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
 294				  NUM_VMUSA_UNIT_PER_PAGE, 0);
 295
 296	m->pt_bo = bo;
 297	return 0;
 298}
 299
 300/*
 301 * Including the reserved copy engine is required to avoid deadlocks caused by
 302 * migrate jobs servicing the faults getting stuck behind the job that faulted.
 303 */
 304static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
 305{
 306	u32 logical_mask = 0;
 307	struct xe_hw_engine *hwe;
 308	enum xe_hw_engine_id id;
 309
 310	for_each_hw_engine(hwe, gt, id) {
 311		if (hwe->class != XE_ENGINE_CLASS_COPY)
 312			continue;
 313
 314		if (xe_gt_is_usm_hwe(gt, hwe))
 315			logical_mask |= BIT(hwe->logical_instance);
 316	}
 317
 318	return logical_mask;
 319}
 320
 321/**
 322 * xe_migrate_init() - Initialize a migrate context
 323 * @tile: Back-pointer to the tile we're initializing for.
 324 *
 325 * Return: Pointer to a migrate context on success. Error pointer on error.
 326 */
 327struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 328{
 329	struct xe_device *xe = tile_to_xe(tile);
 330	struct xe_gt *primary_gt = tile->primary_gt;
 331	struct xe_migrate *m;
 332	struct xe_vm *vm;
 333	int err;
 334
 335	m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
 336	if (!m)
 337		return ERR_PTR(-ENOMEM);
 338
 339	m->tile = tile;
 340
 341	/* Special layout, prepared below.. */
 342	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
 343			  XE_VM_FLAG_SET_TILE_ID(tile));
 344	if (IS_ERR(vm))
 345		return ERR_CAST(vm);
 346
 347	xe_vm_lock(vm, false);
 348	err = xe_migrate_prepare_vm(tile, m, vm);
 349	xe_vm_unlock(vm);
 350	if (err) {
 351		xe_vm_close_and_put(vm);
 352		return ERR_PTR(err);
 353	}
 354
 355	if (xe->info.has_usm) {
 356		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
 357							   XE_ENGINE_CLASS_COPY,
 358							   primary_gt->usm.reserved_bcs_instance,
 359							   false);
 360		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
 361
 362		if (!hwe || !logical_mask)
 363			return ERR_PTR(-EINVAL);
 364
 365		/*
 366		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
 367		 * PVC, may want to revisit if performance is needed.
 368		 */
 369		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
 370					    EXEC_QUEUE_FLAG_KERNEL |
 371					    EXEC_QUEUE_FLAG_PERMANENT |
 372					    EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
 373	} else {
 374		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
 375						  XE_ENGINE_CLASS_COPY,
 376						  EXEC_QUEUE_FLAG_KERNEL |
 377						  EXEC_QUEUE_FLAG_PERMANENT);
 378	}
 379	if (IS_ERR(m->q)) {
 380		xe_vm_close_and_put(vm);
 381		return ERR_CAST(m->q);
 382	}
 383
 384	mutex_init(&m->job_mutex);
 385
 386	err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
 387	if (err)
 388		return ERR_PTR(err);
 389
 390	if (IS_DGFX(xe)) {
 391		if (xe_device_has_flat_ccs(xe))
 392			/* min chunk size corresponds to 4K of CCS Metadata */
 393			m->min_chunk_size = SZ_4K * SZ_64K /
 394				xe_device_ccs_bytes(xe, SZ_64K);
 395		else
 396			/* Somewhat arbitrary to avoid a huge amount of blits */
 397			m->min_chunk_size = SZ_64K;
 398		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
 399		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
 400			(unsigned long long)m->min_chunk_size);
 401	}
 402
 403	return m;
 404}
 405
 406static u64 max_mem_transfer_per_pass(struct xe_device *xe)
 407{
 408	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
 409		return MAX_CCS_LIMITED_TRANSFER;
 410
 411	return MAX_PREEMPTDISABLE_TRANSFER;
 412}
 413
 414static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
 415{
 416	struct xe_device *xe = tile_to_xe(m->tile);
 417	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
 418
 419	if (mem_type_is_vram(cur->mem_type)) {
 420		/*
 421		 * For VRAM, we want to blit in chunks with sizes aligned to
 422		 * min_chunk_size in order for the offset to CCS metadata to be
 423		 * page-aligned. If it's the last chunk it may be smaller.
 424		 *
 425		 * Another constraint is that we need to limit the blit to
 426		 * the VRAM block size, unless size is smaller than
 427		 * min_chunk_size.
 428		 */
 429		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
 430
 431		size = min_t(u64, size, chunk);
 432		if (size > m->min_chunk_size)
 433			size = round_down(size, m->min_chunk_size);
 434	}
 435
 436	return size;
 437}
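/*
 * Editorial example of the clamping above, assuming min_chunk_size ==
 * SZ_64K and an 8 MiB per-pass limit: with 3 MiB remaining and a 1 MiB
 * contiguous VRAM block, size = min(3 MiB, max(1 MiB, 64 KiB)) = 1 MiB,
 * already 64 KiB-aligned.  A 40 KiB tail is below min_chunk_size and is
 * returned as-is, so the last chunk may be smaller.
 */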
 438
 439static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
 440{
 441	/* If the chunk is not fragmented, allow identity map. */
 442	return cur->size >= size;
 443}
 444
 445static u32 pte_update_size(struct xe_migrate *m,
 446			   bool is_vram,
 447			   struct ttm_resource *res,
 448			   struct xe_res_cursor *cur,
 449			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
 450			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
 451{
 452	u32 cmds = 0;
 453
 454	*L0_pt = pt_ofs;
 455	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
 456		/* Offset into identity map. */
 457		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
 458					      cur->start + vram_region_gpu_offset(res));
 459		cmds += cmd_size;
 460	} else {
 461		/* Clip L0 to available size */
 462		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
 463		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
 464
 465		*L0 = size;
 466		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
 467
 468		/* MI_STORE_DATA_IMM */
 469		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
 470
 471		/* PTE qwords (2 dwords each) */
 472		cmds += num_4k_pages * 2;
 473
 474		/* Each chunk has a single blit command */
 475		cmds += cmd_size;
 476	}
 477
 478	return cmds;
 479}
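/*
 * Editorial sizing example for the non-identity path above: a full 2 MiB
 * chunk needs 512 4 KiB PTEs, i.e. 3 * DIV_ROUND_UP(512, MAX_PTE_PER_SDI)
 * == 6 dwords of MI_STORE_DATA_IMM headers plus 512 * 2 == 1024 dwords of
 * PTE payload, on top of cmd_size for the copy/clear command itself.
 */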
 480
 481static void emit_pte(struct xe_migrate *m,
 482		     struct xe_bb *bb, u32 at_pt,
 483		     bool is_vram, bool is_comp_pte,
 484		     struct xe_res_cursor *cur,
 485		     u32 size, struct ttm_resource *res)
 486{
 487	struct xe_device *xe = tile_to_xe(m->tile);
 488	struct xe_vm *vm = m->q->vm;
 489	u16 pat_index;
 490	u32 ptes;
 491	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
 492	u64 cur_ofs;
 493
 494	/* Indirect access needs a compression-enabled, uncached PAT index */
 495	if (GRAPHICS_VERx100(xe) >= 2000)
 496		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
 497					  xe->pat.idx[XE_CACHE_WB];
 498	else
 499		pat_index = xe->pat.idx[XE_CACHE_WB];
 500
 501	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
 502
 503	while (ptes) {
 504		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
 505
 506		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
 507		bb->cs[bb->len++] = ofs;
 508		bb->cs[bb->len++] = 0;
 509
 510		cur_ofs = ofs;
 511		ofs += chunk * 8;
 512		ptes -= chunk;
 513
 514		while (chunk--) {
 515			u64 addr, flags = 0;
 516			bool devmem = false;
 517
 518			addr = xe_res_dma(cur) & PAGE_MASK;
 519			if (is_vram) {
 520				if (vm->flags & XE_VM_FLAG_64K) {
 521					u64 va = cur_ofs * XE_PAGE_SIZE / 8;
 522
 523					xe_assert(xe, (va & (SZ_64K - 1)) ==
 524						  (addr & (SZ_64K - 1)));
 525
 526					flags |= XE_PTE_PS64;
 527				}
 528
 529				addr += vram_region_gpu_offset(res);
 530				devmem = true;
 531			}
 532
 533			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
 534							   addr, pat_index,
 535							   0, devmem, flags);
 536			bb->cs[bb->len++] = lower_32_bits(addr);
 537			bb->cs[bb->len++] = upper_32_bits(addr);
 538
 539			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
 540			cur_ofs += 8;
 541		}
 542	}
 543}
 544
 545#define EMIT_COPY_CCS_DW 5
 546static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
 547			  u64 dst_ofs, bool dst_is_indirect,
 548			  u64 src_ofs, bool src_is_indirect,
 549			  u32 size)
 550{
 551	struct xe_device *xe = gt_to_xe(gt);
 552	u32 *cs = bb->cs + bb->len;
 553	u32 num_ccs_blks;
 554	u32 num_pages;
 555	u32 ccs_copy_size;
 556	u32 mocs;
 557
 558	if (GRAPHICS_VERx100(xe) >= 2000) {
 559		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
 560		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
 561
 562		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
 563		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
 564
 565	} else {
 566		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
 567					    NUM_CCS_BYTES_PER_BLOCK);
 568		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
 569
 570		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
 571		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
 572	}
 573
 574	*cs++ = XY_CTRL_SURF_COPY_BLT |
 575		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
 576		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
 577		ccs_copy_size;
 578	*cs++ = lower_32_bits(src_ofs);
 579	*cs++ = upper_32_bits(src_ofs) | mocs;
 580	*cs++ = lower_32_bits(dst_ofs);
 581	*cs++ = upper_32_bits(dst_ofs) | mocs;
 582
 583	bb->len = cs - bb->cs;
 584}
 585
 586#define EMIT_COPY_DW 10
 587static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
 588		      u64 src_ofs, u64 dst_ofs, unsigned int size,
 589		      unsigned int pitch)
 590{
 591	struct xe_device *xe = gt_to_xe(gt);
 592	u32 mocs = 0;
 593	u32 tile_y = 0;
 594
 595	xe_gt_assert(gt, size / pitch <= S16_MAX);
 596	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
 597	xe_gt_assert(gt, pitch <= U16_MAX);
 598
 599	if (GRAPHICS_VER(xe) >= 20)
 600		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
 601
 602	if (GRAPHICS_VERx100(xe) >= 1250)
 603		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
 604
 605	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
 606	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
 607	bb->cs[bb->len++] = 0;
 608	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
 609	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
 610	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
 611	bb->cs[bb->len++] = 0;
 612	bb->cs[bb->len++] = pitch | mocs;
 613	bb->cs[bb->len++] = lower_32_bits(src_ofs);
 614	bb->cs[bb->len++] = upper_32_bits(src_ofs);
 615}
 616
 617static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
 618			enum dma_resv_usage usage)
 619{
 620	return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
 621}
 622
 623static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
 624{
 625	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
 626}
 627
 628static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
 629			       struct xe_bb *bb,
 630			       u64 src_ofs, bool src_is_indirect,
 631			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
 632			       u64 ccs_ofs, bool copy_ccs)
 633{
 634	struct xe_gt *gt = m->tile->primary_gt;
 635	u32 flush_flags = 0;
 636
 637	if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
 638		/*
 639		 * If the src is already in vram, then it should already
 640		 * have been cleared by us, or has been populated by the
 641		 * user. Make sure we copy the CCS aux state as-is.
 642		 *
 643		 * Otherwise if the bo doesn't have any CCS metadata attached,
 644		 * we still need to clear it for security reasons.
 645		 */
 646		u64 ccs_src_ofs =  src_is_indirect ? src_ofs : m->cleared_mem_ofs;
 647
 648		emit_copy_ccs(gt, bb,
 649			      dst_ofs, true,
 650			      ccs_src_ofs, src_is_indirect, dst_size);
 651
 652		flush_flags = MI_FLUSH_DW_CCS;
 653	} else if (copy_ccs) {
 654		if (!src_is_indirect)
 655			src_ofs = ccs_ofs;
 656		else if (!dst_is_indirect)
 657			dst_ofs = ccs_ofs;
 658
 659		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
 660
 661		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
 662			      src_is_indirect, dst_size);
 663		if (dst_is_indirect)
 664			flush_flags = MI_FLUSH_DW_CCS;
 665	}
 666
 667	return flush_flags;
 668}
 669
 670/**
 671 * xe_migrate_copy() - Copy content of TTM resources.
 672 * @m: The migration context.
 673 * @src_bo: The buffer object @src is currently bound to.
 674 * @dst_bo: If copying between resources created for the same bo, set this to
 675 * the same value as @src_bo. If copying between buffer objects, set it to
 676 * the buffer object @dst is currently bound to.
 677 * @src: The source TTM resource.
 678 * @dst: The dst TTM resource.
 679 * @copy_only_ccs: If true, copy only CCS metadata
 680 *
 681 * Copies the contents of @src to @dst: On flat CCS devices,
 682 * the CCS metadata is copied as well if needed, or if not present,
 683 * the CCS metadata of @dst is cleared for security reasons.
 684 *
 685 * Return: Pointer to a dma_fence representing the last copy batch, or
 686 * an error pointer on failure. If there is a failure, any copy operation
 687 * started by the function call has been synced.
 688 */
 689struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 690				  struct xe_bo *src_bo,
 691				  struct xe_bo *dst_bo,
 692				  struct ttm_resource *src,
 693				  struct ttm_resource *dst,
 694				  bool copy_only_ccs)
 695{
 696	struct xe_gt *gt = m->tile->primary_gt;
 697	struct xe_device *xe = gt_to_xe(gt);
 698	struct dma_fence *fence = NULL;
 699	u64 size = src_bo->size;
 700	struct xe_res_cursor src_it, dst_it, ccs_it;
 701	u64 src_L0_ofs, dst_L0_ofs;
 702	u32 src_L0_pt, dst_L0_pt;
 703	u64 src_L0, dst_L0;
 704	int pass = 0;
 705	int err;
 706	bool src_is_pltt = src->mem_type == XE_PL_TT;
 707	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
 708	bool src_is_vram = mem_type_is_vram(src->mem_type);
 709	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
 710	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
 711		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
 712	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
 713
 714	/* Copying CCS between two different BOs is not supported yet. */
 715	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
 716		return ERR_PTR(-EINVAL);
 717
 718	if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
 719		return ERR_PTR(-EINVAL);
 720
 721	if (!src_is_vram)
 722		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
 723	else
 724		xe_res_first(src, 0, size, &src_it);
 725	if (!dst_is_vram)
 726		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
 727	else
 728		xe_res_first(dst, 0, size, &dst_it);
 729
 730	if (copy_system_ccs)
 731		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
 732				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
 733				&ccs_it);
 734
 735	while (size) {
 736		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
 737		struct xe_sched_job *job;
 738		struct xe_bb *bb;
 739		u32 flush_flags;
 740		u32 update_idx;
 741		u64 ccs_ofs, ccs_size;
 742		u32 ccs_pt;
 743
 744		bool usm = xe->info.has_usm;
 745		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
 746
 747		src_L0 = xe_migrate_res_sizes(m, &src_it);
 748		dst_L0 = xe_migrate_res_sizes(m, &dst_it);
 749
 750		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
 751			pass++, src_L0, dst_L0);
 752
 753		src_L0 = min(src_L0, dst_L0);
 754
 755		batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
 756					      &src_L0_ofs, &src_L0_pt, 0, 0,
 757					      avail_pts);
 758
 759		batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
 760					      &dst_L0_ofs, &dst_L0_pt, 0,
 761					      avail_pts, avail_pts);
 762
 763		if (copy_system_ccs) {
 764			ccs_size = xe_device_ccs_bytes(xe, src_L0);
 765			batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
 766						      &ccs_ofs, &ccs_pt, 0,
 767						      2 * avail_pts,
 768						      avail_pts);
 769			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
 770		}
 771
 772		/* Add copy commands size here */
 773		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
 774			((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
 775
 776		bb = xe_bb_new(gt, batch_size, usm);
 777		if (IS_ERR(bb)) {
 778			err = PTR_ERR(bb);
 779			goto err_sync;
 780		}
 781
 782		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
 783			xe_res_next(&src_it, src_L0);
 784		else
 785			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
 786				 &src_it, src_L0, src);
 787
 788		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
 789			xe_res_next(&dst_it, src_L0);
 790		else
 791			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
 792				 &dst_it, src_L0, dst);
 793
 794		if (copy_system_ccs)
 795			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
 796
 797		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
 798		update_idx = bb->len;
 799
 800		if (!copy_only_ccs)
 801			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
 802
 803		flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
 804						  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
 805						  dst_L0_ofs,
 806						  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
 807						  src_L0, ccs_ofs, copy_ccs);
 808
 809		mutex_lock(&m->job_mutex);
 810		job = xe_bb_create_migration_job(m->q, bb,
 811						 xe_migrate_batch_base(m, usm),
 812						 update_idx);
 813		if (IS_ERR(job)) {
 814			err = PTR_ERR(job);
 815			goto err;
 816		}
 817
 818		xe_sched_job_add_migrate_flush(job, flush_flags);
 819		if (!fence) {
 820			err = job_add_deps(job, src_bo->ttm.base.resv,
 821					   DMA_RESV_USAGE_BOOKKEEP);
 822			if (!err && src_bo != dst_bo)
 823				err = job_add_deps(job, dst_bo->ttm.base.resv,
 824						   DMA_RESV_USAGE_BOOKKEEP);
 825			if (err)
 826				goto err_job;
 827		}
 828
 829		xe_sched_job_arm(job);
 830		dma_fence_put(fence);
 831		fence = dma_fence_get(&job->drm.s_fence->finished);
 832		xe_sched_job_push(job);
 833
 834		dma_fence_put(m->fence);
 835		m->fence = dma_fence_get(fence);
 836
 837		mutex_unlock(&m->job_mutex);
 838
 839		xe_bb_free(bb, fence);
 840		size -= src_L0;
 841		continue;
 842
 843err_job:
 844		xe_sched_job_put(job);
 845err:
 846		mutex_unlock(&m->job_mutex);
 847		xe_bb_free(bb, NULL);
 848
 849err_sync:
 850		/* Sync partial copy if any. FIXME: under job_mutex? */
 851		if (fence) {
 852			dma_fence_wait(fence, false);
 853			dma_fence_put(fence);
 854		}
 855
 856		return ERR_PTR(err);
 857	}
 858
 859	return fence;
 860}
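/*
 * Minimal usage sketch (editorial, not part of the driver): a caller that
 * already holds a migrate context and both resources could drive a
 * blocking copy as below.  The wrapper name is hypothetical and error
 * handling is trimmed.
 */
#if 0	/* illustrative only */
static int example_blocking_copy(struct xe_migrate *m, struct xe_bo *bo,
				 struct ttm_resource *old_mem,
				 struct ttm_resource *new_mem)
{
	struct dma_fence *fence;

	fence = xe_migrate_copy(m, bo, bo, old_mem, new_mem, false);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}
#endif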
 861
 862static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
 863				 u32 size, u32 pitch)
 864{
 865	struct xe_device *xe = gt_to_xe(gt);
 866	u32 *cs = bb->cs + bb->len;
 867	u32 len = PVC_MEM_SET_CMD_LEN_DW;
 868
 869	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
 870	*cs++ = pitch - 1;
 871	*cs++ = (size / pitch) - 1;
 872	*cs++ = pitch - 1;
 873	*cs++ = lower_32_bits(src_ofs);
 874	*cs++ = upper_32_bits(src_ofs);
 875	if (GRAPHICS_VERx100(xe) >= 2000)
 876		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
 877	else
 878		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
 879
 880	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
 881
 882	bb->len += len;
 883}
 884
 885static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
 886				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
 887{
 888	struct xe_device *xe = gt_to_xe(gt);
 889	u32 *cs = bb->cs + bb->len;
 890	u32 len = XY_FAST_COLOR_BLT_DW;
 891
 892	if (GRAPHICS_VERx100(xe) < 1250)
 893		len = 11;
 894
 895	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
 896		(len - 2);
 897	if (GRAPHICS_VERx100(xe) >= 2000)
 898		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
 899			(pitch - 1);
 900	else
 901		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
 902			(pitch - 1);
 903	*cs++ = 0;
 904	*cs++ = (size / pitch) << 16 | pitch / 4;
 905	*cs++ = lower_32_bits(src_ofs);
 906	*cs++ = upper_32_bits(src_ofs);
 907	*cs++ = (is_vram ? 0x0 : 0x1) <<  XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
 908	*cs++ = 0;
 909	*cs++ = 0;
 910	*cs++ = 0;
 911	*cs++ = 0;
 912
 913	if (len > 11) {
 914		*cs++ = 0;
 915		*cs++ = 0;
 916		*cs++ = 0;
 917		*cs++ = 0;
 918		*cs++ = 0;
 919	}
 920
 921	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
 922
 923	bb->len += len;
 924}
 925
 926static bool has_service_copy_support(struct xe_gt *gt)
 927{
 928	/*
 929	 * What we care about is whether the architecture was designed with
 930	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
 931	 * instructions) so check the architectural engine list rather than the
 932	 * actual list since these instructions are usable on BCS0 even if
 933	 * all of the actual service copy engines (BCS1-BCS8) have been fused
 934	 * off.
 935	 */
 936	return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
 937						XE_HW_ENGINE_BCS1);
 938}
 939
 940static u32 emit_clear_cmd_len(struct xe_gt *gt)
 941{
 942	if (has_service_copy_support(gt))
 943		return PVC_MEM_SET_CMD_LEN_DW;
 944	else
 945		return XY_FAST_COLOR_BLT_DW;
 946}
 947
 948static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
 949		       u32 size, u32 pitch, bool is_vram)
 950{
 951	if (has_service_copy_support(gt))
 952		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
 953	else
 954		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
 955				     is_vram);
 956}
 957
 958/**
 959 * xe_migrate_clear() - Clear content of TTM resources.
 960 * @m: The migration context.
 961 * @bo: The buffer object @dst is currently bound to.
 962 * @dst: The dst TTM resource to be cleared.
 963 *
 964 * Clear the contents of @dst to zero. On flat CCS devices,
 965 * the CCS metadata is cleared to zero as well on VRAM destinations.
 966 * TODO: Eliminate the @bo argument.
 967 *
 968 * Return: Pointer to a dma_fence representing the last clear batch, or
 969 * an error pointer on failure. If there is a failure, any clear operation
 970 * started by the function call has been synced.
 971 */
 972struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 973				   struct xe_bo *bo,
 974				   struct ttm_resource *dst)
 975{
 976	bool clear_vram = mem_type_is_vram(dst->mem_type);
 977	struct xe_gt *gt = m->tile->primary_gt;
 978	struct xe_device *xe = gt_to_xe(gt);
 979	bool clear_system_ccs = xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe);
 980	struct dma_fence *fence = NULL;
 981	u64 size = bo->size;
 982	struct xe_res_cursor src_it;
 983	struct ttm_resource *src = dst;
 984	int err;
 985	int pass = 0;
 986
 987	if (!clear_vram)
 988		xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
 989	else
 990		xe_res_first(src, 0, bo->size, &src_it);
 991
 992	while (size) {
 993		u64 clear_L0_ofs;
 994		u32 clear_L0_pt;
 995		u32 flush_flags = 0;
 996		u64 clear_L0;
 997		struct xe_sched_job *job;
 998		struct xe_bb *bb;
 999		u32 batch_size, update_idx;
1000
1001		bool usm = xe->info.has_usm;
1002		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1003
1004		clear_L0 = xe_migrate_res_sizes(m, &src_it);
1005
1006		drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
1007
1008		/* Calculate final sizes and batch size.. */
1009		batch_size = 2 +
1010			pte_update_size(m, clear_vram, src, &src_it,
1011					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
1012					clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
1013					avail_pts);
1014
1015		if (xe_device_has_flat_ccs(xe))
1016			batch_size += EMIT_COPY_CCS_DW;
1017
1018		/* Clear commands */
1019
1020		if (WARN_ON_ONCE(!clear_L0))
1021			break;
1022
1023		bb = xe_bb_new(gt, batch_size, usm);
1024		if (IS_ERR(bb)) {
1025			err = PTR_ERR(bb);
1026			goto err_sync;
1027		}
1028
1029		size -= clear_L0;
1030		/* Preemption is enabled again by the ring ops. */
1031		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
1032			xe_res_next(&src_it, clear_L0);
1033		else
1034			emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
1035				 &src_it, clear_L0, dst);
1036
1037		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1038		update_idx = bb->len;
1039
1040		if (!clear_system_ccs)
1041			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1042
1043		if (xe_device_has_flat_ccs(xe)) {
1044			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1045				      m->cleared_mem_ofs, false, clear_L0);
1046			flush_flags = MI_FLUSH_DW_CCS;
1047		}
1048
1049		mutex_lock(&m->job_mutex);
1050		job = xe_bb_create_migration_job(m->q, bb,
1051						 xe_migrate_batch_base(m, usm),
1052						 update_idx);
1053		if (IS_ERR(job)) {
1054			err = PTR_ERR(job);
1055			goto err;
1056		}
1057
1058		xe_sched_job_add_migrate_flush(job, flush_flags);
1059		if (!fence) {
1060			/*
1061			 * There can't be anything userspace related at this
1062			 * point, so we just need to respect any potential move
1063			 * fences, which are always tracked as
1064			 * DMA_RESV_USAGE_KERNEL.
1065			 */
1066			err = job_add_deps(job, bo->ttm.base.resv,
1067					   DMA_RESV_USAGE_KERNEL);
1068			if (err)
1069				goto err_job;
1070		}
1071
1072		xe_sched_job_arm(job);
1073		dma_fence_put(fence);
1074		fence = dma_fence_get(&job->drm.s_fence->finished);
1075		xe_sched_job_push(job);
1076
1077		dma_fence_put(m->fence);
1078		m->fence = dma_fence_get(fence);
1079
1080		mutex_unlock(&m->job_mutex);
1081
1082		xe_bb_free(bb, fence);
1083		continue;
1084
1085err_job:
1086		xe_sched_job_put(job);
1087err:
1088		mutex_unlock(&m->job_mutex);
1089		xe_bb_free(bb, NULL);
1090err_sync:
1091		/* Sync partial copies if any. FIXME: job_mutex? */
1092		if (fence) {
1093			dma_fence_wait(fence, false);
1094			dma_fence_put(fence);
1095		}
1096
1097		return ERR_PTR(err);
1098	}
1099
1100	if (clear_system_ccs)
1101		bo->ccs_cleared = true;
1102
1103	return fence;
1104}
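/*
 * Corresponding editorial sketch for a blocking clear; as above, the
 * wrapper is hypothetical and error handling is trimmed.
 */
#if 0	/* illustrative only */
static int example_blocking_clear(struct xe_migrate *m, struct xe_bo *bo,
				  struct ttm_resource *dst)
{
	struct dma_fence *fence = xe_migrate_clear(m, bo, dst);

	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}
#endif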
1105
1106static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1107			  const struct xe_vm_pgtable_update *update,
1108			  struct xe_migrate_pt_update *pt_update)
1109{
1110	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1111	u32 chunk;
1112	u32 ofs = update->ofs, size = update->qwords;
1113
1114	/*
1115	 * If we have 512 entries (max), we would populate it ourselves,
1116	 * and update the PDE above it to the new pointer.
1117	 * The only time this can happen is if we have to update the top
1118	 * PDE. This requires a BO that is almost vm->size big.
1119	 *
1120	 * This shouldn't be possible in practice.. might change when 16K
1121	 * pages are used. Hence the assert.
1122	 */
1123	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1124	if (!ppgtt_ofs)
1125		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1126						xe_bo_addr(update->pt_bo, 0,
1127							   XE_PAGE_SIZE));
1128
1129	do {
1130		u64 addr = ppgtt_ofs + ofs * 8;
1131
1132		chunk = min(size, MAX_PTE_PER_SDI);
1133
1134		/* Ensure populatefn can do memset64 by aligning bb->cs */
1135		if (!(bb->len & 1))
1136			bb->cs[bb->len++] = MI_NOOP;
1137
1138		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1139		bb->cs[bb->len++] = lower_32_bits(addr);
1140		bb->cs[bb->len++] = upper_32_bits(addr);
1141		ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
1142			      update);
1143
1144		bb->len += chunk * 2;
1145		ofs += chunk;
1146		size -= chunk;
1147	} while (size);
1148}
1149
1150struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1151{
1152	return xe_vm_get(m->q->vm);
1153}
1154
1155#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1156struct migrate_test_params {
1157	struct xe_test_priv base;
1158	bool force_gpu;
1159};
1160
1161#define to_migrate_test_params(_priv) \
1162	container_of(_priv, struct migrate_test_params, base)
1163#endif
1164
1165static struct dma_fence *
1166xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1167			       struct xe_vm *vm, struct xe_bo *bo,
1168			       const struct  xe_vm_pgtable_update *updates,
1169			       u32 num_updates, bool wait_vm,
1170			       struct xe_migrate_pt_update *pt_update)
1171{
1172	XE_TEST_DECLARE(struct migrate_test_params *test =
1173			to_migrate_test_params
1174			(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1175	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1176	struct dma_fence *fence;
1177	int err;
1178	u32 i;
1179
1180	if (XE_TEST_ONLY(test && test->force_gpu))
1181		return ERR_PTR(-ETIME);
1182
1183	if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
1184					  DMA_RESV_USAGE_KERNEL))
1185		return ERR_PTR(-ETIME);
1186
1187	if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
1188					       DMA_RESV_USAGE_BOOKKEEP))
1189		return ERR_PTR(-ETIME);
1190
1191	if (ops->pre_commit) {
1192		pt_update->job = NULL;
1193		err = ops->pre_commit(pt_update);
1194		if (err)
1195			return ERR_PTR(err);
1196	}
1197	for (i = 0; i < num_updates; i++) {
1198		const struct xe_vm_pgtable_update *update = &updates[i];
1199
1200		ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
1201			      update->ofs, update->qwords, update);
1202	}
1203
1204	if (vm) {
1205		trace_xe_vm_cpu_bind(vm);
1206		xe_device_wmb(vm->xe);
1207	}
1208
1209	fence = dma_fence_get_stub();
1210
1211	return fence;
1212}
1213
1214static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
1215			struct xe_sync_entry *syncs, u32 num_syncs)
1216{
1217	struct dma_fence *fence;
1218	int i;
1219
1220	for (i = 0; i < num_syncs; i++) {
1221		fence = syncs[i].fence;
1222
1223		if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1224				       &fence->flags))
1225			return false;
1226	}
1227	if (q) {
1228		fence = xe_exec_queue_last_fence_get(q, vm);
1229		if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1230			dma_fence_put(fence);
1231			return false;
1232		}
1233		dma_fence_put(fence);
1234	}
1235
1236	return true;
1237}
1238
1239/**
1240 * xe_migrate_update_pgtables() - Pipelined page-table update
1241 * @m: The migrate context.
1242 * @vm: The vm we'll be updating.
1243 * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
1244 * @q: The exec queue to be used for the update or NULL if the default
1245 * migration engine is to be used.
1246 * @updates: An array of update descriptors.
1247 * @num_updates: Number of descriptors in @updates.
1248 * @syncs: Array of xe_sync_entry to await before updating. Note that waits
1249 * will block the engine timeline.
1250 * @num_syncs: Number of entries in @syncs.
1251 * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
1252 * pointers to callback functions and, if subclassed, private arguments to
1253 * those.
1254 *
1255 * Perform a pipelined page-table update. The update descriptors are typically
1256 * built under the same lock critical section as a call to this function. If
1257 * using the default engine for the updates, they will be performed in the
1258 * order they grab the job_mutex. If different engines are used, external
1259 * synchronization is needed for overlapping updates to maintain page-table
1260 * consistency. Note that the meaning of "overlapping" is that the updates
1261 * touch the same page-table, which might be a higher-level page-directory.
1262 * If no pipelining is needed, then updates may be performed by the cpu.
1263 *
1264 * Return: A dma_fence that, when signaled, indicates the update completion.
1265 */
1266struct dma_fence *
1267xe_migrate_update_pgtables(struct xe_migrate *m,
1268			   struct xe_vm *vm,
1269			   struct xe_bo *bo,
1270			   struct xe_exec_queue *q,
1271			   const struct xe_vm_pgtable_update *updates,
1272			   u32 num_updates,
1273			   struct xe_sync_entry *syncs, u32 num_syncs,
1274			   struct xe_migrate_pt_update *pt_update)
1275{
1276	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1277	struct xe_tile *tile = m->tile;
1278	struct xe_gt *gt = tile->primary_gt;
1279	struct xe_device *xe = tile_to_xe(tile);
1280	struct xe_sched_job *job;
1281	struct dma_fence *fence;
1282	struct drm_suballoc *sa_bo = NULL;
1283	struct xe_vma *vma = pt_update->vma;
1284	struct xe_bb *bb;
1285	u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
1286	u64 addr;
1287	int err = 0;
1288	bool usm = !q && xe->info.has_usm;
1289	bool first_munmap_rebind = vma &&
1290		vma->gpuva.flags & XE_VMA_FIRST_REBIND;
1291	struct xe_exec_queue *q_override = !q ? m->q : q;
1292	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1293
1294	/* Use the CPU if there are no in syncs and the engine is idle */
1295	if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
1296		fence =  xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
1297							num_updates,
1298							first_munmap_rebind,
1299							pt_update);
1300		if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
1301			return fence;
1302	}
1303
1304	/* fixed + PTE entries */
1305	if (IS_DGFX(xe))
1306		batch_size = 2;
1307	else
1308		batch_size = 6 + num_updates * 2;
1309
1310	for (i = 0; i < num_updates; i++) {
1311		u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
1312
1313		/* align noop + MI_STORE_DATA_IMM cmd prefix */
1314		batch_size += 4 * num_cmds + updates[i].qwords * 2;
1315	}
1316
1317	/*
1318	 * XXX: Create temp bo to copy from, if batch_size becomes too big?
1319	 *
1320	 * Worst case: Sum(2 * (each lower level page size) + (top level page size))
1321	 * Should be reasonably bound..
1322	 */
1323	xe_tile_assert(tile, batch_size < SZ_128K);
1324
1325	bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
1326	if (IS_ERR(bb))
1327		return ERR_CAST(bb);
1328
1329	/* For sysmem PTE's, need to map them in our hole.. */
1330	if (!IS_DGFX(xe)) {
1331		ppgtt_ofs = NUM_KERNEL_PDE - 1;
1332		if (q) {
1333			xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
1334
1335			sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
1336						 GFP_KERNEL, true, 0);
1337			if (IS_ERR(sa_bo)) {
1338				err = PTR_ERR(sa_bo);
1339				goto err;
1340			}
1341
1342			ppgtt_ofs = NUM_KERNEL_PDE +
1343				(drm_suballoc_soffset(sa_bo) /
1344				 NUM_VMUSA_UNIT_PER_PAGE);
1345			page_ofs = (drm_suballoc_soffset(sa_bo) %
1346				    NUM_VMUSA_UNIT_PER_PAGE) *
1347				VM_SA_UPDATE_UNIT_SIZE;
1348		}
1349
1350		/* Map our PT's to gtt */
1351		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
1352		bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1353		bb->cs[bb->len++] = 0; /* upper_32_bits */
1354
1355		for (i = 0; i < num_updates; i++) {
1356			struct xe_bo *pt_bo = updates[i].pt_bo;
1357
1358			xe_tile_assert(tile, pt_bo->size == SZ_4K);
1359
1360			addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
1361			bb->cs[bb->len++] = lower_32_bits(addr);
1362			bb->cs[bb->len++] = upper_32_bits(addr);
1363		}
1364
1365		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1366		update_idx = bb->len;
1367
1368		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1369			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1370		for (i = 0; i < num_updates; i++)
1371			write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
1372				      &updates[i], pt_update);
1373	} else {
1374		/* phys pages, no preamble required */
1375		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1376		update_idx = bb->len;
1377
1378		for (i = 0; i < num_updates; i++)
1379			write_pgtable(tile, bb, 0, &updates[i], pt_update);
1380	}
1381
1382	if (!q)
1383		mutex_lock(&m->job_mutex);
1384
1385	job = xe_bb_create_migration_job(q ?: m->q, bb,
1386					 xe_migrate_batch_base(m, usm),
1387					 update_idx);
1388	if (IS_ERR(job)) {
1389		err = PTR_ERR(job);
1390		goto err_bb;
1391	}
1392
1393	/* Wait on BO move */
1394	if (bo) {
1395		err = job_add_deps(job, bo->ttm.base.resv,
1396				   DMA_RESV_USAGE_KERNEL);
1397		if (err)
1398			goto err_job;
1399	}
1400
1401	/*
1402	 * Munmap style VM unbind, need to wait for all jobs to be complete /
1403	 * trigger preempts before moving forward
1404	 */
1405	if (first_munmap_rebind) {
1406		err = job_add_deps(job, xe_vm_resv(vm),
1407				   DMA_RESV_USAGE_BOOKKEEP);
1408		if (err)
1409			goto err_job;
1410	}
1411
1412	err = xe_sched_job_last_fence_add_dep(job, vm);
1413	for (i = 0; !err && i < num_syncs; i++)
1414		err = xe_sync_entry_add_deps(&syncs[i], job);
1415
1416	if (err)
1417		goto err_job;
1418
1419	if (ops->pre_commit) {
1420		pt_update->job = job;
1421		err = ops->pre_commit(pt_update);
1422		if (err)
1423			goto err_job;
1424	}
1425	xe_sched_job_arm(job);
1426	fence = dma_fence_get(&job->drm.s_fence->finished);
1427	xe_sched_job_push(job);
1428
1429	if (!q)
1430		mutex_unlock(&m->job_mutex);
1431
1432	xe_bb_free(bb, fence);
1433	drm_suballoc_free(sa_bo, fence);
1434
1435	return fence;
1436
1437err_job:
1438	xe_sched_job_put(job);
1439err_bb:
1440	if (!q)
1441		mutex_unlock(&m->job_mutex);
1442	xe_bb_free(bb, NULL);
1443err:
1444	drm_suballoc_free(sa_bo, NULL);
1445	return ERR_PTR(err);
1446}
1447
1448/**
1449 * xe_migrate_wait() - Complete all operations using the xe_migrate context
1450 * @m: Migrate context to wait for.
1451 *
1452 * Waits until the GPU no longer uses the migrate context's default engine
1453 * or its page-table objects. FIXME: What about separate page-table update
1454 * engines?
1455 */
1456void xe_migrate_wait(struct xe_migrate *m)
1457{
1458	if (m->fence)
1459		dma_fence_wait(m->fence, false);
1460}
1461
1462#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1463#include "tests/xe_migrate.c"
1464#endif