   1// SPDX-License-Identifier: GPL-2.0 or MIT
   2/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
   3/* Copyright 2023 Collabora ltd. */
   4
   5#include <drm/drm_debugfs.h>
   6#include <drm/drm_drv.h>
   7#include <drm/drm_exec.h>
   8#include <drm/drm_gpuvm.h>
   9#include <drm/drm_managed.h>
  10#include <drm/gpu_scheduler.h>
  11#include <drm/panthor_drm.h>
  12
  13#include <linux/atomic.h>
  14#include <linux/bitfield.h>
  15#include <linux/delay.h>
  16#include <linux/dma-mapping.h>
  17#include <linux/interrupt.h>
  18#include <linux/io.h>
  19#include <linux/iopoll.h>
  20#include <linux/io-pgtable.h>
  21#include <linux/iommu.h>
  22#include <linux/kmemleak.h>
  23#include <linux/platform_device.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/rwsem.h>
  26#include <linux/sched.h>
  27#include <linux/shmem_fs.h>
  28#include <linux/sizes.h>
  29
  30#include "panthor_device.h"
  31#include "panthor_gem.h"
  32#include "panthor_heap.h"
  33#include "panthor_mmu.h"
  34#include "panthor_regs.h"
  35#include "panthor_sched.h"
  36
  37#define MAX_AS_SLOTS			32
  38
  39struct panthor_vm;
  40
  41/**
  42 * struct panthor_as_slot - Address space slot
  43 */
  44struct panthor_as_slot {
  45	/** @vm: VM bound to this slot. NULL if no VM is bound. */
  46	struct panthor_vm *vm;
  47};
  48
  49/**
  50 * struct panthor_mmu - MMU related data
  51 */
  52struct panthor_mmu {
  53	/** @irq: The MMU irq. */
  54	struct panthor_irq irq;
  55
  56	/** @as: Address space related fields.
  57	 *
  58	 * The GPU has a limited number of address spaces (AS) slots, forcing
  59	 * us to re-assign them on-demand.
  60	 */
  61	struct {
  62		/** @slots_lock: Lock protecting access to all other AS fields. */
  63		struct mutex slots_lock;
  64
  65		/** @alloc_mask: Bitmask encoding the allocated slots. */
  66		unsigned long alloc_mask;
  67
  68		/** @faulty_mask: Bitmask encoding the faulty slots. */
  69		unsigned long faulty_mask;
  70
  71		/** @slots: VMs currently bound to the AS slots. */
  72		struct panthor_as_slot slots[MAX_AS_SLOTS];
  73
  74		/**
  75		 * @lru_list: List of least recently used VMs.
  76		 *
  77		 * We use this list to pick a VM to evict when all slots are
  78		 * used.
  79		 *
  80		 * There should be no more active VMs than there are AS slots,
  81		 * so this LRU is just here to keep VMs bound until there's
  82		 * a need to release a slot, thus avoid unnecessary TLB/cache
  83		 * flushes.
  84		 */
  85		struct list_head lru_list;
  86	} as;
  87
  88	/** @vm: VM management fields. */
  89	struct {
  90		/** @lock: Lock protecting access to list. */
  91		struct mutex lock;
  92
  93		/** @list: List containing all VMs. */
  94		struct list_head list;
  95
  96		/** @reset_in_progress: True if a reset is in progress. */
  97		bool reset_in_progress;
  98
  99		/** @wq: Workqueue used for the VM_BIND queues. */
 100		struct workqueue_struct *wq;
 101	} vm;
 102};
 103
 104/**
 105 * struct panthor_vm_pool - VM pool object
 106 */
 107struct panthor_vm_pool {
 108	/** @xa: Array used for VM handle tracking. */
 109	struct xarray xa;
 110};
 111
 112/**
 113 * struct panthor_vma - GPU mapping object
 114 *
 115 * This is used to track GEM mappings in GPU space.
 116 */
 117struct panthor_vma {
 118	/** @base: Inherits from drm_gpuva. */
 119	struct drm_gpuva base;
 120
 121	/** @node: Used to implement deferred release of VMAs. */
 122	struct list_head node;
 123
 124	/**
 125	 * @flags: Combination of drm_panthor_vm_bind_op_flags.
 126	 *
 127	 * Only map related flags are accepted.
 128	 */
 129	u32 flags;
 130};
 131
 132/**
 133 * struct panthor_vm_op_ctx - VM operation context
 134 *
 135 * With VM operations potentially taking place in a dma-signaling path, we
 136 * need to make sure everything that might require resource allocation is
 137 * pre-allocated upfront. This is what this operation context is for.
 138 *
 139 * We also collect resources that have been freed, so we can release them
 140 * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
 141 * request.
 142 */
 143struct panthor_vm_op_ctx {
 144	/** @rsvd_page_tables: Pages reserved for the MMU page table update. */
 145	struct {
 146		/** @count: Number of pages reserved. */
 147		u32 count;
 148
 149		/** @ptr: Points to the first unused page in the @pages table. */
 150		u32 ptr;
 151
 152		/**
 153		 * @pages: Array of pages that can be used for an MMU page table update.
 154		 *
 155		 * After a VM operation, there might be free pages left in this array.
 156		 * They should be returned to the pt_cache as part of the op_ctx cleanup.
 157		 */
 158		void **pages;
 159	} rsvd_page_tables;
 160
 161	/**
 162	 * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
 163	 *
 164	 * Partial unmap requests or map requests overlapping existing mappings will
 165	 * trigger a remap call, which needs to register up to three panthor_vma objects
 166	 * (one for the new mapping, and two for the previous and next mappings).
 167	 */
 168	struct panthor_vma *preallocated_vmas[3];
 169
 170	/** @flags: Combination of drm_panthor_vm_bind_op_flags. */
 171	u32 flags;
 172
 173	/** @va: Virtual range targeted by the VM operation. */
 174	struct {
 175		/** @addr: Start address. */
 176		u64 addr;
 177
 178		/** @range: Range size. */
 179		u64 range;
 180	} va;
 181
 182	/**
 183	 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
 184	 *
 185	 * For unmap operations, this will contain all VMAs that were covered by the
 186	 * specified VA range.
 187	 *
 188	 * For map operations, this will contain all VMAs that previously mapped to
 189	 * the specified VA range.
 190	 *
 191	 * Those VMAs, and the resources they point to will be released as part of
 192	 * the op_ctx cleanup operation.
 193	 */
 194	struct list_head returned_vmas;
 195
 196	/** @map: Fields specific to a map operation. */
 197	struct {
 198		/** @vm_bo: Buffer object to map. */
 199		struct drm_gpuvm_bo *vm_bo;
 200
 201		/** @bo_offset: Offset in the buffer object. */
 202		u64 bo_offset;
 203
 204		/**
 205		 * @sgt: sg-table pointing to pages backing the GEM object.
 206		 *
 207		 * This is gathered at job creation time, such that we don't have
 208		 * to allocate in ::run_job().
 209		 */
 210		struct sg_table *sgt;
 211
 212		/**
 213		 * @new_vma: The new VMA object that will be inserted to the VA tree.
 214		 */
 215		struct panthor_vma *new_vma;
 216	} map;
 217};
 218
 219/**
 220 * struct panthor_vm - VM object
 221 *
 222 * A VM is an object representing a GPU (or MCU) virtual address space.
 223 * It embeds the MMU page table for this address space, a tree containing
 224 * all the virtual mappings of GEM objects, and other things needed to manage
 225 * the VM.
 226 *
 227 * Except for the MCU VM, which is managed by the kernel, all other VMs are
 228 * created by userspace and mostly managed by userspace, using the
 229 * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
 230 *
 231 * A portion of the virtual address space is reserved for kernel objects,
 232 * like heap chunks, and userspace gets to decide how much of the virtual
 233 * address space is left to the kernel (half of the virtual address space
 234 * by default).
 235 */
 236struct panthor_vm {
 237	/**
 238	 * @base: Inherit from drm_gpuvm.
 239	 *
 240	 * We delegate all the VA management to the common drm_gpuvm framework
 241	 * and only implement hooks to update the MMU page table.
 242	 */
 243	struct drm_gpuvm base;
 244
 245	/**
 246	 * @sched: Scheduler used for asynchronous VM_BIND requests.
 247	 *
 248	 * We use a 1:1 scheduler here.
 249	 */
 250	struct drm_gpu_scheduler sched;
 251
 252	/**
 253	 * @entity: Scheduling entity representing the VM_BIND queue.
 254	 *
 255	 * There's currently one bind queue per VM. It doesn't make sense to
 256	 * allow more given the VM operations are serialized anyway.
 257	 */
 258	struct drm_sched_entity entity;
 259
 260	/** @ptdev: Device. */
 261	struct panthor_device *ptdev;
 262
 263	/** @memattr: Value to program to the AS_MEMATTR register. */
 264	u64 memattr;
 265
 266	/** @pgtbl_ops: Page table operations. */
 267	struct io_pgtable_ops *pgtbl_ops;
 268
 269	/** @root_page_table: Stores the root page table pointer. */
 270	void *root_page_table;
 271
 272	/**
 273	 * @op_lock: Lock used to serialize operations on a VM.
 274	 *
 275	 * The serialization of jobs queued to the VM_BIND queue is already
 276	 * taken care of by drm_sched, but we need to serialize synchronous
 277	 * and asynchronous VM_BIND requests. This is what this lock is for.
 278	 */
 279	struct mutex op_lock;
 280
 281	/**
 282	 * @op_ctx: The context attached to the currently executing VM operation.
 283	 *
 284	 * NULL when no operation is in progress.
 285	 */
 286	struct panthor_vm_op_ctx *op_ctx;
 287
 288	/**
 289	 * @mm: Memory management object representing the auto-VA/kernel-VA.
 290	 *
 291	 * Used to auto-allocate VA space for kernel-managed objects (tiler
 292	 * heaps, ...).
 293	 *
 294	 * For the MCU VM, this is managing the VA range that's used to map
 295	 * all shared interfaces.
 296	 *
 297	 * For user VMs, the range is specified by userspace, and must not
 298	 * exceed half of the addressable VA space.
 299	 */
 300	struct drm_mm mm;
 301
 302	/** @mm_lock: Lock protecting the @mm field. */
 303	struct mutex mm_lock;
 304
 305	/** @kernel_auto_va: Automatic VA-range for kernel BOs. */
 306	struct {
 307		/** @start: Start of the automatic VA-range for kernel BOs. */
 308		u64 start;
 309
 310		/** @end: End of the automatic VA-range for kernel BOs. */
 311		u64 end;
 312	} kernel_auto_va;
 313
 314	/** @as: Address space related fields. */
 315	struct {
 316		/**
 317		 * @id: ID of the address space this VM is bound to.
 318		 *
 319		 * A value of -1 means the VM is inactive/not bound.
 320		 */
 321		int id;
 322
 323		/** @active_cnt: Number of active users of this VM. */
 324		refcount_t active_cnt;
 325
 326		/**
 327		 * @lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
 328		 *
 329		 * Active VMs should not be inserted in the LRU list.
 330		 */
 331		struct list_head lru_node;
 332	} as;
 333
 334	/**
 335	 * @heaps: Tiler heap related fields.
 336	 */
 337	struct {
 338		/**
 339		 * @pool: The heap pool attached to this VM.
 340		 *
 341		 * Will stay NULL until someone creates a heap context on this VM.
 342		 */
 343		struct panthor_heap_pool *pool;
 344
 345		/** @lock: Lock used to protect access to @pool. */
 346		struct mutex lock;
 347	} heaps;
 348
 349	/** @node: Used to insert the VM in the panthor_mmu::vm::list. */
 350	struct list_head node;
 351
 352	/** @for_mcu: True if this is the MCU VM. */
 353	bool for_mcu;
 354
 355	/**
 356	 * @destroyed: True if the VM was destroyed.
 357	 *
 358	 * No further bind requests should be queued to a destroyed VM.
 359	 */
 360	bool destroyed;
 361
 362	/**
 363	 * @unusable: True if the VM has turned unusable because something
 364	 * bad happened during an asynchronous request.
 365	 *
 366	 * We don't try to recover from such failures, because this implies
 367	 * informing userspace about the specific operation that failed, and
 368	 * hoping the userspace driver can replay things from there. This all
 369	 * sounds very complicated for little gain.
 370	 *
 371	 * Instead, we should just flag the VM as unusable, and fail any
 372	 * further request targeting this VM.
 373	 *
 374	 * We also provide a way to query a VM state, so userspace can destroy
 375	 * it and create a new one.
 376	 *
 377	 * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
 378	 * situation, where the logical device needs to be re-created.
 379	 */
 380	bool unusable;
 381
 382	/**
 383	 * @unhandled_fault: Unhandled fault happened.
 384	 *
 385	 * This should be reported to the scheduler, and the queue/group be
 386	 * flagged as faulty as a result.
 387	 */
 388	bool unhandled_fault;
 389};
 390
 391/**
 392 * struct panthor_vm_bind_job - VM bind job
 393 */
 394struct panthor_vm_bind_job {
 395	/** @base: Inherit from drm_sched_job. */
 396	struct drm_sched_job base;
 397
 398	/** @refcount: Reference count. */
 399	struct kref refcount;
 400
 401	/** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
 402	struct work_struct cleanup_op_ctx_work;
 403
 404	/** @vm: VM targeted by the VM operation. */
 405	struct panthor_vm *vm;
 406
 407	/** @ctx: Operation context. */
 408	struct panthor_vm_op_ctx ctx;
 409};
 410
 411/**
 412 * @pt_cache: Cache used to allocate MMU page tables.
 413 *
 414 * The pre-allocation pattern forces us to over-allocate to plan for
 415 * the worst case scenario, and return the pages we didn't use.
 416 *
 417 * Having a kmem_cache allows us to speed up allocations.
 418 */
 419static struct kmem_cache *pt_cache;
 420
 421/**
 422 * alloc_pt() - Custom page table allocator
 423 * @cookie: Cookie passed at page table allocation time.
 424 * @size: Size of the page table. This size should be fixed,
 425 * and determined at creation time based on the granule size.
 426 * @gfp: GFP flags.
 427 *
 428 * We want a custom allocator so we can use a cache for page table
 429 * allocations and amortize the cost of the over-reservation that's
 430 * done to allow asynchronous VM operations.
 431 *
 432 * Return: non-NULL on success, NULL if the allocation failed for any
 433 * reason.
 434 */
 435static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
 436{
 437	struct panthor_vm *vm = cookie;
 438	void *page;
 439
 440	/* Allocation of the root page table happens during init. */
 441	if (unlikely(!vm->root_page_table)) {
 442		struct page *p;
 443
 444		drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
 445		p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
 446				     gfp | __GFP_ZERO, get_order(size));
 447		page = p ? page_address(p) : NULL;
 448		vm->root_page_table = page;
 449		return page;
 450	}
 451
 452	/* We're not supposed to have anything bigger than 4k here, because we picked a
 453	 * 4k granule size at init time.
 454	 */
 455	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
 456		return NULL;
 457
 458	/* We must have some op_ctx attached to the VM and it must have at least one
 459	 * free page.
 460	 */
 461	if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
 462	    drm_WARN_ON(&vm->ptdev->base,
 463			vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count))
 464		return NULL;
 465
 466	page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++];
 467	memset(page, 0, SZ_4K);
 468
 469	/* Page table entries don't use virtual addresses, which trips up
 470	 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
 471	 * are mixed with other fields, and I fear kmemleak won't detect that
 472	 * either.
 473	 *
 474	 * Let's just ignore memory passed to the page-table driver for now.
 475	 */
 476	kmemleak_ignore(page);
 477	return page;
 478}
 479
 480/**
 481 * free_pt() - Custom page table free function
 482 * @cookie: Cookie passed at page table allocation time.
 483 * @data: Page table to free.
 484 * @size: Size of the page table. This size should be fixed,
 485 * and determined at creation time based on the granule size.
 486 */
 487static void free_pt(void *cookie, void *data, size_t size)
 488{
 489	struct panthor_vm *vm = cookie;
 490
 491	if (unlikely(vm->root_page_table == data)) {
 492		free_pages((unsigned long)data, get_order(size));
 493		vm->root_page_table = NULL;
 494		return;
 495	}
 496
 497	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
 498		return;
 499
 500	/* Return the page to the pt_cache. */
 501	kmem_cache_free(pt_cache, data);
 502}
 503
 504static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
 505{
 506	int ret;
 507	u32 val;
 508
 509	/* Wait for the MMU status to indicate there is no active command, in
 510	 * case one is pending.
 511	 */
 512	ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
 513						val, !(val & AS_STATUS_AS_ACTIVE),
 514						10, 100000);
 515
 516	if (ret) {
 517		panthor_device_schedule_reset(ptdev);
 518		drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n");
 519	}
 520
 521	return ret;
 522}
 523
 524static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
 525{
 526	int status;
 527
 528	/* write AS_COMMAND when MMU is ready to accept another command */
 529	status = wait_ready(ptdev, as_nr);
 530	if (!status)
 531		gpu_write(ptdev, AS_COMMAND(as_nr), cmd);
 532
 533	return status;
 534}
 535
 536static void lock_region(struct panthor_device *ptdev, u32 as_nr,
 537			u64 region_start, u64 size)
 538{
 539	u8 region_width;
 540	u64 region;
 541	u64 region_end = region_start + size;
 542
 543	if (!size)
 544		return;
 545
 546	/*
 547	 * The locked region is a naturally aligned power of 2 block encoded as
 548	 * its log2 size minus 1.
 549	 * Calculate the desired start/end and look for the highest bit which
 550	 * differs. The smallest naturally aligned block must include this bit
 551	 * change, the desired region starts with this bit (and subsequent bits)
 552	 * zeroed and ends with the bit (and subsequent bits) set to one.
 553	 */
 554	region_width = max(fls64(region_start ^ (region_end - 1)),
 555			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
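	/*
	 * Illustrative example (hypothetical values, not from the driver): for
	 * region_start = 0x2000 and size = 0x10000, region_end - 1 = 0x11fff,
	 * so the highest differing bit is bit 16 and fls64() returns 17. The
	 * smallest naturally aligned block covering the range is thus
	 * 2^17 = 128K, encoded as region_width = 16, and region_start gets
	 * masked down to 0, locking the range [0, 0x20000).
	 */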
 556
 557	/*
 558	 * Mask off the low bits of region_start (which would be ignored by
 559	 * the hardware anyway)
 560	 */
 561	region_start &= GENMASK_ULL(63, region_width);
 562
 563	region = region_width | region_start;
 564
 565	/* Lock the region that needs to be updated */
 566	gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
 567	gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
 568	write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
 569}
 570
 571static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
 572				      u64 iova, u64 size, u32 op)
 573{
 574	lockdep_assert_held(&ptdev->mmu->as.slots_lock);
 575
 576	if (as_nr < 0)
 577		return 0;
 578
 579	/*
 580	 * If the AS number is valid (>= 0), then we can be sure
 581	 * the device is up and running, so we don't need to explicitly
 582	 * power it up
 583	 */
 584
 585	if (op != AS_COMMAND_UNLOCK)
 586		lock_region(ptdev, as_nr, iova, size);
 587
 588	/* Run the MMU operation */
 589	write_cmd(ptdev, as_nr, op);
 590
 591	/* Wait for the flush to complete */
 592	return wait_ready(ptdev, as_nr);
 593}
 594
 595static int mmu_hw_do_operation(struct panthor_vm *vm,
 596			       u64 iova, u64 size, u32 op)
 597{
 598	struct panthor_device *ptdev = vm->ptdev;
 599	int ret;
 600
 601	mutex_lock(&ptdev->mmu->as.slots_lock);
 602	ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op);
 603	mutex_unlock(&ptdev->mmu->as.slots_lock);
 604
 605	return ret;
 606}
 607
 608static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
 609				 u64 transtab, u64 transcfg, u64 memattr)
 610{
 611	int ret;
 612
 613	ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
 614	if (ret)
 615		return ret;
 616
 617	gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
 618	gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
 619
 620	gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
 621	gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
 622
 623	gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
 624	gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
 625
 626	return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
 627}
 628
 629static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
 630{
 631	int ret;
 632
 633	ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
 634	if (ret)
 635		return ret;
 636
 637	gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
 638	gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
 639
 640	gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
 641	gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
 642
 643	gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
 644	gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
 645
 646	return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
 647}
 648
 649static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value)
 650{
 651	/* Bits 16 to 31 mean REQ_COMPLETE. */
 652	return value & GENMASK(15, 0);
 653}
 654
 655static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
 656{
 657	return BIT(as);
 658}
 659
 660/**
 661 * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
 662 * @vm: VM to check.
 663 *
 664 * Return: true if the VM has unhandled faults, false otherwise.
 665 */
 666bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
 667{
 668	return vm->unhandled_fault;
 669}
 670
 671/**
 672 * panthor_vm_is_unusable() - Check if the VM is still usable
 673 * @vm: VM to check.
 674 *
 675 * Return: true if the VM is unusable, false otherwise.
 676 */
 677bool panthor_vm_is_unusable(struct panthor_vm *vm)
 678{
 679	return vm->unusable;
 680}
 681
 682static void panthor_vm_release_as_locked(struct panthor_vm *vm)
 683{
 684	struct panthor_device *ptdev = vm->ptdev;
 685
 686	lockdep_assert_held(&ptdev->mmu->as.slots_lock);
 687
 688	if (drm_WARN_ON(&ptdev->base, vm->as.id < 0))
 689		return;
 690
 691	ptdev->mmu->as.slots[vm->as.id].vm = NULL;
 692	clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
 693	refcount_set(&vm->as.active_cnt, 0);
 694	list_del_init(&vm->as.lru_node);
 695	vm->as.id = -1;
 696}
 697
 698/**
 699 * panthor_vm_active() - Flag a VM as active
 700 * @vm: VM to flag as active.
 701 *
 702 * Assigns an address space to a VM so it can be used by the GPU/MCU.
 703 *
 704 * Return: 0 on success, a negative error code otherwise.
 705 */
 706int panthor_vm_active(struct panthor_vm *vm)
 707{
 708	struct panthor_device *ptdev = vm->ptdev;
 709	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
 710	struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
 711	int ret = 0, as, cookie;
 712	u64 transtab, transcfg;
 713
 714	if (!drm_dev_enter(&ptdev->base, &cookie))
 715		return -ENODEV;
 716
 717	if (refcount_inc_not_zero(&vm->as.active_cnt))
 718		goto out_dev_exit;
 719
 720	mutex_lock(&ptdev->mmu->as.slots_lock);
 721
 722	if (refcount_inc_not_zero(&vm->as.active_cnt))
 723		goto out_unlock;
 724
 725	as = vm->as.id;
 726	if (as >= 0) {
 727		/* Unhandled pagefault on this AS, the MMU was disabled. We need to
 728		 * re-enable the MMU after clearing+unmasking the AS interrupts.
 729		 */
 730		if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
 731			goto out_enable_as;
 732
 733		goto out_make_active;
 734	}
 735
 736	/* Check for a free AS */
 737	if (vm->for_mcu) {
 738		drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
 739		as = 0;
 740	} else {
 741		as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
 742	}
 743
 744	if (!(BIT(as) & ptdev->gpu_info.as_present)) {
 745		struct panthor_vm *lru_vm;
 746
 747		lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
 748						  struct panthor_vm,
 749						  as.lru_node);
 750		if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
 751			ret = -EBUSY;
 752			goto out_unlock;
 753		}
 754
 755		drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt));
 756		as = lru_vm->as.id;
 757		panthor_vm_release_as_locked(lru_vm);
 758	}
 759
 760	/* Assign the free or reclaimed AS to the FD */
 761	vm->as.id = as;
 762	set_bit(as, &ptdev->mmu->as.alloc_mask);
 763	ptdev->mmu->as.slots[as].vm = vm;
 764
 765out_enable_as:
 766	transtab = cfg->arm_lpae_s1_cfg.ttbr;
 767	transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
 768		   AS_TRANSCFG_PTW_RA |
 769		   AS_TRANSCFG_ADRMODE_AARCH64_4K |
 770		   AS_TRANSCFG_INA_BITS(55 - va_bits);
 771	if (ptdev->coherent)
 772		transcfg |= AS_TRANSCFG_PTW_SH_OS;
 773
 774	/* If the VM is re-activated, we clear the fault. */
 775	vm->unhandled_fault = false;
 776
 777	/* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
 778	 * before enabling the AS.
 779	 */
 780	if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
 781		gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
 782		ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
 783		gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
 784	}
 785
 786	ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);
 787
 788out_make_active:
 789	if (!ret) {
 790		refcount_set(&vm->as.active_cnt, 1);
 791		list_del_init(&vm->as.lru_node);
 792	}
 793
 794out_unlock:
 795	mutex_unlock(&ptdev->mmu->as.slots_lock);
 796
 797out_dev_exit:
 798	drm_dev_exit(cookie);
 799	return ret;
 800}
 801
 802/**
 803 * panthor_vm_idle() - Flag a VM idle
 804 * @vm: VM to flag as idle.
 805 *
 806 * When we know the GPU is done with the VM (no more jobs to process),
 807 * we can relinquish the AS slot attached to this VM, if any.
 808 *
 809 * We don't release the slot immediately, but instead place the VM in
 810 * the LRU list, so it can be evicted if another VM needs an AS slot.
 811 * This way, VMs stay attached to the AS they were given until we run
 812 * out of free slots, limiting the number of MMU operations (TLB flush
 813 * and other AS updates).
 814 */
 815void panthor_vm_idle(struct panthor_vm *vm)
 816{
 817	struct panthor_device *ptdev = vm->ptdev;
 818
 819	if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
 820		return;
 821
 822	if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
 823		list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);
 824
 825	refcount_set(&vm->as.active_cnt, 0);
 826	mutex_unlock(&ptdev->mmu->as.slots_lock);
 827}
 828
 829u32 panthor_vm_page_size(struct panthor_vm *vm)
 830{
 831	const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
 832	u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
 833
 834	return 1u << pg_shift;
 835}
 836
 837static void panthor_vm_stop(struct panthor_vm *vm)
 838{
 839	drm_sched_stop(&vm->sched, NULL);
 840}
 841
 842static void panthor_vm_start(struct panthor_vm *vm)
 843{
 844	drm_sched_start(&vm->sched, 0);
 845}
 846
 847/**
 848 * panthor_vm_as() - Get the AS slot attached to a VM
 849 * @vm: VM to get the AS slot of.
 850 *
 851 * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
 852 */
 853int panthor_vm_as(struct panthor_vm *vm)
 854{
 855	return vm->as.id;
 856}
 857
 858static size_t get_pgsize(u64 addr, size_t size, size_t *count)
 859{
 860	/*
 861	 * io-pgtable only operates on multiple pages within a single table
 862	 * entry, so we need to split at boundaries of the table size, i.e.
 863	 * the next block size up. The distance from address A to the next
 864	 * boundary of block size B is logically B - A % B, but in unsigned
 865	 * two's complement where B is a power of two we get the equivalence
 866	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
 867	 */
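	/*
	 * Illustrative example (hypothetical values): for addr = 0x201000 and
	 * size = 0x400000, -addr % SZ_2M = 0x1ff000, so this call returns
	 * SZ_4K with *count = 511, covering the range up to the next 2M
	 * boundary; the follow-up call at the now 2M-aligned address can then
	 * use SZ_2M mappings.
	 */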
 868	size_t blk_offset = -addr % SZ_2M;
 869
 870	if (blk_offset || size < SZ_2M) {
 871		*count = min_not_zero(blk_offset, size) / SZ_4K;
 872		return SZ_4K;
 873	}
 874	blk_offset = -addr % SZ_1G ?: SZ_1G;
 875	*count = min(blk_offset, size) / SZ_2M;
 876	return SZ_2M;
 877}
 878
 879static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
 880{
 881	struct panthor_device *ptdev = vm->ptdev;
 882	int ret = 0, cookie;
 883
 884	if (vm->as.id < 0)
 885		return 0;
 886
 887	/* If the device is unplugged, we just silently skip the flush. */
 888	if (!drm_dev_enter(&ptdev->base, &cookie))
 889		return 0;
 890
 891	ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
 892
 893	drm_dev_exit(cookie);
 894	return ret;
 895}
 896
 897/**
 898 * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
 899 * @vm: VM whose cache to flush
 900 *
 901 * Return: 0 on success, a negative error code if flush failed.
 902 */
 903int panthor_vm_flush_all(struct panthor_vm *vm)
 904{
 905	return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
 906}
 907
 908static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
 909{
 910	struct panthor_device *ptdev = vm->ptdev;
 911	struct io_pgtable_ops *ops = vm->pgtbl_ops;
 912	u64 offset = 0;
 913
 914	drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);
 915
 916	while (offset < size) {
 917		size_t unmapped_sz = 0, pgcount;
 918		size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
 919
 920		unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);
 921
 922		if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
 923			drm_err(&ptdev->base, "failed to unmap range %llx-%llx (requested range %llx-%llx)\n",
 924				iova + offset + unmapped_sz,
 925				iova + offset + pgsize * pgcount,
 926				iova, iova + size);
 927			panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
 928			return -EINVAL;
 929		}
 930		offset += unmapped_sz;
 931	}
 932
 933	return panthor_vm_flush_range(vm, iova, size);
 934}
 935
 936static int
 937panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
 938		     struct sg_table *sgt, u64 offset, u64 size)
 939{
 940	struct panthor_device *ptdev = vm->ptdev;
 941	unsigned int count;
 942	struct scatterlist *sgl;
 943	struct io_pgtable_ops *ops = vm->pgtbl_ops;
 944	u64 start_iova = iova;
 945	int ret;
 946
 947	if (!size)
 948		return 0;
 949
 950	for_each_sgtable_dma_sg(sgt, sgl, count) {
 951		dma_addr_t paddr = sg_dma_address(sgl);
 952		size_t len = sg_dma_len(sgl);
 953
 954		if (len <= offset) {
 955			offset -= len;
 956			continue;
 957		}
 958
 959		paddr += offset;
 960		len -= offset;
 961		len = min_t(size_t, len, size);
 962		size -= len;
 963
 964		drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
 965			vm->as.id, iova, &paddr, len);
 966
 967		while (len) {
 968			size_t pgcount, mapped = 0;
 969			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
 970
 971			ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
 972					     GFP_KERNEL, &mapped);
 973			iova += mapped;
 974			paddr += mapped;
 975			len -= mapped;
 976
 977			if (drm_WARN_ON(&ptdev->base, !ret && !mapped))
 978				ret = -ENOMEM;
 979
 980			if (ret) {
 981				/* If something failed, unmap what we've already mapped before
 982				 * returning. The unmap call is not supposed to fail.
 983				 */
 984				drm_WARN_ON(&ptdev->base,
 985					    panthor_vm_unmap_pages(vm, start_iova,
 986								   iova - start_iova));
 987				return ret;
 988			}
 989		}
 990
 991		if (!size)
 992			break;
 993
 994		offset = 0;
 995	}
 996
 997	return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
 998}
 999
1000static int flags_to_prot(u32 flags)
1001{
1002	int prot = 0;
1003
1004	if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC)
1005		prot |= IOMMU_NOEXEC;
1006
1007	if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED))
1008		prot |= IOMMU_CACHE;
1009
1010	if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY)
1011		prot |= IOMMU_READ;
1012	else
1013		prot |= IOMMU_READ | IOMMU_WRITE;
1014
1015	return prot;
1016}
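
/*
 * Illustrative example (hypothetical flags): a READONLY | NOEXEC mapping
 * without UNCACHED translates to IOMMU_READ | IOMMU_NOEXEC | IOMMU_CACHE.
 */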
1017
1018/**
1019 * panthor_vm_alloc_va() - Allocate a region in the auto-va space
1020 * @vm: VM to allocate a region on.
1021 * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
1022 * wants the VA to be automatically allocated from the auto-VA range.
1023 * @size: size of the VA range.
1024 * @va_node: drm_mm_node to initialize. Must be zero-initialized.
1025 *
1026 * Some GPU objects, like heap chunks, are fully managed by the kernel and
1027 * need to be mapped to the userspace VM, in the region reserved for kernel
1028 * objects.
1029 *
1030 * This function takes care of allocating a region in the kernel auto-VA space.
1031 *
1032 * Return: 0 on success, an error code otherwise.
1033 */
1034int
1035panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
1036		    struct drm_mm_node *va_node)
1037{
1038	ssize_t vm_pgsz = panthor_vm_page_size(vm);
1039	int ret;
1040
1041	if (!size || !IS_ALIGNED(size, vm_pgsz))
1042		return -EINVAL;
1043
1044	if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
1045		return -EINVAL;
1046
1047	mutex_lock(&vm->mm_lock);
1048	if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
1049		va_node->start = va;
1050		va_node->size = size;
1051		ret = drm_mm_reserve_node(&vm->mm, va_node);
1052	} else {
1053		ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size,
1054						  size >= SZ_2M ? SZ_2M : SZ_4K,
1055						  0, vm->kernel_auto_va.start,
1056						  vm->kernel_auto_va.end,
1057						  DRM_MM_INSERT_BEST);
1058	}
1059	mutex_unlock(&vm->mm_lock);
1060
1061	return ret;
1062}
1063
1064/**
1065 * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
1066 * @vm: VM to free the region on.
1067 * @va_node: Memory node representing the region to free.
1068 */
1069void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
1070{
1071	mutex_lock(&vm->mm_lock);
1072	drm_mm_remove_node(va_node);
1073	mutex_unlock(&vm->mm_lock);
1074}
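
/*
 * Illustrative usage only (not part of the driver): a kernel-managed object
 * such as a heap chunk would typically pair these two helpers, assuming a
 * zero-initialized node:
 *
 *	struct drm_mm_node va_node = {};
 *
 *	if (!panthor_vm_alloc_va(vm, PANTHOR_VM_KERNEL_AUTO_VA, SZ_2M, &va_node)) {
 *		// map the object at va_node.start and use it
 *		panthor_vm_free_va(vm, &va_node);
 *	}
 */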
1075
1076static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
1077{
1078	struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
1079	struct drm_gpuvm *vm = vm_bo->vm;
1080	bool unpin;
1081
1082	/* We must retain the GEM before calling drm_gpuvm_bo_put(),
1083	 * otherwise the mutex might be destroyed while we hold it.
1084	 * Same goes for the VM, since we take the VM resv lock.
1085	 */
1086	drm_gem_object_get(&bo->base.base);
1087	drm_gpuvm_get(vm);
1088
1089	/* We take the resv lock to protect against concurrent accesses to the
1090	 * gpuvm evicted/extobj lists that are modified in
1091	 * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
1092	 * releases the last vm_bo reference.
1093	 * We take the BO GPUVA list lock to protect the vm_bo removal from the
1094	 * GEM vm_bo list.
1095	 */
1096	dma_resv_lock(drm_gpuvm_resv(vm), NULL);
1097	mutex_lock(&bo->gpuva_list_lock);
1098	unpin = drm_gpuvm_bo_put(vm_bo);
1099	mutex_unlock(&bo->gpuva_list_lock);
1100	dma_resv_unlock(drm_gpuvm_resv(vm));
1101
1102	/* If the vm_bo object was destroyed, release the pin reference that
1103	 * was held by this object.
1104	 */
1105	if (unpin && !bo->base.base.import_attach)
1106		drm_gem_shmem_unpin(&bo->base);
1107
1108	drm_gpuvm_put(vm);
1109	drm_gem_object_put(&bo->base.base);
1110}
1111
1112static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1113				      struct panthor_vm *vm)
1114{
1115	struct panthor_vma *vma, *tmp_vma;
1116
1117	u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
1118				 op_ctx->rsvd_page_tables.ptr;
1119
1120	if (remaining_pt_count) {
1121		kmem_cache_free_bulk(pt_cache, remaining_pt_count,
1122				     op_ctx->rsvd_page_tables.pages +
1123				     op_ctx->rsvd_page_tables.ptr);
1124	}
1125
1126	kfree(op_ctx->rsvd_page_tables.pages);
1127
1128	if (op_ctx->map.vm_bo)
1129		panthor_vm_bo_put(op_ctx->map.vm_bo);
1130
1131	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
1132		kfree(op_ctx->preallocated_vmas[i]);
1133
1134	list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
1135		list_del(&vma->node);
1136		panthor_vm_bo_put(vma->base.vm_bo);
1137		kfree(vma);
1138	}
1139}
1140
1141static struct panthor_vma *
1142panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
1143{
1144	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
1145		struct panthor_vma *vma = op_ctx->preallocated_vmas[i];
1146
1147		if (vma) {
1148			op_ctx->preallocated_vmas[i] = NULL;
1149			return vma;
1150		}
1151	}
1152
1153	return NULL;
1154}
1155
1156static int
1157panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
1158{
1159	u32 vma_count;
1160
1161	switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
1162	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
1163		/* One VMA for the new mapping, and two more VMAs for the remap case
1164		 * which might contain both a prev and next VA.
1165		 */
1166		vma_count = 3;
1167		break;
1168
1169	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
1170		/* Partial unmaps might trigger a remap with either a prev or a next VA,
1171		 * but not both.
1172		 */
1173		vma_count = 1;
1174		break;
1175
1176	default:
1177		return 0;
1178	}
1179
1180	for (u32 i = 0; i < vma_count; i++) {
1181		struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
1182
1183		if (!vma)
1184			return -ENOMEM;
1185
1186		op_ctx->preallocated_vmas[i] = vma;
1187	}
1188
1189	return 0;
1190}
1191
1192#define PANTHOR_VM_BIND_OP_MAP_FLAGS \
1193	(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
1194	 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
1195	 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
1196	 DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
1197
1198static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1199					 struct panthor_vm *vm,
1200					 struct panthor_gem_object *bo,
1201					 u64 offset,
1202					 u64 size, u64 va,
1203					 u32 flags)
1204{
1205	struct drm_gpuvm_bo *preallocated_vm_bo;
1206	struct sg_table *sgt = NULL;
1207	u64 pt_count;
1208	int ret;
1209
1210	if (!bo)
1211		return -EINVAL;
1212
1213	if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) ||
1214	    (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
1215		return -EINVAL;
1216
1217	/* Make sure the VA and size are aligned and in-bounds. */
1218	if (size > bo->base.base.size || offset > bo->base.base.size - size)
1219		return -EINVAL;
1220
1221	/* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
1222	if (bo->exclusive_vm_root_gem &&
1223	    bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm))
1224		return -EINVAL;
1225
1226	memset(op_ctx, 0, sizeof(*op_ctx));
1227	INIT_LIST_HEAD(&op_ctx->returned_vmas);
1228	op_ctx->flags = flags;
1229	op_ctx->va.range = size;
1230	op_ctx->va.addr = va;
1231
1232	ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
1233	if (ret)
1234		goto err_cleanup;
1235
1236	if (!bo->base.base.import_attach) {
1237		/* Pre-reserve the BO pages, so the map operation doesn't have to
1238		 * allocate.
1239		 */
1240		ret = drm_gem_shmem_pin(&bo->base);
1241		if (ret)
1242			goto err_cleanup;
1243	}
1244
1245	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
1246	if (IS_ERR(sgt)) {
1247		if (!bo->base.base.import_attach)
1248			drm_gem_shmem_unpin(&bo->base);
1249
1250		ret = PTR_ERR(sgt);
1251		goto err_cleanup;
1252	}
1253
1254	op_ctx->map.sgt = sgt;
1255
1256	preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
1257	if (!preallocated_vm_bo) {
1258		if (!bo->base.base.import_attach)
1259			drm_gem_shmem_unpin(&bo->base);
1260
1261		ret = -ENOMEM;
1262		goto err_cleanup;
1263	}
1264
1265	/* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
1266	 * pre-allocated BO if the <BO,VM> association exists. Given we
1267	 * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
1268	 * be called immediately, and we have to hold the VM resv lock when
1269	 * calling this function.
1270	 */
1271	dma_resv_lock(panthor_vm_resv(vm), NULL);
1272	mutex_lock(&bo->gpuva_list_lock);
1273	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
1274	mutex_unlock(&bo->gpuva_list_lock);
1275	dma_resv_unlock(panthor_vm_resv(vm));
1276
1277	/* If a vm_bo for this <VM,BO> combination exists, it already
1278	 * retains a pin ref, and we can release the one we took earlier.
1279	 *
1280	 * If our pre-allocated vm_bo is picked, it now retains the pin ref,
1281	 * which will be released in panthor_vm_bo_put().
1282	 */
1283	if (preallocated_vm_bo != op_ctx->map.vm_bo &&
1284	    !bo->base.base.import_attach)
1285		drm_gem_shmem_unpin(&bo->base);
1286
1287	op_ctx->map.bo_offset = offset;
1288
1289	/* L1, L2 and L3 page tables.
1290	 * We could optimize L3 allocation by iterating over the sgt and merging
1291	 * 2M contiguous blocks, but it's simpler to over-provision and return
1292	 * the pages if they're not used.
1293	 */
1294	pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
1295		   ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
1296		   ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
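	/*
	 * Illustrative example (hypothetical values): for va = 1 GiB and
	 * size = 4 MiB, the three terms above evaluate to 1 + 1 + 2, i.e.
	 * one L1 table, one L2 table and two L3 tables, so four 4K pages
	 * get reserved.
	 */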
1297
1298	op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
1299						 sizeof(*op_ctx->rsvd_page_tables.pages),
1300						 GFP_KERNEL);
1301	if (!op_ctx->rsvd_page_tables.pages) {
1302		ret = -ENOMEM;
1303		goto err_cleanup;
1304	}
1305
1306	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
1307				    op_ctx->rsvd_page_tables.pages);
1308	op_ctx->rsvd_page_tables.count = ret;
1309	if (ret != pt_count) {
1310		ret = -ENOMEM;
1311		goto err_cleanup;
1312	}
1313
1314	/* Insert BO into the extobj list last, when we know nothing can fail. */
1315	dma_resv_lock(panthor_vm_resv(vm), NULL);
1316	drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
1317	dma_resv_unlock(panthor_vm_resv(vm));
1318
1319	return 0;
1320
1321err_cleanup:
1322	panthor_vm_cleanup_op_ctx(op_ctx, vm);
1323	return ret;
1324}
1325
1326static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1327					   struct panthor_vm *vm,
1328					   u64 va, u64 size)
1329{
1330	u32 pt_count = 0;
1331	int ret;
1332
1333	memset(op_ctx, 0, sizeof(*op_ctx));
1334	INIT_LIST_HEAD(&op_ctx->returned_vmas);
1335	op_ctx->va.range = size;
1336	op_ctx->va.addr = va;
1337	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
1338
1339	/* Pre-allocate L3 page tables to account for the split-2M-block
1340	 * situation on unmap.
1341	 */
1342	if (va != ALIGN(va, SZ_2M))
1343		pt_count++;
1344
1345	if (va + size != ALIGN(va + size, SZ_2M) &&
1346	    ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
1347		pt_count++;
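	/*
	 * Illustrative example (hypothetical values): unmapping
	 * va = 0x40100000, size = 0x100000 starts in the middle of a 2M block
	 * but ends on a 2M boundary, so a single extra L3 table is reserved;
	 * a fully 2M-aligned range would reserve none.
	 */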
1348
1349	ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
1350	if (ret)
1351		goto err_cleanup;
1352
1353	if (pt_count) {
1354		op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
1355							 sizeof(*op_ctx->rsvd_page_tables.pages),
1356							 GFP_KERNEL);
1357		if (!op_ctx->rsvd_page_tables.pages) {
1358			ret = -ENOMEM;
1359			goto err_cleanup;
1360		}
1361
1362		ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
1363					    op_ctx->rsvd_page_tables.pages);
1364		if (ret != pt_count) {
1365			ret = -ENOMEM;
1366			goto err_cleanup;
1367		}
1368		op_ctx->rsvd_page_tables.count = pt_count;
1369	}
1370
1371	return 0;
1372
1373err_cleanup:
1374	panthor_vm_cleanup_op_ctx(op_ctx, vm);
1375	return ret;
1376}
1377
1378static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1379						struct panthor_vm *vm)
1380{
1381	memset(op_ctx, 0, sizeof(*op_ctx));
1382	INIT_LIST_HEAD(&op_ctx->returned_vmas);
1383	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
1384}
1385
1386/**
1387 * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
1388 * @vm: VM to look into.
1389 * @va: Virtual address to search for.
1390 * @bo_offset: Offset of the GEM object mapped at this virtual address.
1391 * Only valid on success.
1392 *
1393 * The object returned by this function might no longer be mapped when the
1394 * function returns. It's the caller's responsibility to ensure there are no
1395 * concurrent map/unmap operations making the returned value invalid, or
1396 * make sure it doesn't matter if the object is no longer mapped.
1397 *
1398 * Return: A valid pointer on success, an ERR_PTR() otherwise.
1399 */
1400struct panthor_gem_object *
1401panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
1402{
1403	struct panthor_gem_object *bo = ERR_PTR(-ENOENT);
1404	struct drm_gpuva *gpuva;
1405	struct panthor_vma *vma;
1406
1407	/* Take the VM lock to prevent concurrent map/unmap operations. */
1408	mutex_lock(&vm->op_lock);
1409	gpuva = drm_gpuva_find_first(&vm->base, va, 1);
1410	vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
1411	if (vma && vma->base.gem.obj) {
1412		drm_gem_object_get(vma->base.gem.obj);
1413		bo = to_panthor_bo(vma->base.gem.obj);
1414		*bo_offset = vma->base.gem.offset + (va - vma->base.va.addr);
1415	}
1416	mutex_unlock(&vm->op_lock);
1417
1418	return bo;
1419}
1420
1421#define PANTHOR_VM_MIN_KERNEL_VA_SIZE	SZ_256M
1422
1423static u64
1424panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
1425				    u64 full_va_range)
1426{
1427	u64 user_va_range;
1428
1429	/* Make sure we have a minimum amount of VA space for kernel objects. */
1430	if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE)
1431		return 0;
1432
1433	if (args->user_va_range) {
1434		/* Use the user provided value if != 0. */
1435		user_va_range = args->user_va_range;
1436	} else if (TASK_SIZE_OF(current) < full_va_range) {
1437		/* If the task VM size is smaller than the GPU VA range, pick this
1438		 * as our default user VA range, so userspace can CPU/GPU map buffers
1439		 * at the same address.
1440		 */
1441		user_va_range = TASK_SIZE_OF(current);
1442	} else {
1443		/* If the GPU VA range is smaller than the task VM size, we
1444		 * just have to live with the fact we won't be able to map
1445		 * all buffers at the same GPU/CPU address.
1446		 *
1447		 * If the GPU VA range is bigger than 4G (more than 32 bits of
1448		 * VA), we split the range in two, and assign half of it to
1449		 * the user and the other half to the kernel. If it's not, we
1450		 * keep the kernel VA space as small as possible.
1451		 */
1452		user_va_range = full_va_range > SZ_4G ?
1453				full_va_range / 2 :
1454				full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
1455	}
1456
1457	if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range)
1458		user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
1459
1460	return user_va_range;
1461}
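
/*
 * Illustrative example (hypothetical numbers): with va_bits = 48, the full
 * GPU VA space is 256 TiB. If userspace passes user_va_range = 0 and the CPU
 * task size is not smaller than that, the range gets split in half:
 * 128 TiB for userspace mappings, 128 TiB for kernel objects.
 */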
1462
1463#define PANTHOR_VM_CREATE_FLAGS		0
1464
1465static int
1466panthor_vm_create_check_args(const struct panthor_device *ptdev,
1467			     const struct drm_panthor_vm_create *args,
1468			     u64 *kernel_va_start, u64 *kernel_va_range)
1469{
1470	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
1471	u64 full_va_range = 1ull << va_bits;
1472	u64 user_va_range;
1473
1474	if (args->flags & ~PANTHOR_VM_CREATE_FLAGS)
1475		return -EINVAL;
1476
1477	user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range);
1478	if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range))
1479		return -EINVAL;
1480
1481	/* Pick a kernel VA range that's a power of two, to have a clear split. */
1482	*kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range);
1483	*kernel_va_start = full_va_range - *kernel_va_range;
1484	return 0;
1485}
1486
1487/*
1488 * Only 32 VMs per open file. If that becomes a limiting factor, we can
1489 * increase this number.
1490 */
1491#define PANTHOR_MAX_VMS_PER_FILE	32
1492
1493/**
1494 * panthor_vm_pool_create_vm() - Create a VM
1495 * @ptdev: The panthor device.
1496 * @pool: The VM pool to create this VM on.
1497 * @args: VM creation arguments.
1498 *
1499 * Return: a positive VM ID on success, a negative error code otherwise.
1500 */
1501int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
1502			      struct panthor_vm_pool *pool,
1503			      struct drm_panthor_vm_create *args)
1504{
1505	u64 kernel_va_start, kernel_va_range;
1506	struct panthor_vm *vm;
1507	int ret;
1508	u32 id;
1509
1510	ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range);
1511	if (ret)
1512		return ret;
1513
1514	vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range,
1515			       kernel_va_start, kernel_va_range);
1516	if (IS_ERR(vm))
1517		return PTR_ERR(vm);
1518
1519	ret = xa_alloc(&pool->xa, &id, vm,
1520		       XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL);
1521
1522	if (ret) {
1523		panthor_vm_put(vm);
1524		return ret;
1525	}
1526
1527	args->user_va_range = kernel_va_start;
1528	return id;
1529}
1530
1531static void panthor_vm_destroy(struct panthor_vm *vm)
1532{
1533	if (!vm)
1534		return;
1535
1536	vm->destroyed = true;
1537
1538	mutex_lock(&vm->heaps.lock);
1539	panthor_heap_pool_destroy(vm->heaps.pool);
1540	vm->heaps.pool = NULL;
1541	mutex_unlock(&vm->heaps.lock);
1542
1543	drm_WARN_ON(&vm->ptdev->base,
1544		    panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
1545	panthor_vm_put(vm);
1546}
1547
1548/**
1549 * panthor_vm_pool_destroy_vm() - Destroy a VM.
1550 * @pool: VM pool.
1551 * @handle: VM handle.
1552 *
1553 * This function doesn't free the VM object or its resources, it just kills
1554 * all mappings, and makes sure nothing can be mapped after that point.
1555 *
1556 * If there were any active jobs at the time this function was called, these
1557 * jobs should experience page faults and be killed as a result.
1558 *
1559 * The VM resources are freed when the last reference on the VM object is
1560 * dropped.
1561 */
1562int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
1563{
1564	struct panthor_vm *vm;
1565
1566	vm = xa_erase(&pool->xa, handle);
1567
1568	panthor_vm_destroy(vm);
1569
1570	return vm ? 0 : -EINVAL;
1571}
1572
1573/**
1574 * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
1575 * @pool: VM pool to check.
1576 * @handle: Handle of the VM to retrieve.
1577 *
1578 * Return: A valid pointer if the VM exists, NULL otherwise.
1579 */
1580struct panthor_vm *
1581panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
1582{
1583	struct panthor_vm *vm;
1584
1585	xa_lock(&pool->xa);
1586	vm = panthor_vm_get(xa_load(&pool->xa, handle));
1587	xa_unlock(&pool->xa);
1588
1589	return vm;
1590}
1591
1592/**
1593 * panthor_vm_pool_destroy() - Destroy a VM pool.
1594 * @pfile: File.
1595 *
1596 * Destroy all VMs in the pool, and release the pool resources.
1597 *
1598 * Note that VMs can outlive the pool they were created from if other
1599 * objects hold a reference to their VMs.
1600 */
1601void panthor_vm_pool_destroy(struct panthor_file *pfile)
1602{
1603	struct panthor_vm *vm;
1604	unsigned long i;
1605
1606	if (!pfile->vms)
1607		return;
1608
1609	xa_for_each(&pfile->vms->xa, i, vm)
1610		panthor_vm_destroy(vm);
1611
1612	xa_destroy(&pfile->vms->xa);
1613	kfree(pfile->vms);
1614}
1615
1616/**
1617 * panthor_vm_pool_create() - Create a VM pool
1618 * @pfile: File.
1619 *
1620 * Return: 0 on success, a negative error code otherwise.
1621 */
1622int panthor_vm_pool_create(struct panthor_file *pfile)
1623{
1624	pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL);
1625	if (!pfile->vms)
1626		return -ENOMEM;
1627
1628	xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);
1629	return 0;
1630}
1631
1632/* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */
1633static void mmu_tlb_flush_all(void *cookie)
1634{
1635}
1636
1637static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
1638{
1639}
1640
1641static const struct iommu_flush_ops mmu_tlb_ops = {
1642	.tlb_flush_all = mmu_tlb_flush_all,
1643	.tlb_flush_walk = mmu_tlb_flush_walk,
1644};
1645
1646static const char *access_type_name(struct panthor_device *ptdev,
1647				    u32 fault_status)
1648{
1649	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
1650	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
1651		return "ATOMIC";
1652	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
1653		return "READ";
1654	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
1655		return "WRITE";
1656	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
1657		return "EXECUTE";
1658	default:
1659		drm_WARN_ON(&ptdev->base, 1);
1660		return NULL;
1661	}
1662}
1663
1664static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
1665{
1666	bool has_unhandled_faults = false;
1667
1668	status = panthor_mmu_fault_mask(ptdev, status);
1669	while (status) {
1670		u32 as = ffs(status | (status >> 16)) - 1;
1671		u32 mask = panthor_mmu_as_fault_mask(ptdev, as);
1672		u32 new_int_mask;
1673		u64 addr;
1674		u32 fault_status;
1675		u32 exception_type;
1676		u32 access_type;
1677		u32 source_id;
1678
1679		fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
1680		addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
1681		addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
1682
1683		/* decode the fault status */
1684		exception_type = fault_status & 0xFF;
1685		access_type = (fault_status >> 8) & 0x3;
1686		source_id = (fault_status >> 16);
1687
1688		mutex_lock(&ptdev->mmu->as.slots_lock);
1689
1690		ptdev->mmu->as.faulty_mask |= mask;
1691		new_int_mask =
1692			panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);
1693
1694		/* terminal fault, print info about the fault */
1695		drm_err(&ptdev->base,
1696			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
1697			"raw fault status: 0x%X\n"
1698			"decoded fault status: %s\n"
1699			"exception type 0x%X: %s\n"
1700			"access type 0x%X: %s\n"
1701			"source id 0x%X\n",
1702			as, addr,
1703			fault_status,
1704			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
1705			exception_type, panthor_exception_name(ptdev, exception_type),
1706			access_type, access_type_name(ptdev, fault_status),
1707			source_id);
1708
1709		/* Ignore MMU interrupts on this AS until it's been
1710		 * re-enabled.
1711		 */
1712		ptdev->mmu->irq.mask = new_int_mask;
1713		gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
1714
1715		if (ptdev->mmu->as.slots[as].vm)
1716			ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
1717
1718		/* Disable the MMU to kill jobs on this AS. */
1719		panthor_mmu_as_disable(ptdev, as);
1720		mutex_unlock(&ptdev->mmu->as.slots_lock);
1721
1722		status &= ~mask;
1723		has_unhandled_faults = true;
1724	}
1725
1726	if (has_unhandled_faults)
1727		panthor_sched_report_mmu_fault(ptdev);
1728}
1729PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
1730
1731/**
1732 * panthor_mmu_suspend() - Suspend the MMU logic
1733 * @ptdev: Device.
1734 *
1735 * All we do here is de-assign the AS slots on all active VMs, so things
1736 * get flushed to the main memory, and no further access to these VMs are
1737 * get flushed to the main memory, and no further access to these VMs is
1738 *
1739 * We also suspend the MMU IRQ.
1740 */
1741void panthor_mmu_suspend(struct panthor_device *ptdev)
1742{
1743	mutex_lock(&ptdev->mmu->as.slots_lock);
1744	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1745		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1746
1747		if (vm) {
1748			drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
1749			panthor_vm_release_as_locked(vm);
1750		}
1751	}
1752	mutex_unlock(&ptdev->mmu->as.slots_lock);
1753
1754	panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1755}
1756
1757/**
1758 * panthor_mmu_resume() - Resume the MMU logic
1759 * @ptdev: Device.
1760 *
1761 * Resume the IRQ.
1762 *
1763 * We don't re-enable previously active VMs. We assume other parts of the
1764 * driver will call panthor_vm_active() on the VMs they intend to use.
1765 */
1766void panthor_mmu_resume(struct panthor_device *ptdev)
1767{
1768	mutex_lock(&ptdev->mmu->as.slots_lock);
1769	ptdev->mmu->as.alloc_mask = 0;
1770	ptdev->mmu->as.faulty_mask = 0;
1771	mutex_unlock(&ptdev->mmu->as.slots_lock);
1772
1773	panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
1774}
1775
1776/**
1777 * panthor_mmu_pre_reset() - Prepare for a reset
1778 * @ptdev: Device.
1779 *
1780 * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
1781 * don't get asked to do a VM operation while the GPU is down.
1782 *
1783 * We don't cleanly shut down the AS slots here, because the reset might
1784 * come from an AS_ACTIVE_BIT stuck situation.
1785 */
1786void panthor_mmu_pre_reset(struct panthor_device *ptdev)
1787{
1788	struct panthor_vm *vm;
1789
1790	panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1791
1792	mutex_lock(&ptdev->mmu->vm.lock);
1793	ptdev->mmu->vm.reset_in_progress = true;
1794	list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
1795		panthor_vm_stop(vm);
1796	mutex_unlock(&ptdev->mmu->vm.lock);
1797}
1798
1799/**
1800 * panthor_mmu_post_reset() - Restore things after a reset
1801 * @ptdev: Device.
1802 *
1803 * Put the MMU logic back in action after a reset. That implies resuming the
1804 * IRQ and re-enabling the VM_BIND queues.
1805 */
1806void panthor_mmu_post_reset(struct panthor_device *ptdev)
1807{
1808	struct panthor_vm *vm;
1809
1810	mutex_lock(&ptdev->mmu->as.slots_lock);
1811
1812	/* Now that the reset is effective, we can assume that none of the
1813	 * AS slots are set up, and clear the faulty flags too.
1814	 */
1815	ptdev->mmu->as.alloc_mask = 0;
1816	ptdev->mmu->as.faulty_mask = 0;
1817
1818	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1819		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1820
1821		if (vm)
1822			panthor_vm_release_as_locked(vm);
1823	}
1824
1825	mutex_unlock(&ptdev->mmu->as.slots_lock);
1826
1827	panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
1828
1829	/* Restart the VM_BIND queues. */
1830	mutex_lock(&ptdev->mmu->vm.lock);
1831	list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
1832		panthor_vm_start(vm);
1833	}
1834	ptdev->mmu->vm.reset_in_progress = false;
1835	mutex_unlock(&ptdev->mmu->vm.lock);
1836}
1837
1838static void panthor_vm_free(struct drm_gpuvm *gpuvm)
1839{
1840	struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base);
1841	struct panthor_device *ptdev = vm->ptdev;
1842
1843	mutex_lock(&vm->heaps.lock);
1844	if (drm_WARN_ON(&ptdev->base, vm->heaps.pool))
1845		panthor_heap_pool_destroy(vm->heaps.pool);
1846	mutex_unlock(&vm->heaps.lock);
1847	mutex_destroy(&vm->heaps.lock);
1848
1849	mutex_lock(&ptdev->mmu->vm.lock);
1850	list_del(&vm->node);
1851	/* Restore the scheduler state so we can call drm_sched_entity_destroy()
1852	 * and drm_sched_fini(). If we get here, that means we have no jobs left
1853	 * and no new jobs can be queued, so we can start the scheduler without
1854	 * risking interfering with the reset.
1855	 */
1856	if (ptdev->mmu->vm.reset_in_progress)
1857		panthor_vm_start(vm);
1858	mutex_unlock(&ptdev->mmu->vm.lock);
1859
1860	drm_sched_entity_destroy(&vm->entity);
1861	drm_sched_fini(&vm->sched);
1862
1863	mutex_lock(&ptdev->mmu->as.slots_lock);
1864	if (vm->as.id >= 0) {
1865		int cookie;
1866
1867		if (drm_dev_enter(&ptdev->base, &cookie)) {
1868			panthor_mmu_as_disable(ptdev, vm->as.id);
1869			drm_dev_exit(cookie);
1870		}
1871
1872		ptdev->mmu->as.slots[vm->as.id].vm = NULL;
1873		clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
1874		list_del(&vm->as.lru_node);
1875	}
1876	mutex_unlock(&ptdev->mmu->as.slots_lock);
1877
1878	free_io_pgtable_ops(vm->pgtbl_ops);
1879
1880	drm_mm_takedown(&vm->mm);
1881	kfree(vm);
1882}
1883
1884/**
1885 * panthor_vm_put() - Release a reference on a VM
1886 * @vm: VM to release the reference on. Can be NULL.
1887 */
1888void panthor_vm_put(struct panthor_vm *vm)
1889{
1890	drm_gpuvm_put(vm ? &vm->base : NULL);
1891}
1892
1893/**
1894 * panthor_vm_get() - Get a VM reference
1895 * @vm: VM to get the reference on. Can be NULL.
1896 *
1897 * Return: @vm value.
1898 */
1899struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
1900{
1901	if (vm)
1902		drm_gpuvm_get(&vm->base);
1903
1904	return vm;
1905}
1906
1907/**
1908 * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
1909 * @vm: VM to query the heap pool on.
1910 * @create: True if the heap pool should be created when it doesn't exist.
1911 *
1912 * Heap pools are per-VM. This function allows one to retrieve the heap pool
1913 * attached to a VM.
1914 *
1915 * If no heap pool exists yet, and @create is true, we create one.
1916 *
1917 * The returned panthor_heap_pool should be released with panthor_heap_pool_put().
1918 *
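 * A minimal usage sketch (error handling trimmed; names other than the two
 * panthor_heap_pool helpers are illustrative):
 *
 *	pool = panthor_vm_get_heap_pool(vm, true);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	panthor_heap_pool_put(pool);
 *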
1919 * Return: A valid pointer on success, an ERR_PTR() otherwise.
1920 */
1921struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
1922{
1923	struct panthor_heap_pool *pool;
1924
1925	mutex_lock(&vm->heaps.lock);
1926	if (!vm->heaps.pool && create) {
1927		if (vm->destroyed)
1928			pool = ERR_PTR(-EINVAL);
1929		else
1930			pool = panthor_heap_pool_create(vm->ptdev, vm);
1931
1932		if (!IS_ERR(pool))
1933			vm->heaps.pool = panthor_heap_pool_get(pool);
1934	} else {
1935		pool = panthor_heap_pool_get(vm->heaps.pool);
1936		if (!pool)
1937			pool = ERR_PTR(-ENOENT);
1938	}
1939	mutex_unlock(&vm->heaps.lock);
1940
1941	return pool;
1942}
1943
1944static u64 mair_to_memattr(u64 mair)
1945{
1946	u64 memattr = 0;
1947	u32 i;
1948
1949	for (i = 0; i < 8; i++) {
1950		u8 in_attr = mair >> (8 * i), out_attr;
1951		u8 outer = in_attr >> 4, inner = in_attr & 0xf;
1952
1953		/* For caching to be enabled, inner and outer caching policy
1954		 * have to both be write-back; if one of them is write-through
1955		 * or non-cacheable, we just choose non-cacheable. Device
1956		 * memory is also translated to non-cacheable.
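		 *
		 * For example (illustrative values): a MAIR attribute of 0xff
		 * (Normal memory, inner/outer write-back, read/write allocate)
		 * keeps caching enabled and takes the write-back path below,
		 * while a Device attribute such as 0x04 has outer == 0 and is
		 * demoted to inner/outer non-cacheable.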
1957		 */
1958		if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
1959			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
1960				   AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
1961				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
1962		} else {
1963			/* Use SH_CPU_INNER mode so SH_IS, which is used when
1964			 * IOMMU_CACHE is set, actually maps to the standard
1965			 * definition of inner-shareable and not Mali's
1966			 * internal-shareable mode.
1967			 */
1968			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
1969				   AS_MEMATTR_AARCH64_SH_CPU_INNER |
1970				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
1971		}
1972
1973		memattr |= (u64)out_attr << (8 * i);
1974	}
1975
1976	return memattr;
1977}
1978
1979static void panthor_vma_link(struct panthor_vm *vm,
1980			     struct panthor_vma *vma,
1981			     struct drm_gpuvm_bo *vm_bo)
1982{
1983	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
1984
1985	mutex_lock(&bo->gpuva_list_lock);
1986	drm_gpuva_link(&vma->base, vm_bo);
1987	drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
1988	mutex_unlock(&bo->gpuva_list_lock);
1989}
1990
1991static void panthor_vma_unlink(struct panthor_vm *vm,
1992			       struct panthor_vma *vma)
1993{
1994	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
1995	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
1996
1997	mutex_lock(&bo->gpuva_list_lock);
1998	drm_gpuva_unlink(&vma->base);
1999	mutex_unlock(&bo->gpuva_list_lock);
2000
2001	/* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
2002	 * when entering this function, so we can implement deferred VMA
2003	 * destruction. Re-assign it here.
2004	 */
2005	vma->base.vm_bo = vm_bo;
2006	list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
2007}
2008
2009static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
2010{
2011	INIT_LIST_HEAD(&vma->node);
2012	vma->flags = flags;
2013}
2014
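/* Map-related flags that userspace may set on a VM_BIND map operation and
 * that are preserved on the resulting VMA.
 */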
2015#define PANTHOR_VM_MAP_FLAGS \
2016	(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
2017	 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
2018	 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)
2019
2020static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
2021{
2022	struct panthor_vm *vm = priv;
2023	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
2024	struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
2025	int ret;
2026
2027	if (!vma)
2028		return -EINVAL;
2029
2030	panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
2031
2032	ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
2033				   op_ctx->map.sgt, op->map.gem.offset,
2034				   op->map.va.range);
2035	if (ret)
2036		return ret;
2037
2038	/* The ref is owned by the mapping now; clear op_ctx->map.vm_bo so we don't
2039	 * release the pinning/obj ref behind GPUVA's back.
2040	 */
2041	drm_gpuva_map(&vm->base, &vma->base, &op->map);
2042	panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
2043	op_ctx->map.vm_bo = NULL;
2044	return 0;
2045}
2046
2047static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
2048				       void *priv)
2049{
2050	struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
2051	struct panthor_vm *vm = priv;
2052	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
2053	struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
2054	u64 unmap_start, unmap_range;
2055	int ret;
2056
2057	drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
2058	ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
2059	if (ret)
2060		return ret;
2061
2062	if (op->remap.prev) {
2063		prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
2064		panthor_vma_init(prev_vma, unmap_vma->flags);
2065	}
2066
2067	if (op->remap.next) {
2068		next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
2069		panthor_vma_init(next_vma, unmap_vma->flags);
2070	}
2071
2072	drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL,
2073			next_vma ? &next_vma->base : NULL,
2074			&op->remap);
2075
2076	if (prev_vma) {
2077		/* panthor_vma_link() transfers the vm_bo ownership to
2078		 * the VMA object. Since the vm_bo we're passing is still
2079		 * owned by the old mapping which will be released when this
2080		 * mapping is destroyed, we need to grab a ref here.
2081		 */
2082		panthor_vma_link(vm, prev_vma,
2083				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
2084	}
2085
2086	if (next_vma) {
2087		panthor_vma_link(vm, next_vma,
2088				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
2089	}
2090
2091	panthor_vma_unlink(vm, unmap_vma);
2092	return 0;
2093}
2094
2095static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
2096				       void *priv)
2097{
2098	struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
2099	struct panthor_vm *vm = priv;
2100	int ret;
2101
2102	ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
2103				     unmap_vma->base.va.range);
2104	if (drm_WARN_ON(&vm->ptdev->base, ret))
2105		return ret;
2106
2107	drm_gpuva_unmap(&op->unmap);
2108	panthor_vma_unlink(vm, unmap_vma);
2109	return 0;
2110}
2111
2112static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
2113	.vm_free = panthor_vm_free,
2114	.sm_step_map = panthor_gpuva_sm_step_map,
2115	.sm_step_remap = panthor_gpuva_sm_step_remap,
2116	.sm_step_unmap = panthor_gpuva_sm_step_unmap,
2117};
2118
2119/**
2120 * panthor_vm_resv() - Get the dma_resv object attached to a VM.
2121 * @vm: VM to get the dma_resv of.
2122 *
2123 * Return: A dma_resv object.
2124 */
2125struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
2126{
2127	return drm_gpuvm_resv(&vm->base);
2128}
2129
2130struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
2131{
2132	if (!vm)
2133		return NULL;
2134
2135	return vm->base.r_obj;
2136}
2137
2138static int
2139panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
2140		   bool flag_vm_unusable_on_failure)
2141{
2142	u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK;
2143	int ret;
2144
2145	if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY)
2146		return 0;
2147
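	/* vm->op_ctx is consumed by the drm_gpuvm sm_step_*() callbacks, and
	 * op_lock guarantees only one VM operation is in flight at a time.
	 */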
2148	mutex_lock(&vm->op_lock);
2149	vm->op_ctx = op;
2150	switch (op_type) {
2151	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
2152		if (vm->unusable) {
2153			ret = -EINVAL;
2154			break;
2155		}
2156
2157		ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
2158				       op->map.vm_bo->obj, op->map.bo_offset);
2159		break;
2160
2161	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
2162		ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
2163		break;
2164
2165	default:
2166		ret = -EINVAL;
2167		break;
2168	}
2169
2170	if (ret && flag_vm_unusable_on_failure)
2171		vm->unusable = true;
2172
2173	vm->op_ctx = NULL;
2174	mutex_unlock(&vm->op_lock);
2175
2176	return ret;
2177}
2178
2179static struct dma_fence *
2180panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
2181{
2182	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2183	bool cookie;
2184	int ret;
2185
2186	/* Not only do we report an error, which is propagated to the drm_sched
2187	 * finished fence, but we also flag the VM as unusable, because a failure
2188	 * in the async VM_BIND leaves the VM in an inconsistent state. Such a VM
2189	 * needs to be destroyed and re-created.
2190	 */
2191	cookie = dma_fence_begin_signalling();
2192	ret = panthor_vm_exec_op(job->vm, &job->ctx, true);
2193	dma_fence_end_signalling(cookie);
2194
2195	return ret ? ERR_PTR(ret) : NULL;
2196}
2197
2198static void panthor_vm_bind_job_release(struct kref *kref)
2199{
2200	struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount);
2201
2202	if (job->base.s_fence)
2203		drm_sched_job_cleanup(&job->base);
2204
2205	panthor_vm_cleanup_op_ctx(&job->ctx, job->vm);
2206	panthor_vm_put(job->vm);
2207	kfree(job);
2208}
2209
2210/**
2211 * panthor_vm_bind_job_put() - Release a VM_BIND job reference
2212 * @sched_job: Job to release the reference on.
2213 */
2214void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
2215{
2216	struct panthor_vm_bind_job *job =
2217		container_of(sched_job, struct panthor_vm_bind_job, base);
2218
2219	if (sched_job)
2220		kref_put(&job->refcount, panthor_vm_bind_job_release);
2221}
2222
2223static void
2224panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
2225{
2226	struct panthor_vm_bind_job *job =
2227		container_of(sched_job, struct panthor_vm_bind_job, base);
2228
2229	drm_sched_job_cleanup(sched_job);
2230
2231	/* Do the heavy cleanups asynchronously, so we're out of the
2232	 * dma-signaling path and can acquire dma-resv locks safely.
2233	 */
2234	queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work);
2235}
2236
2237static enum drm_gpu_sched_stat
2238panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
2239{
2240	WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
2241	return DRM_GPU_SCHED_STAT_NOMINAL;
2242}
2243
2244static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
2245	.run_job = panthor_vm_bind_run_job,
2246	.free_job = panthor_vm_bind_free_job,
2247	.timedout_job = panthor_vm_bind_timedout_job,
2248};
2249
2250/**
2251 * panthor_vm_create() - Create a VM
2252 * @ptdev: Device.
2253 * @for_mcu: True if this is the FW MCU VM.
2254 * @kernel_va_start: Start of the range reserved for kernel BO mapping.
2255 * @kernel_va_size: Size of the range reserved for kernel BO mapping.
2256 * @auto_kernel_va_start: Start of the auto-VA kernel range.
2257 * @auto_kernel_va_size: Size of the auto-VA kernel range.
2258 *
2259 * Return: A valid pointer on success, an ERR_PTR() otherwise.
2260 */
2261struct panthor_vm *
2262panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
2263		  u64 kernel_va_start, u64 kernel_va_size,
2264		  u64 auto_kernel_va_start, u64 auto_kernel_va_size)
2265{
2266	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
2267	u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
2268	u64 full_va_range = 1ull << va_bits;
2269	struct drm_gem_object *dummy_gem;
2270	struct drm_gpu_scheduler *sched;
2271	struct io_pgtable_cfg pgtbl_cfg;
2272	u64 mair, min_va, va_range;
2273	struct panthor_vm *vm;
2274	int ret;
2275
2276	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
2277	if (!vm)
2278		return ERR_PTR(-ENOMEM);
2279
2280	/* We allocate a dummy GEM for the VM. */
2281	dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base);
2282	if (!dummy_gem) {
2283		ret = -ENOMEM;
2284		goto err_free_vm;
2285	}
2286
2287	mutex_init(&vm->heaps.lock);
2288	vm->for_mcu = for_mcu;
2289	vm->ptdev = ptdev;
2290	mutex_init(&vm->op_lock);
2291
2292	if (for_mcu) {
2293		/* The CSF MCU is a Cortex-M7, and can only address 4G. */
2294		min_va = 0;
2295		va_range = SZ_4G;
2296	} else {
2297		min_va = 0;
2298		va_range = full_va_range;
2299	}
2300
2301	mutex_init(&vm->mm_lock);
2302	drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size);
2303	vm->kernel_auto_va.start = auto_kernel_va_start;
2304	vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1;
2305
2306	INIT_LIST_HEAD(&vm->node);
2307	INIT_LIST_HEAD(&vm->as.lru_node);
2308	vm->as.id = -1;
2309	refcount_set(&vm->as.active_cnt, 0);
2310
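	/* Stage-1 AArch64 LPAE page table, 4k granule with 2M block mappings,
	 * using cache-coherent page table walks when the GPU is I/O coherent.
	 */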
2311	pgtbl_cfg = (struct io_pgtable_cfg) {
2312		.pgsize_bitmap	= SZ_4K | SZ_2M,
2313		.ias		= va_bits,
2314		.oas		= pa_bits,
2315		.coherent_walk	= ptdev->coherent,
2316		.tlb		= &mmu_tlb_ops,
2317		.iommu_dev	= ptdev->base.dev,
2318		.alloc		= alloc_pt,
2319		.free		= free_pt,
2320	};
2321
2322	vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
2323	if (!vm->pgtbl_ops) {
2324		ret = -EINVAL;
2325		goto err_mm_takedown;
2326	}
2327
2328	/* Bind operations are synchronous for now, no timeout needed. */
2329	ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq,
2330			     1, 1, 0,
2331			     MAX_SCHEDULE_TIMEOUT, NULL, NULL,
2332			     "panthor-vm-bind", ptdev->base.dev);
2333	if (ret)
2334		goto err_free_io_pgtable;
2335
2336	sched = &vm->sched;
2337	ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
2338	if (ret)
2339		goto err_sched_fini;
2340
2341	mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
2342	vm->memattr = mair_to_memattr(mair);
2343
2344	mutex_lock(&ptdev->mmu->vm.lock);
2345	list_add_tail(&vm->node, &ptdev->mmu->vm.list);
2346
2347	/* If a reset is in progress, stop the scheduler. */
2348	if (ptdev->mmu->vm.reset_in_progress)
2349		panthor_vm_stop(vm);
2350	mutex_unlock(&ptdev->mmu->vm.lock);
2351
2352	/* We intentionally set the reserved range to zero, because we want kernel VMAs
2353	 * to be handled the same way user VMAs are.
2354	 */
2355	drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
2356		       DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
2357		       min_va, va_range, 0, 0, &panthor_gpuvm_ops);
2358	drm_gem_object_put(dummy_gem);
2359	return vm;
2360
2361err_sched_fini:
2362	drm_sched_fini(&vm->sched);
2363
2364err_free_io_pgtable:
2365	free_io_pgtable_ops(vm->pgtbl_ops);
2366
2367err_mm_takedown:
2368	drm_mm_takedown(&vm->mm);
2369	drm_gem_object_put(dummy_gem);
2370
2371err_free_vm:
2372	kfree(vm);
2373	return ERR_PTR(ret);
2374}
2375
2376static int
2377panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
2378			       struct panthor_vm *vm,
2379			       const struct drm_panthor_vm_bind_op *op,
2380			       struct panthor_vm_op_ctx *op_ctx)
2381{
2382	ssize_t vm_pgsz = panthor_vm_page_size(vm);
2383	struct drm_gem_object *gem;
2384	int ret;
2385
2386	/* The VA and size must be aligned on the VM page size. */
2387	if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
2388		return -EINVAL;
2389
2390	switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
2391	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
2392		gem = drm_gem_object_lookup(file, op->bo_handle);
2393		ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
2394						    gem ? to_panthor_bo(gem) : NULL,
2395						    op->bo_offset,
2396						    op->size,
2397						    op->va,
2398						    op->flags);
2399		drm_gem_object_put(gem);
2400		return ret;
2401
2402	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
2403		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
2404			return -EINVAL;
2405
2406		if (op->bo_handle || op->bo_offset)
2407			return -EINVAL;
2408
2409		return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size);
2410
2411	case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY:
2412		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
2413			return -EINVAL;
2414
2415		if (op->bo_handle || op->bo_offset)
2416			return -EINVAL;
2417
2418		if (op->va || op->size)
2419			return -EINVAL;
2420
2421		if (!op->syncs.count)
2422			return -EINVAL;
2423
2424		panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm);
2425		return 0;
2426
2427	default:
2428		return -EINVAL;
2429	}
2430}
2431
2432static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
2433{
2434	struct panthor_vm_bind_job *job =
2435		container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);
2436
2437	panthor_vm_bind_job_put(&job->base);
2438}
2439
2440/**
2441 * panthor_vm_bind_job_create() - Create a VM_BIND job
2442 * @file: File.
2443 * @vm: VM targeted by the VM_BIND job.
2444 * @op: VM operation data.
2445 *
2446 * Return: A valid pointer on success, an ERR_PTR() otherwise.
2447 */
2448struct drm_sched_job *
2449panthor_vm_bind_job_create(struct drm_file *file,
2450			   struct panthor_vm *vm,
2451			   const struct drm_panthor_vm_bind_op *op)
2452{
2453	struct panthor_vm_bind_job *job;
2454	int ret;
2455
2456	if (!vm)
2457		return ERR_PTR(-EINVAL);
2458
2459	if (vm->destroyed || vm->unusable)
2460		return ERR_PTR(-EINVAL);
2461
2462	job = kzalloc(sizeof(*job), GFP_KERNEL);
2463	if (!job)
2464		return ERR_PTR(-ENOMEM);
2465
2466	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx);
2467	if (ret) {
2468		kfree(job);
2469		return ERR_PTR(ret);
2470	}
2471
2472	INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work);
2473	kref_init(&job->refcount);
2474	job->vm = panthor_vm_get(vm);
2475
2476	ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
2477	if (ret)
2478		goto err_put_job;
2479
2480	return &job->base;
2481
2482err_put_job:
2483	panthor_vm_bind_job_put(&job->base);
2484	return ERR_PTR(ret);
2485}
2486
2487/**
2488 * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
2489 * @exec: The locking/preparation context.
2490 * @sched_job: The job to prepare resvs on.
2491 *
2492 * Locks and prepares the VM resv.
2493 *
2494 * If this is a map operation, locks and prepares the GEM resv.
2495 *
2496 * Return: 0 on success, a negative error code otherwise.
2497 */
2498int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
2499				      struct drm_sched_job *sched_job)
2500{
2501	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2502	int ret;
2503
2504	/* Acquire the VM lock and reserve a slot for this VM bind job. */
2505	ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1);
2506	if (ret)
2507		return ret;
2508
2509	if (job->ctx.map.vm_bo) {
2510		/* Lock/prepare the GEM being mapped. */
2511		ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
2512		if (ret)
2513			return ret;
2514	}
2515
2516	return 0;
2517}
2518
2519/**
2520 * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
2521 * @exec: drm_exec context.
2522 * @sched_job: Job to update the resvs on.
2523 */
2524void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
2525				      struct drm_sched_job *sched_job)
2526{
2527	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2528
2529	/* Explicit sync => we just register our job's finished fence with BOOKKEEP usage. */
2530	drm_gpuvm_resv_add_fence(&job->vm->base, exec,
2531				 &sched_job->s_fence->finished,
2532				 DMA_RESV_USAGE_BOOKKEEP,
2533				 DMA_RESV_USAGE_BOOKKEEP);
2534}
2535
2536void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
2537			     struct dma_fence *fence,
2538			     enum dma_resv_usage private_usage,
2539			     enum dma_resv_usage extobj_usage)
2540{
2541	drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage);
2542}
2543
2544/**
2545 * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
2546 * @file: File.
2547 * @vm: VM targeted by the VM operation.
2548 * @op: Data describing the VM operation.
2549 *
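 * A minimal sketch of a synchronous map request (local variable names are
 * illustrative):
 *
 *	struct drm_panthor_vm_bind_op op = {
 *		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
 *		.bo_handle = bo_handle,
 *		.va = gpu_va,
 *		.size = map_size,
 *	};
 *
 *	ret = panthor_vm_bind_exec_sync_op(file, vm, &op);
 *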
2550 * Return: 0 on success, a negative error code otherwise.
2551 */
2552int panthor_vm_bind_exec_sync_op(struct drm_file *file,
2553				 struct panthor_vm *vm,
2554				 struct drm_panthor_vm_bind_op *op)
2555{
2556	struct panthor_vm_op_ctx op_ctx;
2557	int ret;
2558
2559	/* No sync objects allowed on synchronous operations. */
2560	if (op->syncs.count)
2561		return -EINVAL;
2562
2563	if (!op->size)
2564		return 0;
2565
2566	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx);
2567	if (ret)
2568		return ret;
2569
2570	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2571	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2572
2573	return ret;
2574}
2575
2576/**
2577 * panthor_vm_map_bo_range() - Map a GEM object range to a VM
2578 * @vm: VM to map the GEM to.
2579 * @bo: GEM object to map.
2580 * @offset: Offset in the GEM object.
2581 * @size: Size to map.
2582 * @va: Virtual address to map the object to.
2583 * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
2584 * Only map-related flags are valid.
2585 *
2586 * Internal use only. For userspace requests, use
2587 * panthor_vm_bind_exec_sync_op() instead.
2588 *
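 * A kernel-internal call typically looks like (illustrative values):
 *
 *	ret = panthor_vm_map_bo_range(vm, bo, 0, size, va,
 *				      DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC);
 *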
2589 * Return: 0 on success, a negative error code otherwise.
2590 */
2591int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
2592			    u64 offset, u64 size, u64 va, u32 flags)
2593{
2594	struct panthor_vm_op_ctx op_ctx;
2595	int ret;
2596
2597	ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
2598	if (ret)
2599		return ret;
2600
2601	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2602	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2603
2604	return ret;
2605}
2606
2607/**
2608 * panthor_vm_unmap_range() - Unmap a portion of the VA space
2609 * @vm: VM to unmap the region from.
2610 * @va: Virtual address to unmap. Must be 4k aligned.
2611 * @size: Size of the region to unmap. Must be 4k aligned.
2612 *
2613 * Internal use only. For userspace requests, use
2614 * panthor_vm_bind_exec_sync_op() instead.
2615 *
2616 * Return: 0 on success, a negative error code otherwise.
2617 */
2618int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
2619{
2620	struct panthor_vm_op_ctx op_ctx;
2621	int ret;
2622
2623	ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size);
2624	if (ret)
2625		return ret;
2626
2627	ret = panthor_vm_exec_op(vm, &op_ctx, false);
2628	panthor_vm_cleanup_op_ctx(&op_ctx, vm);
2629
2630	return ret;
2631}
2632
2633/**
2634 * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
2635 * @exec: Locking/preparation context.
2636 * @vm: VM targeted by the GPU job.
2637 * @slot_count: Number of slots to reserve.
2638 *
2639 * GPU jobs assume all BOs bound to the VM at the time the job is submitted
2640 * are available when the job is executed. In order to guarantee that, we
2641 * need to reserve a slot on all BOs mapped to a VM and update this slot with
2642 * the job fence after its submission.
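 *
 * A caller sketch, assuming the usual drm_exec retry loop:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		ret = panthor_vm_prepare_mapped_bos_resvs(&exec, vm, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			break;
 *	}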
2643 *
2644 * Return: 0 on success, a negative error code otherwise.
2645 */
2646int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm,
2647					u32 slot_count)
2648{
2649	int ret;
2650
2651	/* Acquire the VM lock and reserve a slot for this GPU job. */
2652	ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count);
2653	if (ret)
2654		return ret;
2655
2656	return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count);
2657}
2658
2659/**
2660 * panthor_mmu_unplug() - Unplug the MMU logic
2661 * @ptdev: Device.
2662 *
2663 * The MMU registers must not be accessed after this function is called.
2664 * We suspend the IRQ and disable all VMs to guarantee that.
2665 */
2666void panthor_mmu_unplug(struct panthor_device *ptdev)
2667{
2668	panthor_mmu_irq_suspend(&ptdev->mmu->irq);
2669
2670	mutex_lock(&ptdev->mmu->as.slots_lock);
2671	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
2672		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
2673
2674		if (vm) {
2675			drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
2676			panthor_vm_release_as_locked(vm);
2677		}
2678	}
2679	mutex_unlock(&ptdev->mmu->as.slots_lock);
2680}
2681
2682static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
2683{
2684	destroy_workqueue(res);
2685}
2686
2687/**
2688 * panthor_mmu_init() - Initialize the MMU logic.
2689 * @ptdev: Device.
2690 *
2691 * Return: 0 on success, a negative error code otherwise.
2692 */
2693int panthor_mmu_init(struct panthor_device *ptdev)
2694{
2695	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
2696	struct panthor_mmu *mmu;
2697	int ret, irq;
2698
2699	mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
2700	if (!mmu)
2701		return -ENOMEM;
2702
2703	INIT_LIST_HEAD(&mmu->as.lru_list);
2704
2705	ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
2706	if (ret)
2707		return ret;
2708
2709	INIT_LIST_HEAD(&mmu->vm.list);
2710	ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
2711	if (ret)
2712		return ret;
2713
2714	ptdev->mmu = mmu;
2715
2716	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
2717	if (irq <= 0)
2718		return -ENODEV;
2719
2720	ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
2721				      panthor_mmu_fault_mask(ptdev, ~0));
2722	if (ret)
2723		return ret;
2724
2725	mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
2726	if (!mmu->vm.wq)
2727		return -ENOMEM;
2728
2729	/* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction,
2730	 * which passes iova as an unsigned long. Patch the mmu_features to reflect this
2731	 * limitation.
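	 * For example, a GPU reporting 48 VA bits ends up clamped to 32 bits
	 * when BITS_PER_LONG == 32.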
2732	 */
2733	if (va_bits > BITS_PER_LONG) {
2734		ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
2735		ptdev->gpu_info.mmu_features |= BITS_PER_LONG;
2736	}
2737
2738	return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
2739}
2740
2741#ifdef CONFIG_DEBUG_FS
2742static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
2743{
2744	int ret;
2745
2746	mutex_lock(&vm->op_lock);
2747	ret = drm_debugfs_gpuva_info(m, &vm->base);
2748	mutex_unlock(&vm->op_lock);
2749
2750	return ret;
2751}
2752
2753static int show_each_vm(struct seq_file *m, void *arg)
2754{
2755	struct drm_info_node *node = (struct drm_info_node *)m->private;
2756	struct drm_device *ddev = node->minor->dev;
2757	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
2758	int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
2759	struct panthor_vm *vm;
2760	int ret = 0;
2761
2762	mutex_lock(&ptdev->mmu->vm.lock);
2763	list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
2764		ret = show(vm, m);
2765		if (ret < 0)
2766			break;
2767
2768		seq_puts(m, "\n");
2769	}
2770	mutex_unlock(&ptdev->mmu->vm.lock);
2771
2772	return ret;
2773}
2774
2775static struct drm_info_list panthor_mmu_debugfs_list[] = {
2776	DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas),
2777};
2778
2779/**
2780 * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
2781 * @minor: Minor.
2782 */
2783void panthor_mmu_debugfs_init(struct drm_minor *minor)
2784{
2785	drm_debugfs_create_files(panthor_mmu_debugfs_list,
2786				 ARRAY_SIZE(panthor_mmu_debugfs_list),
2787				 minor->debugfs_root, minor);
2788}
2789#endif /* CONFIG_DEBUG_FS */
2790
2791/**
2792 * panthor_mmu_pt_cache_init() - Initialize the page table cache.
2793 *
2794 * Return: 0 on success, a negative error code otherwise.
2795 */
2796int panthor_mmu_pt_cache_init(void)
2797{
2798	pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
2799	if (!pt_cache)
2800		return -ENOMEM;
2801
2802	return 0;
2803}
2804
2805/**
2806 * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
2807 */
2808void panthor_mmu_pt_cache_fini(void)
2809{
2810	kmem_cache_destroy(pt_cache);
2811}