   1// SPDX-License-Identifier: GPL-2.0-only OR MIT
   2/* Copyright (c) 2023 Imagination Technologies Ltd. */
   3
   4#include "pvr_vm.h"
   5
   6#include "pvr_device.h"
   7#include "pvr_drv.h"
   8#include "pvr_gem.h"
   9#include "pvr_mmu.h"
  10#include "pvr_rogue_fwif.h"
  11#include "pvr_rogue_heap_config.h"
  12
  13#include <drm/drm_exec.h>
  14#include <drm/drm_gem.h>
  15#include <drm/drm_gpuvm.h>
  16
  17#include <linux/bug.h>
  18#include <linux/container_of.h>
  19#include <linux/err.h>
  20#include <linux/errno.h>
  21#include <linux/gfp_types.h>
  22#include <linux/kref.h>
  23#include <linux/mutex.h>
  24#include <linux/stddef.h>
  25
  26/**
  27 * DOC: Memory context
  28 *
  29 * This is the "top level" datatype in the VM code. It's exposed in the public
  30 * API as an opaque handle.
  31 */
  32
  33/**
  34 * struct pvr_vm_context - Context type used to represent a single VM.
  35 */
  36struct pvr_vm_context {
  37	/**
  38	 * @pvr_dev: The PowerVR device to which this context is bound.
  39	 * This binding is immutable for the life of the context.
  40	 */
  41	struct pvr_device *pvr_dev;
  42
  43	/** @mmu_ctx: The context for binding to physical memory. */
  44	struct pvr_mmu_context *mmu_ctx;
  45
  46	/** @gpuvm_mgr: GPUVM object associated with this context. */
  47	struct drm_gpuvm gpuvm_mgr;
  48
  49	/** @lock: Global lock on this VM. */
  50	struct mutex lock;
  51
  52	/**
  53	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
  54	 * context.
  55	 */
  56	struct pvr_fw_object *fw_mem_ctx_obj;
  57
  58	/** @ref_count: Reference count of object. */
  59	struct kref ref_count;
  60
  61	/**
  62	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
  63	 * should use the @dummy_gem.resv and not their own _resv field.
  64	 */
  65	struct drm_gem_object dummy_gem;
  66};
  67
  68static inline
  69struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
  70{
  71	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
  72}
  73
  74struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
  75{
  76	if (vm_ctx)
  77		kref_get(&vm_ctx->ref_count);
  78
  79	return vm_ctx;
  80}
  81
  82/**
  83 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
  84 *                                     page table structure behind a VM context.
  85 * @vm_ctx: Target VM context.
  86 */
  87dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
  88{
  89	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
  90}
  91
  92/**
  93 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
  94 * @vm_ctx: Target VM context.
  95 *
  96 * This is used to allow private BOs to share a dma_resv for faster fence
  97 * updates.
  98 *
  99 * Returns: The dma_resv pointer.
 100 */
 101struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
 102{
 103	return vm_ctx->dummy_gem.resv;
 104}
 105
 106/**
 107 * DOC: Memory mappings
 108 */
 109
 110/**
 111 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
 112 */
 113struct pvr_vm_gpuva {
 114	/** @base: The wrapped drm_gpuva object. */
 115	struct drm_gpuva base;
 116};
 117
 118#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)
 119
 120enum pvr_vm_bind_type {
 121	PVR_VM_BIND_TYPE_MAP,
 122	PVR_VM_BIND_TYPE_UNMAP,
 123};
 124
 125/**
 126 * struct pvr_vm_bind_op - Context of a map/unmap operation.
 127 */
 128struct pvr_vm_bind_op {
 129	/** @type: Map or unmap. */
 130	enum pvr_vm_bind_type type;
 131
 132	/** @pvr_obj: Object associated with mapping (map only). */
 133	struct pvr_gem_object *pvr_obj;
 134
 135	/**
 136	 * @vm_ctx: VM context where the mapping will be created or destroyed.
 137	 */
 138	struct pvr_vm_context *vm_ctx;
 139
 140	/** @mmu_op_ctx: MMU op context. */
 141	struct pvr_mmu_op_context *mmu_op_ctx;
 142
 143	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
 144	struct drm_gpuvm_bo *gpuvm_bo;
 145
 146	/**
 147	 * @new_va: Prealloced VA mapping object (init in callback).
 148	 * Used when creating a mapping.
 149	 */
 150	struct pvr_vm_gpuva *new_va;
 151
 152	/**
 153	 * @prev_va: Prealloced VA mapping object (init in callback).
 154	 * Used when a mapping or unmapping operation overlaps an existing
 155	 * mapping and splits away the beginning into a new mapping.
 156	 */
 157	struct pvr_vm_gpuva *prev_va;
 158
 159	/**
 160	 * @next_va: Prealloced VA mapping object (init in callback).
 161	 * Used when a mapping or unmapping operation overlaps an existing
 162	 * mapping and splits away the end into a new mapping.
 163	 */
 164	struct pvr_vm_gpuva *next_va;
 165
 166	/** @offset: Offset into @pvr_obj to begin mapping from. */
 167	u64 offset;
 168
 169	/** @device_addr: Device-virtual address at the start of the mapping. */
 170	u64 device_addr;
 171
 172	/** @size: Size of the desired mapping. */
 173	u64 size;
 174};
 175
 176/**
 177 * pvr_vm_bind_op_exec() - Execute a single bind op.
 178 * @bind_op: Bind op context.
 179 *
 180 * Returns:
 181 *  * 0 on success,
  182 *  * Any error code returned by drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap(), or
 183 *    a callback function.
 184 */
 185static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
 186{
 187	switch (bind_op->type) {
 188	case PVR_VM_BIND_TYPE_MAP:
 189		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
 190					bind_op, bind_op->device_addr,
 191					bind_op->size,
 192					gem_from_pvr_gem(bind_op->pvr_obj),
 193					bind_op->offset);
 194
 195	case PVR_VM_BIND_TYPE_UNMAP:
 196		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
 197					  bind_op, bind_op->device_addr,
 198					  bind_op->size);
 199	}
 200
 201	/*
 202	 * This shouldn't happen unless something went wrong
 203	 * in drm_sched.
 204	 */
 205	WARN_ON(1);
 206	return -EINVAL;
 207}
 208
 209static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
 210{
 211	drm_gpuvm_bo_put(bind_op->gpuvm_bo);
 212
 213	kfree(bind_op->new_va);
 214	kfree(bind_op->prev_va);
 215	kfree(bind_op->next_va);
 216
 217	if (bind_op->pvr_obj)
 218		pvr_gem_object_put(bind_op->pvr_obj);
 219
 220	if (bind_op->mmu_op_ctx)
 221		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
 222}
 223
 224static int
 225pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
 226			struct pvr_vm_context *vm_ctx,
 227			struct pvr_gem_object *pvr_obj, u64 offset,
 228			u64 device_addr, u64 size)
 229{
 230	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
 231	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
 232	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
 233	struct sg_table *sgt;
 234	u64 offset_plus_size;
 235	int err;
 236
 237	if (check_add_overflow(offset, size, &offset_plus_size))
 238		return -EINVAL;
 239
 240	if (is_user &&
 241	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
 242		return -EINVAL;
 243	}
 244
 245	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
 246	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
 247	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
 248		return -EINVAL;
 249
 250	bind_op->type = PVR_VM_BIND_TYPE_MAP;
 251
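	/*
	 * Hold the GEM object's reservation lock while looking up or creating
	 * the gpuvm_bo link between this VM and the object.
	 */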
 252	dma_resv_lock(obj->resv, NULL);
 253	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
 254	dma_resv_unlock(obj->resv);
 255	if (IS_ERR(bind_op->gpuvm_bo))
 256		return PTR_ERR(bind_op->gpuvm_bo);
 257
 258	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
 259	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
 260	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
 261	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
 262		err = -ENOMEM;
 263		goto err_bind_op_fini;
 264	}
 265
 266	/* Pin pages so they're ready for use. */
 267	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
 268	err = PTR_ERR_OR_ZERO(sgt);
 269	if (err)
 270		goto err_bind_op_fini;
 271
 272	bind_op->mmu_op_ctx =
 273		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
 274	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
 275	if (err) {
 276		bind_op->mmu_op_ctx = NULL;
 277		goto err_bind_op_fini;
 278	}
 279
 280	bind_op->pvr_obj = pvr_obj;
 281	bind_op->vm_ctx = vm_ctx;
 282	bind_op->device_addr = device_addr;
 283	bind_op->size = size;
 284	bind_op->offset = offset;
 285
 286	return 0;
 287
 288err_bind_op_fini:
 289	pvr_vm_bind_op_fini(bind_op);
 290
 291	return err;
 292}
 293
 294static int
 295pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
 296			  struct pvr_vm_context *vm_ctx,
 297			  struct pvr_gem_object *pvr_obj,
 298			  u64 device_addr, u64 size)
 299{
 300	int err;
 301
 302	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
 303		return -EINVAL;
 304
 305	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
 306
 307	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
 308	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
 309	if (!bind_op->prev_va || !bind_op->next_va) {
 310		err = -ENOMEM;
 311		goto err_bind_op_fini;
 312	}
 313
 314	bind_op->mmu_op_ctx =
 315		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
 316	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
 317	if (err) {
 318		bind_op->mmu_op_ctx = NULL;
 319		goto err_bind_op_fini;
 320	}
 321
 322	bind_op->pvr_obj = pvr_obj;
 323	bind_op->vm_ctx = vm_ctx;
 324	bind_op->device_addr = device_addr;
 325	bind_op->size = size;
 326
 327	return 0;
 328
 329err_bind_op_fini:
 330	pvr_vm_bind_op_fini(bind_op);
 331
 332	return err;
 333}
 334
 335/**
 336 * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
  337 * @op: gpuva op containing the map details.
 338 * @op_ctx: Operation context.
 339 *
 340 * Context: Called by drm_gpuvm_sm_map following a successful mapping while
 341 * @op_ctx.vm_ctx mutex is held.
 342 *
 343 * Return:
 344 *  * 0 on success, or
 345 *  * Any error returned by pvr_mmu_map().
 346 */
 347static int
 348pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
 349{
 350	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
 351	struct pvr_vm_bind_op *ctx = op_ctx;
 352	int err;
 353
 354	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
 355		return -EINVAL;
 356
 357	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
 358			  op->map.va.addr);
 359	if (err)
 360		return err;
 361
 362	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
 363	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
 364	ctx->new_va = NULL;
 365
 366	return 0;
 367}
 368
 369/**
 370 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
 371 * @op: gpuva op containing the unmap details.
 372 * @op_ctx: Operation context.
 373 *
 374 * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
 375 * @op_ctx.vm_ctx mutex is held.
 376 *
 377 * Return:
 378 *  * 0 on success, or
 379 *  * Any error returned by pvr_mmu_unmap().
 380 */
 381static int
 382pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
 383{
 384	struct pvr_vm_bind_op *ctx = op_ctx;
 385
 386	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
 387				op->unmap.va->va.range);
 388
 389	if (err)
 390		return err;
 391
 392	drm_gpuva_unmap(&op->unmap);
 393	drm_gpuva_unlink(op->unmap.va);
 394	kfree(to_pvr_vm_gpuva(op->unmap.va));
 395
 396	return 0;
 397}
 398
 399/**
 400 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
 401 * @op: gpuva op containing the remap details.
 402 * @op_ctx: Operation context.
 403 *
 404 * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
 405 * mapping or unmapping operation causes a region to be split. The
 406 * @op_ctx.vm_ctx mutex is held.
 407 *
 408 * Return:
 409 *  * 0 on success, or
  410 *  * Any error returned by pvr_mmu_unmap().
 411 */
 412static int
 413pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
 414{
 415	struct pvr_vm_bind_op *ctx = op_ctx;
 416	u64 va_start = 0, va_range = 0;
 417	int err;
 418
 419	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
 420	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
 421	if (err)
 422		return err;
 423
 424	/* No actual remap required: the page table tree depth is fixed to 3,
 425	 * and we use 4k page table entries only for now.
 426	 */
 427	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);
 428
 429	if (op->remap.prev) {
 430		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
 431		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
 432		ctx->prev_va = NULL;
 433	}
 434
 435	if (op->remap.next) {
 436		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
 437		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
 438		ctx->next_va = NULL;
 439	}
 440
 441	drm_gpuva_unlink(op->remap.unmap->va);
 442	kfree(to_pvr_vm_gpuva(op->remap.unmap->va));
 443
 444	return 0;
 445}
 446
 447/*
 448 * Public API
 449 *
 450 * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
 451 */
 452
 453/**
 454 * pvr_device_addr_is_valid() - Tests whether a device-virtual address
 455 *                              is valid.
 456 * @device_addr: Virtual device address to test.
 457 *
 458 * Return:
 459 *  * %true if @device_addr is within the valid range for a device page
 460 *    table and is aligned to the device page size, or
 461 *  * %false otherwise.
 462 */
 463bool
 464pvr_device_addr_is_valid(u64 device_addr)
 465{
 466	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
 467	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
 468}
 469
 470/**
 471 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
 472 * address and associated size are both valid.
 473 * @vm_ctx: Target VM context.
 474 * @device_addr: Virtual device address to test.
 475 * @size: Size of the range based at @device_addr to test.
 476 *
  477 * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again on
 478 * @device_addr + @size) to verify a device-virtual address range initially
 479 * seems intuitive, but it produces a false-negative when the address range
 480 * is right at the end of device-virtual address space.
 481 *
 482 * This function catches that corner case, as well as checking that
 483 * @size is non-zero.
 484 *
 485 * Return:
 486 *  * %true if @device_addr is device page aligned; @size is device page
 487 *    aligned; the range specified by @device_addr and @size is within the
 488 *    bounds of the device-virtual address space, and @size is non-zero, or
 489 *  * %false otherwise.
 490 */
 491bool
 492pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
 493				   u64 device_addr, u64 size)
 494{
 495	return pvr_device_addr_is_valid(device_addr) &&
 496	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
 497	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
 498	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
 499}
 500
 501static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
 502{
 503	kfree(to_pvr_vm_context(gpuvm));
 504}
 505
 506static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
 507	.vm_free = pvr_gpuvm_free,
 508	.sm_step_map = pvr_vm_gpuva_map,
 509	.sm_step_remap = pvr_vm_gpuva_remap,
 510	.sm_step_unmap = pvr_vm_gpuva_unmap,
 511};
 512
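/*
 * Creation callback for the firmware memory context object: points the
 * firmware's page catalogue base at this VM's page table root.
 */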
 513static void
 514fw_mem_context_init(void *cpu_ptr, void *priv)
 515{
 516	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
 517	struct pvr_vm_context *vm_ctx = priv;
 518
 519	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
 520	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
 521}
 522
 523/**
 524 * pvr_vm_create_context() - Create a new VM context.
 525 * @pvr_dev: Target PowerVR device.
 526 * @is_userspace_context: %true if this context is for userspace. This will
 527 *                        create a firmware memory context for the VM context
 528 *                        and disable warnings when tearing down mappings.
 529 *
 530 * Return:
 531 *  * A handle to the newly-minted VM context on success,
 532 *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
 533 *    missing or has an unsupported value,
 534 *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
 535 *    or
 536 *  * Any error encountered while setting up internal structures.
 537 */
 538struct pvr_vm_context *
 539pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
 540{
 541	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
 542
 543	struct pvr_vm_context *vm_ctx;
 544	u16 device_addr_bits;
 545
 546	int err;
 547
 548	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
 549				&device_addr_bits);
 550	if (err) {
 551		drm_err(drm_dev,
 552			"Failed to get device virtual address space bits\n");
 553		return ERR_PTR(err);
 554	}
 555
 556	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
 557		drm_err(drm_dev,
 558			"Device has unsupported virtual address space size\n");
 559		return ERR_PTR(-EINVAL);
 560	}
 561
 562	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
 563	if (!vm_ctx)
 564		return ERR_PTR(-ENOMEM);
 565
 566	vm_ctx->pvr_dev = pvr_dev;
 567
 568	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
 569	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
 570	if (err)
 571		goto err_free;
 572
 573	if (is_userspace_context) {
 574		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
 575					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
 576					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
 577
 578		if (err)
 579			goto err_page_table_destroy;
 580	}
 581
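	/*
	 * The zero-sized dummy GEM only exists to provide the dma_resv shared
	 * by this VM's private BOs; the GPUVM then manages the device-virtual
	 * range [0, 1 << device_addr_bits) with no reserved region.
	 */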
 582	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
 583	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
 584		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
 585		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
 586		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
 587
 588	mutex_init(&vm_ctx->lock);
 589	kref_init(&vm_ctx->ref_count);
 590
 591	return vm_ctx;
 592
 593err_page_table_destroy:
 594	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
 595
 596err_free:
 597	kfree(vm_ctx);
 598
 599	return ERR_PTR(err);
 600}
 601
 602/**
 603 * pvr_vm_context_release() - Teardown a VM context.
 604 * @ref_count: Pointer to reference counter of the VM context.
 605 *
 606 * This function also ensures that no mappings are left dangling by calling
  607 * pvr_vm_unmap_all().
 608 */
 609static void
 610pvr_vm_context_release(struct kref *ref_count)
 611{
 612	struct pvr_vm_context *vm_ctx =
 613		container_of(ref_count, struct pvr_vm_context, ref_count);
 614
 615	if (vm_ctx->fw_mem_ctx_obj)
 616		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
 617
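	/* Tear down any mappings still present before destroying the MMU context. */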
  618	pvr_vm_unmap_all(vm_ctx);
 619
 620	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
 621	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
 622	mutex_destroy(&vm_ctx->lock);
 623
 624	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
 625}
 626
 627/**
 628 * pvr_vm_context_lookup() - Look up VM context from handle
 629 * @pvr_file: Pointer to pvr_file structure.
 630 * @handle: Object handle.
 631 *
 632 * Takes reference on VM context object. Call pvr_vm_context_put() to release.
 633 *
 634 * Returns:
 635 *  * The requested object on success, or
 636 *  * %NULL on failure (object does not exist in list, or is not a VM context)
 637 */
 638struct pvr_vm_context *
 639pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
 640{
 641	struct pvr_vm_context *vm_ctx;
 642
 643	xa_lock(&pvr_file->vm_ctx_handles);
 644	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
  645	pvr_vm_context_get(vm_ctx);
 646	xa_unlock(&pvr_file->vm_ctx_handles);
 647
 648	return vm_ctx;
 649}
 650
 651/**
 652 * pvr_vm_context_put() - Release a reference on a VM context
 653 * @vm_ctx: Target VM context.
 654 *
 655 * Returns:
 656 *  * %true if the VM context was destroyed, or
 657 *  * %false if there are any references still remaining.
 658 */
 659bool
 660pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
 661{
 662	if (vm_ctx)
 663		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);
 664
 665	return true;
 666}
 667
 668/**
  669 * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated with the
 670 * given file.
 671 * @pvr_file: Pointer to pvr_file structure.
 672 *
 673 * Removes all vm_contexts associated with @pvr_file from the device VM context
 674 * list and drops initial references. vm_contexts will then be destroyed once
 675 * all outstanding references are dropped.
 676 */
 677void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
 678{
 679	struct pvr_vm_context *vm_ctx;
 680	unsigned long handle;
 681
 682	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
 683		/* vm_ctx is not used here because that would create a race with xa_erase */
 684		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
 685	}
 686}
 687
 688static int
 689pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
 690{
 691	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
 692	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
 693
  694	/* Acquire lock on the GEM object being mapped/unmapped. */
 695	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
 696}
 697
 698/**
 699 * pvr_vm_map() - Map a section of physical memory into a section of
 700 * device-virtual memory.
 701 * @vm_ctx: Target VM context.
 702 * @pvr_obj: Target PowerVR memory object.
 703 * @pvr_obj_offset: Offset into @pvr_obj to map from.
 704 * @device_addr: Virtual device address at the start of the requested mapping.
 705 * @size: Size of the requested mapping.
 706 *
 707 * No handle is returned to represent the mapping. Instead, callers should
 708 * remember @device_addr and use that as a handle.
 709 *
 710 * Return:
 711 *  * 0 on success,
 712 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 713 *    address; the region specified by @pvr_obj_offset and @size does not fall
 714 *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
 715 *    is not device-virtual page-aligned,
 716 *  * Any error encountered while performing internal operations required to
  717 *    create the mapping (returned from pvr_vm_gpuva_map() or
  718 *    pvr_vm_gpuva_remap()).
 719 */
 720int
 721pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
 722	   u64 pvr_obj_offset, u64 device_addr, u64 size)
 723{
 724	struct pvr_vm_bind_op bind_op = {0};
 725	struct drm_gpuvm_exec vm_exec = {
 726		.vm = &vm_ctx->gpuvm_mgr,
 727		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
 728			 DRM_EXEC_IGNORE_DUPLICATES,
 729		.extra = {
 730			.fn = pvr_vm_lock_extra,
 731			.priv = &bind_op,
 732		},
 733	};
 734
 735	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
 736					  pvr_obj_offset, device_addr,
 737					  size);
 738
 739	if (err)
 740		return err;
 741
 742	pvr_gem_object_get(pvr_obj);
 743
 744	err = drm_gpuvm_exec_lock(&vm_exec);
 745	if (err)
 746		goto err_cleanup;
 747
 748	err = pvr_vm_bind_op_exec(&bind_op);
 749
 750	drm_gpuvm_exec_unlock(&vm_exec);
 751
 752err_cleanup:
 753	pvr_vm_bind_op_fini(&bind_op);
 754
 755	return err;
 756}
 757
 758/**
 759 * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
 760 * memory.
 761 * @vm_ctx: Target VM context.
 762 * @pvr_obj: Target PowerVR memory object.
 763 * @device_addr: Virtual device address at the start of the target mapping.
 764 * @size: Size of the target mapping.
 765 *
 766 * Return:
 767 *  * 0 on success,
 768 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 769 *    address,
 770 *  * Any error encountered while performing internal operations required to
 771 *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
 772 *    pvr_vm_gpuva_remap).
 773 *
 774 * The vm_ctx->lock must be held when calling this function.
 775 */
 776static int
 777pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
 778			struct pvr_gem_object *pvr_obj,
 779			u64 device_addr, u64 size)
 780{
 781	struct pvr_vm_bind_op bind_op = {0};
 782	struct drm_gpuvm_exec vm_exec = {
 783		.vm = &vm_ctx->gpuvm_mgr,
 784		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
 785			 DRM_EXEC_IGNORE_DUPLICATES,
 786		.extra = {
 787			.fn = pvr_vm_lock_extra,
 788			.priv = &bind_op,
 789		},
 790	};
 791
 792	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
 793					    device_addr, size);
 794	if (err)
 795		return err;
 796
 797	pvr_gem_object_get(pvr_obj);
 798
 799	err = drm_gpuvm_exec_lock(&vm_exec);
 800	if (err)
 801		goto err_cleanup;
 802
 803	err = pvr_vm_bind_op_exec(&bind_op);
 804
 805	drm_gpuvm_exec_unlock(&vm_exec);
 806
 807err_cleanup:
 808	pvr_vm_bind_op_fini(&bind_op);
 809
 810	return err;
 811}
 812
 813/**
 814 * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
 815 * memory.
 816 * @vm_ctx: Target VM context.
 817 * @pvr_obj: Target PowerVR memory object.
 818 * @device_addr: Virtual device address at the start of the target mapping.
 819 * @size: Size of the target mapping.
 820 *
 821 * Return:
 822 *  * 0 on success,
  823 *  * Any error encountered by pvr_vm_unmap_obj_locked().
 824 */
 825int
 826pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
 827		 u64 device_addr, u64 size)
 828{
 829	int err;
 830
 831	mutex_lock(&vm_ctx->lock);
 832	err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
 833	mutex_unlock(&vm_ctx->lock);
 834
 835	return err;
 836}
 837
 838/**
 839 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
 840 * @vm_ctx: Target VM context.
 841 * @device_addr: Virtual device address at the start of the target mapping.
 842 * @size: Size of the target mapping.
 843 *
 844 * Return:
 845 *  * 0 on success,
  846 *  * -%ENOENT if no mapping exists at @device_addr,
  847 *  * Any error encountered by pvr_vm_unmap_obj_locked().
 848 */
 849int
 850pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
 851{
 852	struct pvr_gem_object *pvr_obj;
 853	struct drm_gpuva *va;
 854	int err;
 855
 856	mutex_lock(&vm_ctx->lock);
 857
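	/*
	 * Look up the exact mapping so its backing object and precise VA range
	 * can be handed to the locked unmap helper.
	 */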
 858	va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
 859	if (va) {
 860		pvr_obj = gem_to_pvr_gem(va->gem.obj);
 861		err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
 862					      va->va.addr, va->va.range);
 863	} else {
 864		err = -ENOENT;
 865	}
 866
 867	mutex_unlock(&vm_ctx->lock);
 868
 869	return err;
 870}
 871
 872/**
 873 * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
 874 * @vm_ctx: Target VM context.
 875 *
 876 * This function ensures that no mappings are left dangling by unmapping them
 877 * all in order of ascending device-virtual address.
 878 */
 879void
 880pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
 881{
 882	mutex_lock(&vm_ctx->lock);
 883
 884	for (;;) {
 885		struct pvr_gem_object *pvr_obj;
 886		struct drm_gpuva *va;
 887
 888		va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
 889					  vm_ctx->gpuvm_mgr.mm_start,
 890					  vm_ctx->gpuvm_mgr.mm_range);
 891		if (!va)
 892			break;
 893
 894		pvr_obj = gem_to_pvr_gem(va->gem.obj);
 895
 896		WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
 897						va->va.addr, va->va.range));
 898	}
 899
 900	mutex_unlock(&vm_ctx->lock);
 901}
 902
 903/* Static data areas are determined by firmware. */
 904static const struct drm_pvr_static_data_area static_data_areas[] = {
 905	{
 906		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
 907		.location_heap_id = DRM_PVR_HEAP_GENERAL,
 908		.offset = 0,
 909		.size = 128,
 910	},
 911	{
 912		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
 913		.location_heap_id = DRM_PVR_HEAP_GENERAL,
 914		.offset = 128,
 915		.size = 1024,
 916	},
 917	{
 918		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
 919		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
 920		.offset = 0,
 921		.size = 128,
 922	},
 923	{
 924		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
 925		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
 926		.offset = 128,
 927		.size = 128,
 928	},
 929	{
 930		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
 931		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
 932		.offset = 0,
 933		.size = 128,
 934	},
 935};
 936
 937#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
 938
 939/*
 940 * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
 941 * static data area for each heap.
 942 */
 943static const struct drm_pvr_heap pvr_heaps[] = {
 944	[DRM_PVR_HEAP_GENERAL] = {
 945		.base = ROGUE_GENERAL_HEAP_BASE,
 946		.size = ROGUE_GENERAL_HEAP_SIZE,
 947		.flags = 0,
 948		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 949	},
 950	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
 951		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
 952		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
 953		.flags = 0,
 954		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 955	},
 956	[DRM_PVR_HEAP_USC_CODE] = {
 957		.base = ROGUE_USCCODE_HEAP_BASE,
 958		.size = ROGUE_USCCODE_HEAP_SIZE,
 959		.flags = 0,
 960		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 961	},
 962	[DRM_PVR_HEAP_RGNHDR] = {
 963		.base = ROGUE_RGNHDR_HEAP_BASE,
 964		.size = ROGUE_RGNHDR_HEAP_SIZE,
 965		.flags = 0,
 966		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 967	},
 968	[DRM_PVR_HEAP_VIS_TEST] = {
 969		.base = ROGUE_VISTEST_HEAP_BASE,
 970		.size = ROGUE_VISTEST_HEAP_SIZE,
 971		.flags = 0,
 972		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 973	},
 974	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
 975		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
 976		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
 977		.flags = 0,
 978		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 979	},
 980};
 981
 982int
 983pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
 984			  struct drm_pvr_ioctl_dev_query_args *args)
 985{
 986	struct drm_pvr_dev_query_static_data_areas query = {0};
 987	int err;
 988
 989	if (!args->pointer) {
 990		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
 991		return 0;
 992	}
 993
 994	err = PVR_UOBJ_GET(query, args->size, args->pointer);
 995	if (err < 0)
 996		return err;
 997
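	/* A NULL array pointer means the caller only wants the element count and stride. */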
 998	if (!query.static_data_areas.array) {
 999		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
1000		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
1001		goto copy_out;
1002	}
1003
1004	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
1005		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
1006
1007	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
1008	if (err < 0)
1009		return err;
1010
1011copy_out:
1012	err = PVR_UOBJ_SET(args->pointer, args->size, query);
1013	if (err < 0)
1014		return err;
1015
1016	args->size = sizeof(query);
1017	return 0;
1018}
1019
1020int
1021pvr_heap_info_get(const struct pvr_device *pvr_dev,
1022		  struct drm_pvr_ioctl_dev_query_args *args)
1023{
1024	struct drm_pvr_dev_query_heap_info query = {0};
1025	u64 dest;
1026	int err;
1027
1028	if (!args->pointer) {
1029		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
1030		return 0;
1031	}
1032
1033	err = PVR_UOBJ_GET(query, args->size, args->pointer);
1034	if (err < 0)
1035		return err;
1036
1037	if (!query.heaps.array) {
1038		query.heaps.count = ARRAY_SIZE(pvr_heaps);
1039		query.heaps.stride = sizeof(struct drm_pvr_heap);
1040		goto copy_out;
1041	}
1042
1043	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
1044		query.heaps.count = ARRAY_SIZE(pvr_heaps);
1045
1046	/* Region header heap is only present if BRN63142 is present. */
1047	dest = query.heaps.array;
1048	for (size_t i = 0; i < query.heaps.count; i++) {
1049		struct drm_pvr_heap heap = pvr_heaps[i];
1050
1051		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
1052			heap.size = 0;
1053
1054		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
1055		if (err < 0)
1056			return err;
1057
1058		dest += query.heaps.stride;
1059	}
1060
1061copy_out:
1062	err = PVR_UOBJ_SET(args->pointer, args->size, query);
1063	if (err < 0)
1064		return err;
1065
1066	args->size = sizeof(query);
1067	return 0;
1068}
1069
1070/**
1071 * pvr_heap_contains_range() - Determine if a given heap contains the specified
1072 *                             device-virtual address range.
1073 * @pvr_heap: Target heap.
1074 * @start: Inclusive start of the target range.
1075 * @end: Inclusive end of the target range.
1076 *
1077 * It is an error to call this function with values of @start and @end that do
1078 * not satisfy the condition @start <= @end.
1079 */
1080static __always_inline bool
1081pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
1082{
1083	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
1084}
1085
1086/**
1087 * pvr_find_heap_containing() - Find a heap which contains the specified
1088 *                              device-virtual address range.
1089 * @pvr_dev: Target PowerVR device.
1090 * @start: Start of the target range.
1091 * @size: Size of the target range.
1092 *
1093 * Return:
1094 *  * A pointer to a constant instance of struct drm_pvr_heap representing the
1095 *    heap containing the entire range specified by @start and @size on
1096 *    success, or
1097 *  * %NULL if no such heap exists.
1098 */
1099const struct drm_pvr_heap *
1100pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
1101{
1102	u64 end;
1103
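	/* end is the inclusive last address; reject ranges that wrap the 64-bit space. */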
1104	if (check_add_overflow(start, size - 1, &end))
1105		return NULL;
1106
1107	/*
1108	 * There are no guarantees about the order of address ranges in
1109	 * &pvr_heaps, so iterate over the entire array for a heap whose
1110	 * range completely encompasses the given range.
1111	 */
1112	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
 1113		/* Skip heaps that are only present when the associated quirk applies. */
1114		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
1115		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
1116			continue;
1117		}
1118
1119		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
1120			return &pvr_heaps[heap_id];
1121	}
1122
1123	return NULL;
1124}
1125
1126/**
1127 * pvr_vm_find_gem_object() - Look up a buffer object from a given
1128 *                            device-virtual address.
1129 * @vm_ctx: [IN] Target VM context.
1130 * @device_addr: [IN] Virtual device address at the start of the required
1131 *               object.
1132 * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
1133 *                     of the mapped region within the buffer object. May be
1134 *                     %NULL if this information is not required.
1135 * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
1136 *                   region. May be %NULL if this information is not required.
1137 *
1138 * If successful, a reference will be taken on the buffer object. The caller
1139 * must drop the reference with pvr_gem_object_put().
1140 *
1141 * Return:
1142 *  * The PowerVR buffer object mapped at @device_addr if one exists, or
1143 *  * %NULL otherwise.
1144 */
1145struct pvr_gem_object *
1146pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
1147		       u64 *mapped_offset_out, u64 *mapped_size_out)
1148{
1149	struct pvr_gem_object *pvr_obj;
1150	struct drm_gpuva *va;
1151
1152	mutex_lock(&vm_ctx->lock);
1153
1154	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
1155	if (!va)
1156		goto err_unlock;
1157
1158	pvr_obj = gem_to_pvr_gem(va->gem.obj);
1159	pvr_gem_object_get(pvr_obj);
1160
1161	if (mapped_offset_out)
1162		*mapped_offset_out = va->gem.offset;
1163	if (mapped_size_out)
1164		*mapped_size_out = va->va.range;
1165
1166	mutex_unlock(&vm_ctx->lock);
1167
1168	return pvr_obj;
1169
1170err_unlock:
1171	mutex_unlock(&vm_ctx->lock);
1172
1173	return NULL;
1174}
1175
1176/**
 1177 * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
1178 * @vm_ctx: Target VM context.
1179 *
1180 * Returns:
1181 *  * FW object representing firmware memory context, or
1182 *  * %NULL if this VM context does not have a firmware memory context.
1183 */
1184struct pvr_fw_object *
1185pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
1186{
1187	return vm_ctx->fw_mem_ctx_obj;
1188}
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only OR MIT
   2/* Copyright (c) 2023 Imagination Technologies Ltd. */
   3
   4#include "pvr_vm.h"
   5
   6#include "pvr_device.h"
   7#include "pvr_drv.h"
   8#include "pvr_gem.h"
   9#include "pvr_mmu.h"
  10#include "pvr_rogue_fwif.h"
  11#include "pvr_rogue_heap_config.h"
  12
  13#include <drm/drm_exec.h>
  14#include <drm/drm_gem.h>
  15#include <drm/drm_gpuvm.h>
  16
 
  17#include <linux/container_of.h>
  18#include <linux/err.h>
  19#include <linux/errno.h>
  20#include <linux/gfp_types.h>
  21#include <linux/kref.h>
  22#include <linux/mutex.h>
  23#include <linux/stddef.h>
  24
  25/**
  26 * DOC: Memory context
  27 *
  28 * This is the "top level" datatype in the VM code. It's exposed in the public
  29 * API as an opaque handle.
  30 */
  31
  32/**
  33 * struct pvr_vm_context - Context type used to represent a single VM.
  34 */
  35struct pvr_vm_context {
  36	/**
  37	 * @pvr_dev: The PowerVR device to which this context is bound.
  38	 * This binding is immutable for the life of the context.
  39	 */
  40	struct pvr_device *pvr_dev;
  41
  42	/** @mmu_ctx: The context for binding to physical memory. */
  43	struct pvr_mmu_context *mmu_ctx;
  44
  45	/** @gpuvm_mgr: GPUVM object associated with this context. */
  46	struct drm_gpuvm gpuvm_mgr;
  47
  48	/** @lock: Global lock on this VM. */
  49	struct mutex lock;
  50
  51	/**
  52	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
  53	 * context.
  54	 */
  55	struct pvr_fw_object *fw_mem_ctx_obj;
  56
  57	/** @ref_count: Reference count of object. */
  58	struct kref ref_count;
  59
  60	/**
  61	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
  62	 * should use the @dummy_gem.resv and not their own _resv field.
  63	 */
  64	struct drm_gem_object dummy_gem;
  65};
  66
  67static inline
  68struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
  69{
  70	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
  71}
  72
  73struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
  74{
  75	if (vm_ctx)
  76		kref_get(&vm_ctx->ref_count);
  77
  78	return vm_ctx;
  79}
  80
  81/**
  82 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
  83 *                                     page table structure behind a VM context.
  84 * @vm_ctx: Target VM context.
  85 */
  86dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
  87{
  88	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
  89}
  90
  91/**
  92 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
  93 * @vm_ctx: Target VM context.
  94 *
  95 * This is used to allow private BOs to share a dma_resv for faster fence
  96 * updates.
  97 *
  98 * Returns: The dma_resv pointer.
  99 */
 100struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
 101{
 102	return vm_ctx->dummy_gem.resv;
 103}
 104
 105/**
 106 * DOC: Memory mappings
 107 */
 108
 109/**
 110 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
 111 */
 112struct pvr_vm_gpuva {
 113	/** @base: The wrapped drm_gpuva object. */
 114	struct drm_gpuva base;
 115};
 116
 
 
 117enum pvr_vm_bind_type {
 118	PVR_VM_BIND_TYPE_MAP,
 119	PVR_VM_BIND_TYPE_UNMAP,
 120};
 121
 122/**
 123 * struct pvr_vm_bind_op - Context of a map/unmap operation.
 124 */
 125struct pvr_vm_bind_op {
 126	/** @type: Map or unmap. */
 127	enum pvr_vm_bind_type type;
 128
 129	/** @pvr_obj: Object associated with mapping (map only). */
 130	struct pvr_gem_object *pvr_obj;
 131
 132	/**
 133	 * @vm_ctx: VM context where the mapping will be created or destroyed.
 134	 */
 135	struct pvr_vm_context *vm_ctx;
 136
 137	/** @mmu_op_ctx: MMU op context. */
 138	struct pvr_mmu_op_context *mmu_op_ctx;
 139
 140	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
 141	struct drm_gpuvm_bo *gpuvm_bo;
 142
 143	/**
 144	 * @new_va: Prealloced VA mapping object (init in callback).
 145	 * Used when creating a mapping.
 146	 */
 147	struct pvr_vm_gpuva *new_va;
 148
 149	/**
 150	 * @prev_va: Prealloced VA mapping object (init in callback).
 151	 * Used when a mapping or unmapping operation overlaps an existing
 152	 * mapping and splits away the beginning into a new mapping.
 153	 */
 154	struct pvr_vm_gpuva *prev_va;
 155
 156	/**
 157	 * @next_va: Prealloced VA mapping object (init in callback).
 158	 * Used when a mapping or unmapping operation overlaps an existing
 159	 * mapping and splits away the end into a new mapping.
 160	 */
 161	struct pvr_vm_gpuva *next_va;
 162
 163	/** @offset: Offset into @pvr_obj to begin mapping from. */
 164	u64 offset;
 165
 166	/** @device_addr: Device-virtual address at the start of the mapping. */
 167	u64 device_addr;
 168
 169	/** @size: Size of the desired mapping. */
 170	u64 size;
 171};
 172
 173/**
 174 * pvr_vm_bind_op_exec() - Execute a single bind op.
 175 * @bind_op: Bind op context.
 176 *
 177 * Returns:
 178 *  * 0 on success,
 179 *  * Any error code returned by drm_gpuva_sm_map(), drm_gpuva_sm_unmap(), or
 180 *    a callback function.
 181 */
 182static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
 183{
 184	switch (bind_op->type) {
 185	case PVR_VM_BIND_TYPE_MAP:
 186		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
 187					bind_op, bind_op->device_addr,
 188					bind_op->size,
 189					gem_from_pvr_gem(bind_op->pvr_obj),
 190					bind_op->offset);
 191
 192	case PVR_VM_BIND_TYPE_UNMAP:
 193		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
 194					  bind_op, bind_op->device_addr,
 195					  bind_op->size);
 196	}
 197
 198	/*
 199	 * This shouldn't happen unless something went wrong
 200	 * in drm_sched.
 201	 */
 202	WARN_ON(1);
 203	return -EINVAL;
 204}
 205
 206static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
 207{
 208	drm_gpuvm_bo_put(bind_op->gpuvm_bo);
 209
 210	kfree(bind_op->new_va);
 211	kfree(bind_op->prev_va);
 212	kfree(bind_op->next_va);
 213
 214	if (bind_op->pvr_obj)
 215		pvr_gem_object_put(bind_op->pvr_obj);
 216
 217	if (bind_op->mmu_op_ctx)
 218		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
 219}
 220
 221static int
 222pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
 223			struct pvr_vm_context *vm_ctx,
 224			struct pvr_gem_object *pvr_obj, u64 offset,
 225			u64 device_addr, u64 size)
 226{
 227	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
 228	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
 229	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
 230	struct sg_table *sgt;
 231	u64 offset_plus_size;
 232	int err;
 233
 234	if (check_add_overflow(offset, size, &offset_plus_size))
 235		return -EINVAL;
 236
 237	if (is_user &&
 238	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
 239		return -EINVAL;
 240	}
 241
 242	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
 243	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
 244	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
 245		return -EINVAL;
 246
 247	bind_op->type = PVR_VM_BIND_TYPE_MAP;
 248
 249	dma_resv_lock(obj->resv, NULL);
 250	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
 251	dma_resv_unlock(obj->resv);
 252	if (IS_ERR(bind_op->gpuvm_bo))
 253		return PTR_ERR(bind_op->gpuvm_bo);
 254
 255	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
 256	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
 257	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
 258	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
 259		err = -ENOMEM;
 260		goto err_bind_op_fini;
 261	}
 262
 263	/* Pin pages so they're ready for use. */
 264	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
 265	err = PTR_ERR_OR_ZERO(sgt);
 266	if (err)
 267		goto err_bind_op_fini;
 268
 269	bind_op->mmu_op_ctx =
 270		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
 271	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
 272	if (err) {
 273		bind_op->mmu_op_ctx = NULL;
 274		goto err_bind_op_fini;
 275	}
 276
 277	bind_op->pvr_obj = pvr_obj;
 278	bind_op->vm_ctx = vm_ctx;
 279	bind_op->device_addr = device_addr;
 280	bind_op->size = size;
 281	bind_op->offset = offset;
 282
 283	return 0;
 284
 285err_bind_op_fini:
 286	pvr_vm_bind_op_fini(bind_op);
 287
 288	return err;
 289}
 290
 291static int
 292pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
 293			  struct pvr_vm_context *vm_ctx, u64 device_addr,
 294			  u64 size)
 
 295{
 296	int err;
 297
 298	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
 299		return -EINVAL;
 300
 301	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
 302
 303	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
 304	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
 305	if (!bind_op->prev_va || !bind_op->next_va) {
 306		err = -ENOMEM;
 307		goto err_bind_op_fini;
 308	}
 309
 310	bind_op->mmu_op_ctx =
 311		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
 312	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
 313	if (err) {
 314		bind_op->mmu_op_ctx = NULL;
 315		goto err_bind_op_fini;
 316	}
 317
 
 318	bind_op->vm_ctx = vm_ctx;
 319	bind_op->device_addr = device_addr;
 320	bind_op->size = size;
 321
 322	return 0;
 323
 324err_bind_op_fini:
 325	pvr_vm_bind_op_fini(bind_op);
 326
 327	return err;
 328}
 329
 330/**
 331 * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
 332 * @op: gpuva op containing the remap details.
 333 * @op_ctx: Operation context.
 334 *
 335 * Context: Called by drm_gpuvm_sm_map following a successful mapping while
 336 * @op_ctx.vm_ctx mutex is held.
 337 *
 338 * Return:
 339 *  * 0 on success, or
 340 *  * Any error returned by pvr_mmu_map().
 341 */
 342static int
 343pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
 344{
 345	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
 346	struct pvr_vm_bind_op *ctx = op_ctx;
 347	int err;
 348
 349	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
 350		return -EINVAL;
 351
 352	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
 353			  op->map.va.addr);
 354	if (err)
 355		return err;
 356
 357	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
 358	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
 359	ctx->new_va = NULL;
 360
 361	return 0;
 362}
 363
 364/**
 365 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
 366 * @op: gpuva op containing the unmap details.
 367 * @op_ctx: Operation context.
 368 *
 369 * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
 370 * @op_ctx.vm_ctx mutex is held.
 371 *
 372 * Return:
 373 *  * 0 on success, or
 374 *  * Any error returned by pvr_mmu_unmap().
 375 */
 376static int
 377pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
 378{
 379	struct pvr_vm_bind_op *ctx = op_ctx;
 380
 381	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
 382				op->unmap.va->va.range);
 383
 384	if (err)
 385		return err;
 386
 387	drm_gpuva_unmap(&op->unmap);
 388	drm_gpuva_unlink(op->unmap.va);
 
 389
 390	return 0;
 391}
 392
 393/**
 394 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
 395 * @op: gpuva op containing the remap details.
 396 * @op_ctx: Operation context.
 397 *
 398 * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
 399 * mapping or unmapping operation causes a region to be split. The
 400 * @op_ctx.vm_ctx mutex is held.
 401 *
 402 * Return:
 403 *  * 0 on success, or
 404 *  * Any error returned by pvr_vm_gpuva_unmap() or pvr_vm_gpuva_unmap().
 405 */
 406static int
 407pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
 408{
 409	struct pvr_vm_bind_op *ctx = op_ctx;
 410	u64 va_start = 0, va_range = 0;
 411	int err;
 412
 413	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
 414	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
 415	if (err)
 416		return err;
 417
 418	/* No actual remap required: the page table tree depth is fixed to 3,
 419	 * and we use 4k page table entries only for now.
 420	 */
 421	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);
 422
 423	if (op->remap.prev) {
 424		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
 425		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
 426		ctx->prev_va = NULL;
 427	}
 428
 429	if (op->remap.next) {
 430		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
 431		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
 432		ctx->next_va = NULL;
 433	}
 434
 435	drm_gpuva_unlink(op->remap.unmap->va);
 
 436
 437	return 0;
 438}
 439
 440/*
 441 * Public API
 442 *
 443 * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
 444 */
 445
 446/**
 447 * pvr_device_addr_is_valid() - Tests whether a device-virtual address
 448 *                              is valid.
 449 * @device_addr: Virtual device address to test.
 450 *
 451 * Return:
 452 *  * %true if @device_addr is within the valid range for a device page
 453 *    table and is aligned to the device page size, or
 454 *  * %false otherwise.
 455 */
 456bool
 457pvr_device_addr_is_valid(u64 device_addr)
 458{
 459	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
 460	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
 461}
 462
 463/**
 464 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
 465 * address and associated size are both valid.
 466 * @vm_ctx: Target VM context.
 467 * @device_addr: Virtual device address to test.
 468 * @size: Size of the range based at @device_addr to test.
 469 *
 470 * Calling pvr_device_addr_is_valid() twice (once on @size, and again on
 471 * @device_addr + @size) to verify a device-virtual address range initially
 472 * seems intuitive, but it produces a false-negative when the address range
 473 * is right at the end of device-virtual address space.
 474 *
 475 * This function catches that corner case, as well as checking that
 476 * @size is non-zero.
 477 *
 478 * Return:
 479 *  * %true if @device_addr is device page aligned; @size is device page
 480 *    aligned; the range specified by @device_addr and @size is within the
 481 *    bounds of the device-virtual address space, and @size is non-zero, or
 482 *  * %false otherwise.
 483 */
 484bool
 485pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
 486				   u64 device_addr, u64 size)
 487{
 488	return pvr_device_addr_is_valid(device_addr) &&
 489	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
 490	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
 491	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
 492}
 493
 494static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
 495{
 496	kfree(to_pvr_vm_context(gpuvm));
 497}
 498
 499static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
 500	.vm_free = pvr_gpuvm_free,
 501	.sm_step_map = pvr_vm_gpuva_map,
 502	.sm_step_remap = pvr_vm_gpuva_remap,
 503	.sm_step_unmap = pvr_vm_gpuva_unmap,
 504};
 505
 506static void
 507fw_mem_context_init(void *cpu_ptr, void *priv)
 508{
 509	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
 510	struct pvr_vm_context *vm_ctx = priv;
 511
 512	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
 513	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
 514}
 515
 516/**
 517 * pvr_vm_create_context() - Create a new VM context.
 518 * @pvr_dev: Target PowerVR device.
 519 * @is_userspace_context: %true if this context is for userspace. This will
 520 *                        create a firmware memory context for the VM context
 521 *                        and disable warnings when tearing down mappings.
 522 *
 523 * Return:
 524 *  * A handle to the newly-minted VM context on success,
 525 *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
 526 *    missing or has an unsupported value,
 527 *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
 528 *    or
 529 *  * Any error encountered while setting up internal structures.
 530 */
 531struct pvr_vm_context *
 532pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
 533{
 534	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
 535
 536	struct pvr_vm_context *vm_ctx;
 537	u16 device_addr_bits;
 538
 539	int err;
 540
 541	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
 542				&device_addr_bits);
 543	if (err) {
 544		drm_err(drm_dev,
 545			"Failed to get device virtual address space bits\n");
 546		return ERR_PTR(err);
 547	}
 548
 549	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
 550		drm_err(drm_dev,
 551			"Device has unsupported virtual address space size\n");
 552		return ERR_PTR(-EINVAL);
 553	}
 554
 555	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
 556	if (!vm_ctx)
 557		return ERR_PTR(-ENOMEM);
 558
 559	vm_ctx->pvr_dev = pvr_dev;
 560
 561	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
 562	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
 563	if (err)
 564		goto err_free;
 565
 566	if (is_userspace_context) {
 567		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
 568					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
 569					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
 570
 571		if (err)
 572			goto err_page_table_destroy;
 573	}
 574
 575	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
 576	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
 577		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
 578		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
 579		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
 580
 581	mutex_init(&vm_ctx->lock);
 582	kref_init(&vm_ctx->ref_count);
 583
 584	return vm_ctx;
 585
 586err_page_table_destroy:
 587	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
 588
 589err_free:
 590	kfree(vm_ctx);
 591
 592	return ERR_PTR(err);
 593}
 594
 595/**
 596 * pvr_vm_context_release() - Teardown a VM context.
 597 * @ref_count: Pointer to reference counter of the VM context.
 598 *
 599 * This function ensures that no mappings are left dangling by unmapping them
 600 * all in order of ascending device-virtual address.
 601 */
 602static void
 603pvr_vm_context_release(struct kref *ref_count)
 604{
 605	struct pvr_vm_context *vm_ctx =
 606		container_of(ref_count, struct pvr_vm_context, ref_count);
 607
 608	if (vm_ctx->fw_mem_ctx_obj)
 609		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
 610
 611	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
 612			     vm_ctx->gpuvm_mgr.mm_range));
 613
 614	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
 615	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
 616	mutex_destroy(&vm_ctx->lock);
 617
 618	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
 619}
 620
 621/**
 622 * pvr_vm_context_lookup() - Look up VM context from handle
 623 * @pvr_file: Pointer to pvr_file structure.
 624 * @handle: Object handle.
 625 *
 626 * Takes reference on VM context object. Call pvr_vm_context_put() to release.
 627 *
 628 * Returns:
 629 *  * The requested object on success, or
 630 *  * %NULL on failure (object does not exist in list, or is not a VM context)
 631 */
 632struct pvr_vm_context *
 633pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
 634{
 635	struct pvr_vm_context *vm_ctx;
 636
 637	xa_lock(&pvr_file->vm_ctx_handles);
 638	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
 639	if (vm_ctx)
 640		kref_get(&vm_ctx->ref_count);
 641
 642	xa_unlock(&pvr_file->vm_ctx_handles);
 643
 644	return vm_ctx;
 645}
 646
 647/**
 648 * pvr_vm_context_put() - Release a reference on a VM context
 649 * @vm_ctx: Target VM context.
 650 *
 651 * Returns:
 652 *  * %true if the VM context was destroyed, or
 653 *  * %false if there are any references still remaining.
 654 */
 655bool
 656pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
 657{
 658	if (vm_ctx)
 659		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);
 660
 661	return true;
 662}
 663
 664/**
 665 * pvr_destroy_vm_contexts_for_file: Destroy any VM contexts associated with the
 666 * given file.
 667 * @pvr_file: Pointer to pvr_file structure.
 668 *
 669 * Removes all vm_contexts associated with @pvr_file from the device VM context
 670 * list and drops initial references. vm_contexts will then be destroyed once
 671 * all outstanding references are dropped.
 672 */
 673void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
 674{
 675	struct pvr_vm_context *vm_ctx;
 676	unsigned long handle;
 677
 678	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
 679		/* Put the result of xa_erase() rather than vm_ctx, to avoid racing with a concurrent erase. */
 680		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
 681	}
 682}
 683
 684static int
 685pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
 686{
 687	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
 688	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
 689
 690	/* Unmap operations don't have an object to lock. */
 691	if (!pvr_obj)
 692		return 0;
 693
 694	/* Acquire lock on the GEM being mapped. */
 695	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
 696}
 697
 698/**
 699 * pvr_vm_map() - Map a section of physical memory into a section of
 700 * device-virtual memory.
 701 * @vm_ctx: Target VM context.
 702 * @pvr_obj: Target PowerVR memory object.
 703 * @pvr_obj_offset: Offset into @pvr_obj to map from.
 704 * @device_addr: Virtual device address at the start of the requested mapping.
 705 * @size: Size of the requested mapping.
 706 *
 707 * No handle is returned to represent the mapping. Instead, callers should
 708 * remember @device_addr and use that as a handle.
 709 *
 710 * Return:
 711 *  * 0 on success,
 712 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 713 *    address; the region specified by @pvr_obj_offset and @size does not fall
 714 *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
 715 *    is not device-virtual page-aligned,
 716 *  * Any error encountered while performing internal operations required to
 717 *    create the mapping (returned from pvr_vm_gpuva_map or
 718 *    pvr_vm_gpuva_remap).
 719 */
 720int
 721pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
 722	   u64 pvr_obj_offset, u64 device_addr, u64 size)
 723{
 724	struct pvr_vm_bind_op bind_op = {0};
 725	struct drm_gpuvm_exec vm_exec = {
 726		.vm = &vm_ctx->gpuvm_mgr,
 727		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
 728			 DRM_EXEC_IGNORE_DUPLICATES,
 729		.extra = {
 730			.fn = pvr_vm_lock_extra,
 731			.priv = &bind_op,
 732		},
 733	};
 734
 735	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
 736					  pvr_obj_offset, device_addr,
 737					  size);
 738
 739	if (err)
 740		return err;
 741
 742	pvr_gem_object_get(pvr_obj);
 743
 744	err = drm_gpuvm_exec_lock(&vm_exec);
 745	if (err)
 746		goto err_cleanup;
 747
 748	err = pvr_vm_bind_op_exec(&bind_op);
 749
 750	drm_gpuvm_exec_unlock(&vm_exec);
 751
 752err_cleanup:
 753	pvr_vm_bind_op_fini(&bind_op);
 754
 755	return err;
 756}
 757
 758/**
 759 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
 760 * @vm_ctx: Target VM context.
 761 * @device_addr: Virtual device address at the start of the target mapping.
 762 * @size: Size of the target mapping.
 763 *
 764 * Return:
 765 *  * 0 on success,
 766 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 767 *    address,
 768 *  * Any error encountered while performing internal operations required to
 769 *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
 770 *    pvr_vm_gpuva_remap).
 771 */
 772int
 773pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
 774{
 775	struct pvr_vm_bind_op bind_op = {0};
 776	struct drm_gpuvm_exec vm_exec = {
 777		.vm = &vm_ctx->gpuvm_mgr,
 778		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
 779			 DRM_EXEC_IGNORE_DUPLICATES,
 780		.extra = {
 781			.fn = pvr_vm_lock_extra,
 782			.priv = &bind_op,
 783		},
 784	};
 785
 786	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
 787					    size);
 788	if (err)
 789		return err;
 790
 791	err = drm_gpuvm_exec_lock(&vm_exec);
 792	if (err)
 793		goto err_cleanup;
 794
 795	err = pvr_vm_bind_op_exec(&bind_op);
 796
 797	drm_gpuvm_exec_unlock(&vm_exec);
 798
 799err_cleanup:
 800	pvr_vm_bind_op_fini(&bind_op);
 801
 802	return err;
 803}
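
/*
 * Illustrative pairing of pvr_vm_map() and pvr_vm_unmap(). As the
 * pvr_vm_map() documentation notes, no mapping handle is returned; the
 * caller remembers @device_addr and passes it back to unmap. A minimal
 * sketch only; pvr_obj, device_addr and size are hypothetical caller
 * state.
 *
 *	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
 *	if (err)
 *		return err;
 *
 *	... the GPU may now access pvr_obj through device_addr ...
 *
 *	err = pvr_vm_unmap(vm_ctx, device_addr, size);
 */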
 804
 805/* Static data areas are determined by firmware. */
 806static const struct drm_pvr_static_data_area static_data_areas[] = {
 807	{
 808		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
 809		.location_heap_id = DRM_PVR_HEAP_GENERAL,
 810		.offset = 0,
 811		.size = 128,
 812	},
 813	{
 814		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
 815		.location_heap_id = DRM_PVR_HEAP_GENERAL,
 816		.offset = 128,
 817		.size = 1024,
 818	},
 819	{
 820		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
 821		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
 822		.offset = 0,
 823		.size = 128,
 824	},
 825	{
 826		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
 827		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
 828		.offset = 128,
 829		.size = 128,
 830	},
 831	{
 832		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
 833		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
 834		.offset = 0,
 835		.size = 128,
 836	},
 837};
 838
 839#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
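
/*
 * Worked example, assuming a 4 KiB PAGE_SIZE: the last static data area in
 * the general heap above is the YUV CSC area at offset 128 with size 1024,
 * so GET_RESERVED_SIZE(128, 1024) == round_up(1152, 4096) == 4096, i.e. a
 * single page covers that heap's static data.
 */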
 840
 841/*
 842 * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
 843 * static data area for each heap.
 844 */
 845static const struct drm_pvr_heap pvr_heaps[] = {
 846	[DRM_PVR_HEAP_GENERAL] = {
 847		.base = ROGUE_GENERAL_HEAP_BASE,
 848		.size = ROGUE_GENERAL_HEAP_SIZE,
 849		.flags = 0,
 850		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 851	},
 852	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
 853		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
 854		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
 855		.flags = 0,
 856		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 857	},
 858	[DRM_PVR_HEAP_USC_CODE] = {
 859		.base = ROGUE_USCCODE_HEAP_BASE,
 860		.size = ROGUE_USCCODE_HEAP_SIZE,
 861		.flags = 0,
 862		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 863	},
 864	[DRM_PVR_HEAP_RGNHDR] = {
 865		.base = ROGUE_RGNHDR_HEAP_BASE,
 866		.size = ROGUE_RGNHDR_HEAP_SIZE,
 867		.flags = 0,
 868		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 869	},
 870	[DRM_PVR_HEAP_VIS_TEST] = {
 871		.base = ROGUE_VISTEST_HEAP_BASE,
 872		.size = ROGUE_VISTEST_HEAP_SIZE,
 873		.flags = 0,
 874		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 875	},
 876	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
 877		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
 878		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
 879		.flags = 0,
 880		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
 881	},
 882};
 883
 884int
 885pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
 886			  struct drm_pvr_ioctl_dev_query_args *args)
 887{
 888	struct drm_pvr_dev_query_static_data_areas query = {0};
 889	int err;
 890
 891	if (!args->pointer) {
 892		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
 893		return 0;
 894	}
 895
 896	err = PVR_UOBJ_GET(query, args->size, args->pointer);
 897	if (err < 0)
 898		return err;
 899
 900	if (!query.static_data_areas.array) {
 901		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
 902		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
 903		goto copy_out;
 904	}
 905
 906	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
 907		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
 908
 909	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
 910	if (err < 0)
 911		return err;
 912
 913copy_out:
 914	err = PVR_UOBJ_SET(args->pointer, args->size, query);
 915	if (err < 0)
 916		return err;
 917
 918	args->size = sizeof(query);
 919	return 0;
 920}
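
/*
 * Illustrative userspace flow for the query above, which supports a
 * "size the array, then fill it" pattern. A sketch only: the
 * DRM_IOCTL_PVR_DEV_QUERY ioctl and DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET
 * type are assumed from the PowerVR uapi, and the fd/drmIoctl() plumbing
 * is hypothetical.
 *
 *	struct drm_pvr_dev_query_static_data_areas query = {0};
 *	struct drm_pvr_ioctl_dev_query_args args = {
 *		.type = DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
 *		.pointer = (__u64)(uintptr_t)&query,
 *		.size = sizeof(query),
 *	};
 *
 *	First pass: .array is 0, so only .count and .stride are returned.
 *	drmIoctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
 *
 *	Second pass: point .array at a buffer of .count * .stride bytes and
 *	call drmIoctl() again to receive the entries themselves.
 */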
 921
 922int
 923pvr_heap_info_get(const struct pvr_device *pvr_dev,
 924		  struct drm_pvr_ioctl_dev_query_args *args)
 925{
 926	struct drm_pvr_dev_query_heap_info query = {0};
 927	u64 dest;
 928	int err;
 929
 930	if (!args->pointer) {
 931		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
 932		return 0;
 933	}
 934
 935	err = PVR_UOBJ_GET(query, args->size, args->pointer);
 936	if (err < 0)
 937		return err;
 938
 939	if (!query.heaps.array) {
 940		query.heaps.count = ARRAY_SIZE(pvr_heaps);
 941		query.heaps.stride = sizeof(struct drm_pvr_heap);
 942		goto copy_out;
 943	}
 944
 945	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
 946		query.heaps.count = ARRAY_SIZE(pvr_heaps);
 947
 948	/* Region header heap is only present if BRN63142 is present. */
 949	dest = query.heaps.array;
 950	for (size_t i = 0; i < query.heaps.count; i++) {
 951		struct drm_pvr_heap heap = pvr_heaps[i];
 952
 953		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
 954			heap.size = 0;
 955
 956		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
 957		if (err < 0)
 958			return err;
 959
 960		dest += query.heaps.stride;
 961	}
 962
 963copy_out:
 964	err = PVR_UOBJ_SET(args->pointer, args->size, query);
 965	if (err < 0)
 966		return err;
 967
 968	args->size = sizeof(query);
 969	return 0;
 970}
 971
 972/**
 973 * pvr_heap_contains_range() - Determine if a given heap contains the specified
 974 *                             device-virtual address range.
 975 * @pvr_heap: Target heap.
 976 * @start: Inclusive start of the target range.
 977 * @end: Inclusive end of the target range.
 978 *
 979 * It is an error to call this function with values of @start and @end that do
 980 * not satisfy the condition @start <= @end.
 981 */
 982static __always_inline bool
 983pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
 984{
 985	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
 986}
 987
 988/**
 989 * pvr_find_heap_containing() - Find a heap which contains the specified
 990 *                              device-virtual address range.
 991 * @pvr_dev: Target PowerVR device.
 992 * @start: Start of the target range.
 993 * @size: Size of the target range.
 994 *
 995 * Return:
 996 *  * A pointer to a constant instance of struct drm_pvr_heap representing the
 997 *    heap containing the entire range specified by @start and @size on
 998 *    success, or
 999 *  * %NULL if no such heap exists.
1000 */
1001const struct drm_pvr_heap *
1002pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
1003{
1004	u64 end;
1005
1006	if (check_add_overflow(start, size - 1, &end))
1007		return NULL;
1008
1009	/*
1010	 * There are no guarantees about the order of address ranges in
1011	 * &pvr_heaps, so iterate over the entire array for a heap whose
1012	 * range completely encompasses the given range.
1013	 */
1014	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
1015		/* Skip heaps that are only present when the associated quirk applies. */
1016		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
1017		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
1018			continue;
1019		}
1020
1021		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
1022			return &pvr_heaps[heap_id];
1023	}
1024
1025	return NULL;
1026}
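
/*
 * Illustrative use of the helper above to validate a requested mapping
 * before accepting it. A minimal sketch; the surrounding validation code
 * is hypothetical.
 *
 *	const struct drm_pvr_heap *heap;
 *
 *	heap = pvr_find_heap_containing(pvr_dev, device_addr, size);
 *	if (!heap)
 *		return -EINVAL;
 */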
1027
1028/**
1029 * pvr_vm_find_gem_object() - Look up a buffer object from a given
1030 *                            device-virtual address.
1031 * @vm_ctx: [IN] Target VM context.
1032 * @device_addr: [IN] Virtual device address at the start of the required
1033 *               object.
1034 * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
1035 *                     of the mapped region within the buffer object. May be
1036 *                     %NULL if this information is not required.
1037 * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
1038 *                   region. May be %NULL if this information is not required.
1039 *
1040 * If successful, a reference will be taken on the buffer object. The caller
1041 * must drop the reference with pvr_gem_object_put().
1042 *
1043 * Return:
1044 *  * The PowerVR buffer object mapped at @device_addr if one exists, or
1045 *  * %NULL otherwise.
1046 */
1047struct pvr_gem_object *
1048pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
1049		       u64 *mapped_offset_out, u64 *mapped_size_out)
1050{
1051	struct pvr_gem_object *pvr_obj;
1052	struct drm_gpuva *va;
1053
1054	mutex_lock(&vm_ctx->lock);
1055
1056	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
1057	if (!va)
1058		goto err_unlock;
1059
1060	pvr_obj = gem_to_pvr_gem(va->gem.obj);
1061	pvr_gem_object_get(pvr_obj);
1062
1063	if (mapped_offset_out)
1064		*mapped_offset_out = va->gem.offset;
1065	if (mapped_size_out)
1066		*mapped_size_out = va->va.range;
1067
1068	mutex_unlock(&vm_ctx->lock);
1069
1070	return pvr_obj;
1071
1072err_unlock:
1073	mutex_unlock(&vm_ctx->lock);
1074
1075	return NULL;
1076}
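
/*
 * Illustrative use of the lookup above: resolving a device-virtual
 * address back to its backing object. A minimal sketch; the caller and
 * what it does with the object are hypothetical. Note that the reference
 * taken by the lookup must be dropped with pvr_gem_object_put().
 *
 *	u64 offset, size;
 *	struct pvr_gem_object *pvr_obj;
 *
 *	pvr_obj = pvr_vm_find_gem_object(vm_ctx, device_addr, &offset, &size);
 *	if (!pvr_obj)
 *		return -ENOENT;
 *
 *	... inspect the mapped region [offset, offset + size) of pvr_obj ...
 *
 *	pvr_gem_object_put(pvr_obj);
 */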
1077
1078/**
1079 * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
1080 * @vm_ctx: Target VM context.
1081 *
1082 * Returns:
1083 *  * FW object representing firmware memory context, or
1084 *  * %NULL if this VM context does not have a firmware memory context.
1085 */
1086struct pvr_fw_object *
1087pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
1088{
1089	return vm_ctx->fw_mem_ctx_obj;
1090}