/* SPDX-License-Identifier: GPL-2.0-only OR MIT */

#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_exec.h>

struct drm_gpuvm;
struct drm_gpuvm_bo;
struct drm_gpuvm_ops;

/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuvm.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @vm: the &drm_gpuvm this object is associated with
	 */
	struct drm_gpuvm *vm;

	/**
	 * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
	 * &drm_gem_object
	 */
	struct drm_gpuvm_bo *vm_bo;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @va.addr: the start address
		 */
		u64 addr;
		/**
		 * @va.range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @gem.offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @gem.obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
	 */
	struct {
		/**
		 * @rb.node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @rb.entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree. This is
		 * useful to keep iterating &drm_gpuvas from a start node found
		 * through the rb-tree while doing modifications on the rb-tree
		 * itself.
		 */
		struct list_head entry;

		/**
		 * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
		 */
		u64 __subtree_last;
	} rb;
};

int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);

static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
				  struct drm_gem_object *obj, u64 offset)
{
	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;
}
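
/*
 * Example (editor's sketch, not part of the original header): inserting a new
 * mapping and linking it to the GEM object's &drm_gpuvm_bo. The foo_map()
 * name and the minimal error handling are hypothetical.
 *
 *	int foo_map(struct drm_gpuvm *gpuvm, struct drm_gpuvm_bo *vm_bo,
 *		    u64 addr, u64 range, u64 offset)
 *	{
 *		struct drm_gpuva *va;
 *		int ret;
 *
 *		va = kzalloc(sizeof(*va), GFP_KERNEL);
 *		if (!va)
 *			return -ENOMEM;
 *
 *		drm_gpuva_init(va, addr, range, vm_bo->obj, offset);
 *
 *		ret = drm_gpuva_insert(gpuvm, va);
 *		if (ret) {
 *			kfree(va);
 *			return ret;
 *		}
 *
 *		// drm_gpuva_link() requires the GEM's gpuva lock to be held.
 *		drm_gpuva_link(va, vm_bo);
 *		return 0;
 *	}
 */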

/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
 * is invalidated
 * @va: the &drm_gpuva to check
 *
 * Returns: %true if the GPU VA is invalidated, %false otherwise
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}
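
/*
 * Example (editor's sketch): a driver's &drm_gpuvm_ops.vm_bo_validate
 * implementation might clear the invalidated flag of all &drm_gpuvas backed
 * by a freshly validated GEM object. foo_vm_bo_validate() and
 * foo_bo_validate() are hypothetical names.
 *
 *	int foo_vm_bo_validate(struct drm_gpuvm_bo *vm_bo,
 *			       struct drm_exec *exec)
 *	{
 *		struct drm_gpuva *va;
 *		int ret;
 *
 *		ret = foo_bo_validate(vm_bo->obj, exec);
 *		if (ret)
 *			return ret;
 *
 *		// The GEM's gpuva lock must be held here.
 *		drm_gpuvm_bo_for_each_va(va, vm_bo)
 *			drm_gpuva_invalidate(va, false);
 *
 *		return 0;
 *	}
 */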

/**
 * enum drm_gpuvm_flags - flags for struct drm_gpuvm
 */
enum drm_gpuvm_flags {
	/**
	 * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
	 * GPUVM's &dma_resv lock
	 */
	DRM_GPUVM_RESV_PROTECTED = BIT(0),

	/**
	 * @DRM_GPUVM_USERBITS: user defined bits
	 */
	DRM_GPUVM_USERBITS = BIT(1),
};

/**
 * struct drm_gpuvm - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space by
 * using an interval tree (rb-tree) of &drm_gpuva structures. Typically, this
 * structure is embedded in bigger driver structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuvm {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @flags: the &drm_gpuvm_flags of this GPUVM
	 */
	enum drm_gpuvm_flags flags;

	/**
	 * @drm: the &drm_device this VM lives in
	 */
	struct drm_device *drm;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @rb.tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @rb.list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kref: reference count of this object
	 */
	struct kref kref;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuvm_ops *ops;

	/**
	 * @r_obj: Resv GEM object representing the GPUVM's common &dma_resv.
	 */
	struct drm_gem_object *r_obj;

	/**
	 * @extobj: structure holding the extobj list
	 */
	struct {
		/**
		 * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
		 * external objects
		 */
		struct list_head list;

		/**
		 * @extobj.local_list: pointer to the local list temporarily
		 * storing entries from the external object list
		 */
		struct list_head *local_list;

		/**
		 * @extobj.lock: spinlock to protect the extobj list
		 */
		spinlock_t lock;
	} extobj;

	/**
	 * @evict: structure holding the evict list and evict list lock
	 */
	struct {
		/**
		 * @evict.list: &list_head storing &drm_gpuvm_bos currently
		 * being evicted
		 */
		struct list_head list;

		/**
		 * @evict.local_list: pointer to the local list temporarily
		 * storing entries from the evicted object list
		 */
		struct list_head *local_list;

		/**
		 * @evict.lock: spinlock to protect the evict list
		 */
		spinlock_t lock;
	} evict;
};

void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
		    enum drm_gpuvm_flags flags,
		    struct drm_device *drm,
		    struct drm_gem_object *r_obj,
		    u64 start_offset, u64 range,
		    u64 reserve_offset, u64 reserve_range,
		    const struct drm_gpuvm_ops *ops);
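
/*
 * Example (editor's sketch): initializing a GPUVM embedded in a hypothetical
 * driver structure. The address-space bounds and the kernel reserved cutout
 * are made-up values; it is assumed here that drm_gpuvm_resv_object_alloc()
 * returns NULL on failure and that drm_gpuvm_init() takes its own reference
 * on the resv object.
 *
 *	struct foo_vm {
 *		struct drm_gpuvm gpuvm;
 *		// driver specific members ...
 *	};
 *
 *	int foo_vm_create(struct drm_device *drm, struct foo_vm *vm)
 *	{
 *		struct drm_gem_object *r_obj;
 *
 *		r_obj = drm_gpuvm_resv_object_alloc(drm);
 *		if (!r_obj)
 *			return -ENOMEM;
 *
 *		drm_gpuvm_init(&vm->gpuvm, "foo-vm", 0, drm, r_obj,
 *			       0, 1ULL << 48,	// VA space [0, 2^48)
 *			       0, SZ_4K,	// kernel reserved node
 *			       &foo_gpuvm_ops);	// must provide .vm_free
 *		drm_gem_object_put(r_obj);	// gpuvm holds its own reference
 *		return 0;
 *	}
 */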

/**
 * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
 * @gpuvm: the &drm_gpuvm to acquire the reference of
 *
 * This function acquires an additional reference to @gpuvm. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm pointer
 */
static inline struct drm_gpuvm *
drm_gpuvm_get(struct drm_gpuvm *gpuvm)
{
	kref_get(&gpuvm->kref);

	return gpuvm;
}

void drm_gpuvm_put(struct drm_gpuvm *gpuvm);

bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);

struct drm_gem_object *
drm_gpuvm_resv_object_alloc(struct drm_device *drm);

/**
 * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
 * set
 * @gpuvm: the &drm_gpuvm
 *
 * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
 */
static inline bool
drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
{
	return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}

/**
 * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
 */
#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)

/**
 * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
 * &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
 * &dma_resv
 */
#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)

#define drm_gpuvm_resv_held(gpuvm__) \
	dma_resv_held(drm_gpuvm_resv(gpuvm__))

#define drm_gpuvm_resv_assert_held(gpuvm__) \
	dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))

/**
 * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
 * external object
 * @gpuvm: the &drm_gpuvm to check
 * @obj: the &drm_gem_object to check
 *
 * Returns: true if the &drm_gem_object's &dma_resv differs from the
 * &drm_gpuvm's &dma_resv, false otherwise
 */
static inline bool
drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj)
{
	return obj && obj->resv != drm_gpuvm_resv(gpuvm);
}

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}

/**
 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but is using the &drm_gpuvm's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
 * @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))
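
/*
 * Example (editor's sketch): walking all mappings that intersect a given
 * region, e.g. to inspect their backing GEM objects. The req_* names are
 * arbitrary placeholders.
 *
 *	u64 req_addr = ..., req_range = ...;	// region of interest
 *	u64 req_end = req_addr + req_range;
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_for_each_va_range(va, gpuvm, req_addr, req_end) {
 *		// va overlaps [req_addr, req_end); note that va->gem.obj
 *		// may be NULL for sparse mappings.
 *	}
 */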

/**
 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm.
 */
#define drm_gpuvm_for_each_va(va__, gpuvm__) \
	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)

/**
 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)

/**
 * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
 *
 * This structure should be created on the stack as &drm_exec should be.
 *
 * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
 */
struct drm_gpuvm_exec {
	/**
	 * @exec: the &drm_exec structure
	 */
	struct drm_exec exec;

	/**
	 * @flags: the flags for the struct drm_exec
	 */
	uint32_t flags;

	/**
	 * @vm: the &drm_gpuvm to lock its DMA reservations
	 */
	struct drm_gpuvm *vm;

	/**
	 * @num_fences: the number of fences to reserve for the &dma_resv of the
	 * locked &drm_gem_objects
	 */
	unsigned int num_fences;

	/**
	 * @extra: Callback and corresponding private data for the driver to
	 * lock arbitrary additional &drm_gem_objects.
	 */
	struct {
		/**
		 * @extra.fn: The driver callback to lock additional
		 * &drm_gem_objects.
		 */
		int (*fn)(struct drm_gpuvm_exec *vm_exec);

		/**
		 * @extra.priv: driver private data for the @fn callback
		 */
		void *priv;
	} extra;
};

int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
			 struct drm_exec *exec,
			 unsigned int num_fences);

int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      unsigned int num_fences);

int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
			    struct drm_exec *exec,
			    u64 addr, u64 range,
			    unsigned int num_fences);

int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);

int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
			      struct drm_gem_object **objs,
			      unsigned int num_objs);

int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
			      u64 addr, u64 range);

/**
 * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * Releases all dma-resv locks of all &drm_gem_objects previously acquired
 * through drm_gpuvm_exec_lock() or its variants.
 */
static inline void
drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
{
	drm_exec_fini(&vm_exec->exec);
}
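
/*
 * Example (editor's sketch): a typical job-submission locking sequence using
 * &drm_gpuvm_exec. The job fence is assumed to exist in the surrounding
 * driver code, and the dma-resv usage values are illustrative only.
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_exec_validate(&vm_exec);
 *	if (ret)
 *		goto err_unlock;
 *
 *	// run the job and obtain its fence ...
 *	drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
 *				      DMA_RESV_USAGE_BOOKKEEP,
 *				      DMA_RESV_USAGE_BOOKKEEP);
 *	drm_gpuvm_exec_unlock(&vm_exec);
 *	return 0;
 *
 * err_unlock:
 *	drm_gpuvm_exec_unlock(&vm_exec);
 *	return ret;
 */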

int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage);

/**
 * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
 * dma-resv
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * @fence: fence to add
 * @private_usage: private dma-resv usage
 * @extobj_usage: extobj dma-resv usage
 *
 * See drm_gpuvm_resv_add_fence().
 */
static inline void
drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
				 private_usage, extobj_usage);
}

/**
 * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * See drm_gpuvm_validate().
 *
 * Returns: 0 on success, negative error code on failure.
 */
static inline int
drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}

/**
 * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
 * &drm_gem_object combination
 *
 * This structure is an abstraction representing a &drm_gpuvm and
 * &drm_gem_object combination. It serves as an indirection to accelerate
 * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
 * &drm_gem_object.
 *
 * Furthermore, it is used to cache evicted GEM objects for a certain GPU-VM
 * to accelerate validation.
 *
 * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
 * a GEM object is first mapped in a GPU-VM and release the instance once the
 * last mapping of the GEM object in this GPU-VM is unmapped.
 */
struct drm_gpuvm_bo {
	/**
	 * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
	 * counted pointer.
	 */
	struct drm_gpuvm *vm;

	/**
	 * @obj: The &drm_gem_object being mapped in @vm. This is a reference
	 * counted pointer.
	 */
	struct drm_gem_object *obj;

	/**
	 * @evicted: Indicates whether the &drm_gem_object is evicted; field
	 * protected by the &drm_gem_object's dma-resv lock.
	 */
	bool evicted;

	/**
	 * @kref: The reference count for this &drm_gpuvm_bo.
	 */
	struct kref kref;

	/**
	 * @list: Structure containing all &list_heads.
	 */
	struct {
		/**
		 * @list.gpuva: The list of linked &drm_gpuvas.
		 *
		 * It is safe to access entries from this list as long as the
		 * GEM's gpuva lock is held. See also struct drm_gem_object.
		 */
		struct list_head gpuva;

		/**
		 * @list.entry: Structure containing all &list_heads serving as
		 * entry.
		 */
		struct {
			/**
			 * @list.entry.gem: List entry to attach to the
			 * &drm_gem_object's gpuva list.
			 */
			struct list_head gem;

			/**
			 * @list.entry.extobj: List entry to attach to the
			 * &drm_gpuvm's extobj list.
			 */
			struct list_head extobj;

			/**
			 * @list.entry.evict: List entry to attach to the
			 * &drm_gpuvm's evict list.
			 */
			struct list_head evict;
		} entry;
	} list;
};

struct drm_gpuvm_bo *
drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);

struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);

/**
 * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
 * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
 *
 * This function acquires an additional reference to @vm_bo. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm_bo pointer
 */
static inline struct drm_gpuvm_bo *
drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
{
	kref_get(&vm_bo->kref);
	return vm_bo;
}

bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
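
/*
 * Example (editor's sketch): obtaining a &drm_gpuvm_bo before creating the
 * first mapping of a GEM object, then dropping the obtain reference once the
 * mapping holds its own. This assumes drm_gpuvm_bo_obtain() returns an
 * ERR_PTR on failure and that drm_gpuva_link() takes an own vm_bo reference.
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	// requires the GEM's gpuva lock to be held
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *	if (IS_ERR(vm_bo))
 *		return PTR_ERR(vm_bo);
 *
 *	drm_gpuva_link(va, vm_bo);	// mapping references vm_bo
 *	drm_gpuvm_bo_put(vm_bo);	// drop the obtain() reference
 */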

struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
		  struct drm_gem_object *obj);

void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);

/**
 * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
 * to/from the &drm_gpuvm's evicted list
 * @obj: the &drm_gem_object
 * @evict: indicates whether @obj is evicted
 *
 * See drm_gpuvm_bo_evict().
 */
static inline void
drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_gpuva_assert_lock_held(obj);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, evict);
}
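
/*
 * Example (editor's sketch): calling the helper from a buffer move/eviction
 * notification path. foo_bo_move_notify() is a hypothetical driver hook; the
 * GEM's gpuva lock must be held, as asserted above.
 *
 *	void foo_bo_move_notify(struct drm_gem_object *obj, bool evicted)
 *	{
 *		// with DRM_GPUVM_RESV_PROTECTED the gpuva lock is the
 *		// GEM's dma-resv lock
 *		drm_gpuvm_bo_gem_evict(obj, evicted);
 *	}
 */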

void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);

/**
 * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)

/**
 * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
 * &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)

/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @va.addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @va.range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @gem.offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @gem.obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, only adding the missing page table
	 * entries and updating the &drm_gpuvm accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existent
 * mapping(s), hence it consists of a maximum of two map and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If either a new mapping's start address is aligned with the start address
 * of the old mapping or the new mapping's end address is aligned with the
 * end address of the old mapping, either @prev or @next is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance of extracting driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};

/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: another &drm_gpuva_op to use as temporary storage
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_safe(), hence safe against removal of
 * elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_reverse - iterator to walk over &drm_gpuva_ops in reverse
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations in
 * reverse order.
 */
#define drm_gpuva_for_each_op_reverse(op, ops) \
	list_for_each_entry_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)

struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
			    u64 addr, u64 range,
			    struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);

void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
			struct drm_gpuva_ops *ops);
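
/*
 * Example (editor's sketch): computing the split/merge steps for a map
 * request up front and walking the resulting list. This assumes
 * drm_gpuvm_sm_map_ops_create() returns an ERR_PTR on failure; the
 * foo_handle_*() handlers are hypothetical.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			ret = foo_handle_map(&op->map);
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			ret = foo_handle_remap(&op->remap);
 *			break;
 *		case DRM_GPUVA_OP_UNMAP:
 *			ret = foo_handle_unmap(&op->unmap);
 *			break;
 *		default:
 *			ret = -EINVAL;
 *			break;
 *		}
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 *	return ret;
 */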

static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	drm_gpuva_init(va, op->va.addr, op->va.range,
		       op->gem.obj, op->gem.offset);
}

/**
 * struct drm_gpuvm_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuvm_ops {
	/**
	 * @vm_free: called when the last reference of a struct drm_gpuvm is
	 * dropped
	 *
	 * This callback is mandatory.
	 */
	void (*vm_free)(struct drm_gpuvm *gpuvm);

	/**
	 * @op_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @vm_bo_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuvm_bo *(*vm_bo_alloc)(void);

	/**
	 * @vm_bo_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);

	/**
	 * @vm_bo_validate: called from drm_gpuvm_validate()
	 *
	 * Drivers receive this callback for every evicted &drm_gem_object being
	 * mapped in the corresponding &drm_gpuvm.
	 *
	 * Typically, drivers would call their driver specific variant of
	 * ttm_bo_validate() from within this callback.
	 */
	int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
			      struct drm_exec *exec);

	/**
	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuvm_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to split up an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be split up.
	 * This is the case when either a newly requested mapping overlaps or
	 * is enclosed by an existent mapping or a partial unmap of an existent
	 * mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to unmap an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be unmapped.
	 * This is the case when either a newly requested mapping encloses an
	 * existent mapping or an unmap of an existent mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};

int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     u64 addr, u64 range,
		     struct drm_gem_object *obj, u64 offset);

int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
		       u64 addr, u64 range);
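
/*
 * Example (editor's sketch): driving the split/merge steps through callbacks
 * instead of a pre-computed ops list. The foo_*() callbacks and the priv
 * cookie are hypothetical.
 *
 *	static const struct drm_gpuvm_ops foo_gpuvm_ops = {
 *		.vm_free = foo_vm_free,
 *		.sm_step_map = foo_step_map,
 *		.sm_step_remap = foo_step_remap,
 *		.sm_step_unmap = foo_step_unmap,
 *	};
 *
 *	// In each step, drivers typically update their page tables and call
 *	// drm_gpuva_map(), drm_gpuva_remap() or drm_gpuva_unmap() to keep
 *	// the GPUVM's view in sync, e.g.:
 *
 *	static int foo_step_unmap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct drm_gpuva *va = op->unmap.va;
 *
 *		foo_unmap_pages(priv, va->va.addr, va->va.range);
 *		drm_gpuva_unmap(&op->unmap);
 *		drm_gpuva_unlink(va);
 *		return 0;
 *	}
 *
 *	// A map request is then simply:
 *	ret = drm_gpuvm_sm_map(gpuvm, priv, addr, range, obj, offset);
 */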

void drm_gpuva_map(struct drm_gpuvm *gpuvm,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);

/**
 * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
 * the unmap stage of a remap op.
 * @op: Remap op.
 * @start_addr: Output pointer for the start of the required unmap.
 * @range: Output pointer for the length of the required unmap.
 *
 * The given start address and range will be set such that they represent the
 * range of the address space that was previously covered by the mapping being
 * re-mapped, but is now empty.
 */
static inline void
drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
				  u64 *start_addr, u64 *range)
{
	const u64 va_start = op->prev ?
			     op->prev->va.addr + op->prev->va.range :
			     op->unmap->va->va.addr;
	const u64 va_end = op->next ?
			   op->next->va.addr :
			   op->unmap->va->va.addr + op->unmap->va->va.range;

	if (start_addr)
		*start_addr = va_start;
	if (range)
		*range = va_end - va_start;
}
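
/*
 * Example (editor's sketch): using the helper from a remap step to flush
 * exactly the region that becomes unmapped. foo_unmap_pages() and the ctx
 * structure with pre-allocated prev/next &drm_gpuvas are hypothetical.
 *
 *	static int foo_step_remap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		u64 addr, range;
 *
 *		drm_gpuva_op_remap_to_unmap_range(&op->remap, &addr, &range);
 *		foo_unmap_pages(ctx, addr, range);
 *
 *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
 *		drm_gpuva_unlink(op->remap.unmap->va);
 *		return 0;
 *	}
 */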

#endif /* __DRM_GPUVM_H__ */