   1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
   2/*
   3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice shall be included in
  13 * all copies or substantial portions of the Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21 * OTHER DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#ifndef KFD_PRIV_H_INCLUDED
  25#define KFD_PRIV_H_INCLUDED
  26
  27#include <linux/hashtable.h>
  28#include <linux/mmu_notifier.h>
  29#include <linux/memremap.h>
  30#include <linux/mutex.h>
  31#include <linux/types.h>
  32#include <linux/atomic.h>
  33#include <linux/workqueue.h>
  34#include <linux/spinlock.h>
  35#include <linux/kfd_ioctl.h>
  36#include <linux/idr.h>
  37#include <linux/kfifo.h>
  38#include <linux/seq_file.h>
  39#include <linux/kref.h>
  40#include <linux/sysfs.h>
  41#include <linux/device_cgroup.h>
  42#include <drm/drm_file.h>
  43#include <drm/drm_drv.h>
  44#include <drm/drm_device.h>
  45#include <drm/drm_ioctl.h>
  46#include <kgd_kfd_interface.h>
  47#include <linux/swap.h>
  48
  49#include "amd_shared.h"
  50#include "amdgpu.h"
  51
  52#define KFD_MAX_RING_ENTRY_SIZE	8
  53
  54#define KFD_SYSFS_FILE_MODE 0444
  55
  56/* GPU ID hash width in bits */
  57#define KFD_GPU_ID_HASH_WIDTH 16
  58
  59/* Use upper bits of mmap offset to store KFD driver specific information.
  60 * BITS[63:62] - Encode MMAP type
   61 * BITS[61:46] - Encode gpu_id, to identify which GPU the offset belongs to
  62 * BITS[45:0]  - MMAP offset value
  63 *
  64 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
   65 *  defines are w.r.t. PAGE_SIZE
  66 */
  67#define KFD_MMAP_TYPE_SHIFT	62
  68#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
  69#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
  70#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
  71#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
  72#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)
  73
  74#define KFD_MMAP_GPU_ID_SHIFT 46
  75#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
  76				<< KFD_MMAP_GPU_ID_SHIFT)
  77#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
  78				& KFD_MMAP_GPU_ID_MASK)
  79#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
  80				>> KFD_MMAP_GPU_ID_SHIFT)
  81
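/*
 * Illustrative sketch, not part of the original header: how a driver-side
 * mmap offset can be composed and decomposed with the macros above. The
 * helper names are hypothetical and only demonstrate the bit layout
 * (BITS[63:62] type, BITS[61:46] gpu_id, BITS[45:0] offset).
 */
static inline uint64_t kfd_example_doorbell_mmap_offset(uint32_t gpu_id)
{
	return KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
}

static inline uint32_t kfd_example_gpu_id_from_mmap_offset(uint64_t offset)
{
	return KFD_MMAP_GET_GPU_ID(offset);
}
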
   82/*
   83 * When working with the cp scheduler, the HIQ must be assigned to a fixed
   84 * hqd slot, either manually or via the amdgpu driver. These are the fixed
   85 * HIQ hqd slot definitions for Kaveri. On Kaveri only the first ME's
   86 * queues participate in cp scheduling, so with that in mind the HIQ slot
   87 * is placed in the second ME.
   88 */
  89#define KFD_CIK_HIQ_PIPE 4
  90#define KFD_CIK_HIQ_QUEUE 0
  91
  92/* Macro for allocating structures */
  93#define kfd_alloc_struct(ptr_to_struct)	\
  94	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
  95
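/*
 * Illustrative example, not part of the original header: given a pointer
 * variable such as "struct kfd_process_device *pdd;",
 *
 *	pdd = kfd_alloc_struct(pdd);
 *
 * expands to
 *
 *	pdd = (typeof(pdd))kzalloc(sizeof(*pdd), GFP_KERNEL);
 *
 * i.e. the allocation size and the cast are both derived from the pointer's
 * own type, and the returned memory is zero-initialized (NULL on failure).
 */
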
  96#define KFD_MAX_NUM_OF_PROCESSES 512
  97#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
  98
  99/*
 100 * Size of the per-process TBA+TMA buffer: 2 pages
 101 *
 102 * The first chunk is the TBA used for the CWSR ISA code. The second
 103 * chunk is used as TMA for user-mode trap handler setup in daisy-chain mode.
 104 */
 105#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
 106#define KFD_CWSR_TMA_OFFSET (PAGE_SIZE + 2048)
 107
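/*
 * Illustrative sketch, not part of the original header: the TMA sits at a
 * fixed offset inside the two-page TBA+TMA buffer, so a per-process TMA
 * address is typically derived from the TBA base address as shown below
 * (the helper name is hypothetical).
 */
static inline uint64_t kfd_example_tma_addr(uint64_t tba_addr)
{
	return tba_addr + KFD_CWSR_TMA_OFFSET;
}
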
 108#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
 109	(KFD_MAX_NUM_OF_PROCESSES *			\
 110			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 111
 112#define KFD_KERNEL_QUEUE_SIZE 2048
 113
 114#define KFD_UNMAP_LATENCY_MS	(4000)
 115
 116#define KFD_MAX_SDMA_QUEUES	128
 117
  118/*
  119 * 512 = 0x200 is the doorbell index distance between SDMA RLC (2*i) and
  120 * (2*i+1) in the same SDMA engine on SOC15, which has 8-byte doorbells
  121 * for SDMA. A distance of 512 8-byte doorbells (512 * 8 = 4096 bytes,
  122 * i.e. exactly one page) ensures that the SDMA RLC (2*i+1) doorbells
  123 * (in terms of the lower 12 address bits) lie exactly within the OFFSET
  124 * and SIZE programmed in registers such as BIF_SDMA0_DOORBELL_RANGE.
  125 */
 126#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
 127
 128/**
 129 * enum kfd_ioctl_flags - KFD ioctl flags
 130 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 131 * userspace can use a given ioctl.
 132 */
 133enum kfd_ioctl_flags {
 134	/*
 135	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
 136	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
 137	 * perform privileged operations and load arbitrary data into MQDs and
 138	 * eventually HQD registers when the queue is mapped by HWS. In order to
 139	 * prevent this we should perform additional security checks.
 140	 *
 141	 * This is equivalent to callers with the CHECKPOINT_RESTORE capability.
 142	 *
 143	 * Note: Since earlier versions of docker do not support CHECKPOINT_RESTORE,
 144	 * we also allow ioctls with SYS_ADMIN capability.
 145	 */
 146	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
 147};
 148/*
 149 * Kernel module parameter to specify maximum number of supported queues per
 150 * device
 151 */
 152extern int max_num_of_queues_per_device;
 153
 154
 155/* Kernel module parameter to specify the scheduling policy */
 156extern int sched_policy;
 157
 158/*
 159 * Kernel module parameter to specify the maximum process
 160 * number per HW scheduler
 161 */
 162extern int hws_max_conc_proc;
 163
 164extern int cwsr_enable;
 165
 166/*
 167 * Kernel module parameter to specify whether to send sigterm to HSA process on
 168 * unhandled exception
 169 */
 170extern int send_sigterm;
 171
 172/*
  173 * This kernel module parameter is used to simulate a large-BAR machine on
  174 * machines that do not have large BAR enabled.
 175 */
 176extern int debug_largebar;
 177
 178/* Set sh_mem_config.retry_disable on GFX v9 */
 179extern int amdgpu_noretry;
 180
 181/* Halt if HWS hang is detected */
 182extern int halt_if_hws_hang;
 183
  184/* Whether MEC FW supports GWS barriers */
 185extern bool hws_gws_support;
 186
 187/* Queue preemption timeout in ms */
 188extern int queue_preemption_timeout_ms;
 189
 190/*
 191 * Don't evict process queues on vm fault
 192 */
 193extern int amdgpu_no_queue_eviction_on_vm_fault;
 194
 195/* Enable eviction debug messages */
 196extern bool debug_evictions;
 197
 198extern struct mutex kfd_processes_mutex;
 199
 200enum cache_policy {
 201	cache_policy_coherent,
 202	cache_policy_noncoherent
 203};
 204
 205#define KFD_GC_VERSION(dev) (amdgpu_ip_version((dev)->adev, GC_HWIP, 0))
 206#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
 207#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
 208	((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) ||	\
 209	 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) ||	\
 210	 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)))
 211
 212struct kfd_node;
 213
 214struct kfd_event_interrupt_class {
 215	bool (*interrupt_isr)(struct kfd_node *dev,
 216			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
 217			bool *patched_flag);
 218	void (*interrupt_wq)(struct kfd_node *dev,
 219			const uint32_t *ih_ring_entry);
 220};
 221
 222struct kfd_device_info {
 223	uint32_t gfx_target_version;
 224	const struct kfd_event_interrupt_class *event_interrupt_class;
 225	unsigned int max_pasid_bits;
 226	unsigned int max_no_of_hqd;
 227	unsigned int doorbell_size;
 228	size_t ih_ring_entry_size;
 229	uint8_t num_of_watch_points;
 230	uint16_t mqd_size_aligned;
 231	bool supports_cwsr;
 232	bool needs_pci_atomics;
 233	uint32_t no_atomic_fw_version;
 234	unsigned int num_sdma_queues_per_engine;
 235	unsigned int num_reserved_sdma_queues_per_engine;
 236	DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
 237};
 238
 239unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
 240unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);
 241
 242struct kfd_mem_obj {
 243	uint32_t range_start;
 244	uint32_t range_end;
 245	uint64_t gpu_addr;
 246	uint32_t *cpu_ptr;
 247	void *gtt_mem;
 248};
 249
 250struct kfd_vmid_info {
 251	uint32_t first_vmid_kfd;
 252	uint32_t last_vmid_kfd;
 253	uint32_t vmid_num_kfd;
 254};
 255
 256#define MAX_KFD_NODES	8
 257
 258struct kfd_dev;
 259
 260struct kfd_node {
 261	unsigned int node_id;
 262	struct amdgpu_device *adev;     /* Duplicated here along with keeping
 263					 * a copy in kfd_dev to save a hop
 264					 */
 265	const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
 266					      * keeping a copy in kfd_dev to
 267					      * save a hop
 268					      */
 269	struct kfd_vmid_info vm_info;
 270	unsigned int id;                /* topology stub index */
 271	uint32_t xcc_mask; /* Instance mask of XCCs present */
 272	struct amdgpu_xcp *xcp;
 273
 274	/* Interrupts */
 275	struct kfifo ih_fifo;
 276	struct work_struct interrupt_work;
 277	spinlock_t interrupt_lock;
 278
 279	/*
 280	 * Interrupts of interest to KFD are copied
 281	 * from the HW ring into a SW ring.
 282	 */
 283	bool interrupts_active;
 284	uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */
 285
 286	/* QCM Device instance */
 287	struct device_queue_manager *dqm;
 288
 289	/* Global GWS resource shared between processes */
 290	void *gws;
 291	bool gws_debug_workaround;
 292
 293	/* Clients watching SMI events */
 294	struct list_head smi_clients;
 295	spinlock_t smi_lock;
 296	uint32_t reset_seq_num;
 297
 298	/* SRAM ECC flag */
 299	atomic_t sram_ecc_flag;
 300
  301	/* SPM process id */
 302	unsigned int spm_pasid;
 303
 304	/* Maximum process number mapped to HW scheduler */
 305	unsigned int max_proc_per_quantum;
 306
 307	unsigned int compute_vmid_bitmap;
 308
 309	struct kfd_local_mem_info local_mem_info;
 310
 311	struct kfd_dev *kfd;
 312
 313	/* Track per device allocated watch points */
 314	uint32_t alloc_watch_ids;
 315	spinlock_t watch_points_lock;
 316};
 317
 318struct kfd_dev {
 319	struct amdgpu_device *adev;
 320
 321	struct kfd_device_info device_info;
 322
  323	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer to the doorbell
  324					   * page used by the kernel queue
 325					   */
 326
 327	struct kgd2kfd_shared_resources shared_resources;
 328
 329	const struct kfd2kgd_calls *kfd2kgd;
 330	struct mutex doorbell_mutex;
 331
 332	void *gtt_mem;
 333	uint64_t gtt_start_gpu_addr;
 334	void *gtt_start_cpu_ptr;
 335	void *gtt_sa_bitmap;
 336	struct mutex gtt_sa_lock;
 337	unsigned int gtt_sa_chunk_size;
 338	unsigned int gtt_sa_num_of_chunks;
 339
 340	bool init_complete;
 341
 342	/* Firmware versions */
 343	uint16_t mec_fw_version;
 344	uint16_t mec2_fw_version;
 345	uint16_t sdma_fw_version;
 346
 347	/* CWSR */
 348	bool cwsr_enabled;
 349	const void *cwsr_isa;
 350	unsigned int cwsr_isa_size;
 351
 352	/* xGMI */
 353	uint64_t hive_id;
 354
 355	bool pci_atomic_requested;
 356
 357	/* Compute Profile ref. count */
 358	atomic_t compute_profile;
 359
 360	struct ida doorbell_ida;
 361	unsigned int max_doorbell_slices;
 362
 363	int noretry;
 364
 365	struct kfd_node *nodes[MAX_KFD_NODES];
 366	unsigned int num_nodes;
 367
 368	struct workqueue_struct *ih_wq;
 369
 370	/* Kernel doorbells for KFD device */
 371	struct amdgpu_bo *doorbells;
 372
 373	/* bitmap for dynamic doorbell allocation from doorbell object */
 374	unsigned long *doorbell_bitmap;
 375};
 376
 377enum kfd_mempool {
 378	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
 379	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
 380	KFD_MEMPOOL_FRAMEBUFFER = 3,
 381};
 382
 383/* Character device interface */
 384int kfd_chardev_init(void);
 385void kfd_chardev_exit(void);
 386
 387/**
 388 * enum kfd_unmap_queues_filter - Enum for queue filters.
 389 *
 390 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 391 *						running queues list.
 392 *
 393 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 394 *						in the run list.
 395 *
  396 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 397 *						specific process.
 398 *
 399 */
 400enum kfd_unmap_queues_filter {
 401	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
 402	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
 403	KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
 404};
 405
 406/**
 407 * enum kfd_queue_type - Enum for various queue types.
 408 *
 409 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 410 *
 411 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 412 *
 413 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 414 *
 415 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 416 *
 417 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 418 *
 419 * @KFD_QUEUE_TYPE_SDMA_BY_ENG_ID:  SDMA user mode queue with target SDMA engine ID.
 420 */
 421enum kfd_queue_type  {
 422	KFD_QUEUE_TYPE_COMPUTE,
 423	KFD_QUEUE_TYPE_SDMA,
 424	KFD_QUEUE_TYPE_HIQ,
 425	KFD_QUEUE_TYPE_DIQ,
 426	KFD_QUEUE_TYPE_SDMA_XGMI,
 427	KFD_QUEUE_TYPE_SDMA_BY_ENG_ID
 428};
 429
 430enum kfd_queue_format {
 431	KFD_QUEUE_FORMAT_PM4,
 432	KFD_QUEUE_FORMAT_AQL
 433};
 434
 435enum KFD_QUEUE_PRIORITY {
 436	KFD_QUEUE_PRIORITY_MINIMUM = 0,
 437	KFD_QUEUE_PRIORITY_MAXIMUM = 15
 438};
 439
 440/**
 441 * struct queue_properties
 442 *
 443 * @type: The queue type.
 444 *
 445 * @queue_id: Queue identifier.
 446 *
 447 * @queue_address: Queue ring buffer address.
 448 *
 449 * @queue_size: Queue ring buffer size.
 450 *
 451 * @priority: Defines the queue priority relative to other queues in the
 452 * process.
 453 * This is just an indication and HW scheduling may override the priority as
 454 * necessary while keeping the relative prioritization.
  455 * The priority granularity is from 0 to 0xf, where 0xf is the highest
  456 * priority. Currently all queues are initialized with the highest priority.
 457 *
  458 * @queue_percent: This field is partially implemented; currently a value of
  459 * zero in this field marks the queue as inactive.
 460 *
  461 * @read_ptr: User space address which points to the number of dwords the
  462 * cp has read from the ring buffer. This field is updated automatically by the H/W.
 463 *
 464 * @write_ptr: Defines the number of dwords written to the ring buffer.
 465 *
 466 * @doorbell_ptr: Notifies the H/W of new packet written to the queue ring
 467 * buffer. This field should be similar to write_ptr and the user should
 468 * update this field after updating the write_ptr.
 469 *
 470 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 471 *
  472 * @is_interop: Defines if this is an interop queue. Interop queue means that
 473 * the queue can access both graphics and compute resources.
 474 *
 475 * @is_evicted: Defines if the queue is evicted. Only active queues
 476 * are evicted, rendering them inactive.
 477 *
 478 * @is_active: Defines if the queue is active or not. @is_active and
 479 * @is_evicted are protected by the DQM lock.
 480 *
  481 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
  482 * @is_gws should be protected by the DQM lock, since changing it may
  483 * require updating the DQM state for the number of GWS queues.
 484 *
  485 * @vmid: If the scheduling mode is no-cp-scheduling, this field defines the
  486 * vmid of the queue.
 487 *
  488 * This structure represents the queue properties for each queue, no matter
  489 * whether it is a user mode or a kernel mode queue.
 490 *
 491 */
 492
 493struct queue_properties {
 494	enum kfd_queue_type type;
 495	enum kfd_queue_format format;
 496	unsigned int queue_id;
 497	uint64_t queue_address;
 498	uint64_t  queue_size;
 499	uint32_t priority;
 500	uint32_t queue_percent;
 501	void __user *read_ptr;
 502	void __user *write_ptr;
 503	void __iomem *doorbell_ptr;
 504	uint32_t doorbell_off;
 505	bool is_interop;
 506	bool is_evicted;
 507	bool is_suspended;
 508	bool is_being_destroyed;
 509	bool is_active;
 510	bool is_gws;
 511	uint32_t pm4_target_xcc;
 512	bool is_dbg_wa;
 513	bool is_user_cu_masked;
 514	/* Not relevant for user mode queues in cp scheduling */
 515	unsigned int vmid;
  516	/* Relevant only for sdma queues */
 517	uint32_t sdma_engine_id;
 518	uint32_t sdma_queue_id;
 519	uint32_t sdma_vm_addr;
 520	/* Relevant only for VI */
 521	uint64_t eop_ring_buffer_address;
 522	uint32_t eop_ring_buffer_size;
 523	uint64_t ctx_save_restore_area_address;
 524	uint32_t ctx_save_restore_area_size;
 525	uint32_t ctl_stack_size;
 526	uint64_t tba_addr;
 527	uint64_t tma_addr;
 528	uint64_t exception_status;
 529
 530	struct amdgpu_bo *wptr_bo;
 531	struct amdgpu_bo *rptr_bo;
 532	struct amdgpu_bo *ring_bo;
 533	struct amdgpu_bo *eop_buf_bo;
 534	struct amdgpu_bo *cwsr_bo;
 535};
 536
 537#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
 538			    (q).queue_address != 0 &&	\
 539			    (q).queue_percent > 0 &&	\
 540			    !(q).is_evicted &&		\
 541			    !(q).is_suspended)
 542
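/*
 * Illustrative wrapper, not part of the original header: QUEUE_IS_ACTIVE()
 * evaluates a struct queue_properties by value, so a caller holding a
 * pointer typically passes the dereferenced properties (hypothetical helper).
 */
static inline bool kfd_example_queue_is_active(const struct queue_properties *qp)
{
	return QUEUE_IS_ACTIVE(*qp);
}
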
 543enum mqd_update_flag {
 544	UPDATE_FLAG_DBG_WA_ENABLE = 1,
 545	UPDATE_FLAG_DBG_WA_DISABLE = 2,
 546	UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
 547};
 548
 549struct mqd_update_info {
 550	union {
 551		struct {
 552			uint32_t count; /* Must be a multiple of 32 */
 553			uint32_t *ptr;
 554		} cu_mask;
 555	};
 556	enum mqd_update_flag update_flag;
 557};
 558
 559/**
 560 * struct queue
 561 *
 562 * @list: Queue linked list.
 563 *
 564 * @mqd: The queue MQD (memory queue descriptor).
 565 *
 566 * @mqd_mem_obj: The MQD local gpu memory object.
 567 *
 568 * @gart_mqd_addr: The MQD gart mc address.
 569 *
 570 * @properties: The queue properties.
 571 *
  572 * @mec: Used only in no cp scheduling mode and identifies the micro engine id
 573 *	 that the queue should be executed on.
 574 *
 575 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 576 *	  id.
 577 *
  578 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 579 *
 580 * @process: The kfd process that created this queue.
 581 *
 582 * @device: The kfd device that created this queue.
 583 *
  584 * @gws: Points to the gws kgd_mem if this is a gws control queue; NULL
 585 * otherwise.
 586 *
 587 * This structure represents user mode compute queues.
 588 * It contains all the necessary data to handle such queues.
 589 *
 590 */
 591
 592struct queue {
 593	struct list_head list;
 594	void *mqd;
 595	struct kfd_mem_obj *mqd_mem_obj;
 596	uint64_t gart_mqd_addr;
 597	struct queue_properties properties;
 598
 599	uint32_t mec;
 600	uint32_t pipe;
 601	uint32_t queue;
 602
 603	unsigned int sdma_id;
 604	unsigned int doorbell_id;
 605
 606	struct kfd_process	*process;
 607	struct kfd_node		*device;
 608	void *gws;
 609
 610	/* procfs */
 611	struct kobject kobj;
 612
 613	void *gang_ctx_bo;
 614	uint64_t gang_ctx_gpu_addr;
 615	void *gang_ctx_cpu_ptr;
 616
 617	struct amdgpu_bo *wptr_bo_gart;
 618};
 619
 620enum KFD_MQD_TYPE {
 621	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
 622	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
 623	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
 624	KFD_MQD_TYPE_DIQ,		/* for diq */
 625	KFD_MQD_TYPE_MAX
 626};
 627
 628enum KFD_PIPE_PRIORITY {
 629	KFD_PIPE_PRIORITY_CS_LOW = 0,
 630	KFD_PIPE_PRIORITY_CS_MEDIUM,
 631	KFD_PIPE_PRIORITY_CS_HIGH
 632};
 633
 634struct scheduling_resources {
 635	unsigned int vmid_mask;
 636	enum kfd_queue_type type;
 637	uint64_t queue_mask;
 638	uint64_t gws_mask;
 639	uint32_t oac_mask;
 640	uint32_t gds_heap_base;
 641	uint32_t gds_heap_size;
 642};
 643
 644struct process_queue_manager {
 645	/* data */
 646	struct kfd_process	*process;
 647	struct list_head	queues;
 648	unsigned long		*queue_slot_bitmap;
 649};
 650
 651struct qcm_process_device {
 652	/* The Device Queue Manager that owns this data */
 653	struct device_queue_manager *dqm;
 654	struct process_queue_manager *pqm;
 655	/* Queues list */
 656	struct list_head queues_list;
 657	struct list_head priv_queue_list;
 658
 659	unsigned int queue_count;
 660	unsigned int vmid;
 661	bool is_debug;
 662	unsigned int evicted; /* eviction counter, 0=active */
 663
 664	/* This flag tells if we should reset all wavefronts on
 665	 * process termination
 666	 */
 667	bool reset_wavefronts;
 668
 669	/* This flag tells us if this process has a GWS-capable
 670	 * queue that will be mapped into the runlist. It's
 671	 * possible to request a GWS BO, but not have the queue
 672	 * currently mapped, and this changes how the MAP_PROCESS
 673	 * PM4 packet is configured.
 674	 */
 675	bool mapped_gws_queue;
 676
 677	/* All the memory management data should be here too */
 678	uint64_t gds_context_area;
 679	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
 680	uint64_t page_table_base;
 681	uint32_t sh_mem_config;
 682	uint32_t sh_mem_bases;
 683	uint32_t sh_mem_ape1_base;
 684	uint32_t sh_mem_ape1_limit;
 685	uint32_t gds_size;
 686	uint32_t num_gws;
 687	uint32_t num_oac;
 688	uint32_t sh_hidden_private_base;
 689
 690	/* CWSR memory */
 691	struct kgd_mem *cwsr_mem;
 692	void *cwsr_kaddr;
 693	uint64_t cwsr_base;
 694	uint64_t tba_addr;
 695	uint64_t tma_addr;
 696
 697	/* IB memory */
 698	struct kgd_mem *ib_mem;
 699	uint64_t ib_base;
 700	void *ib_kaddr;
 701
 702	/* doorbells for kfd process */
 703	struct amdgpu_bo *proc_doorbells;
 704
 705	/* bitmap for dynamic doorbell allocation from the bo */
 706	unsigned long *doorbell_bitmap;
 707};
 708
 709/* KFD Memory Eviction */
 710
 711/* Approx. wait time before attempting to restore evicted BOs */
 712#define PROCESS_RESTORE_TIME_MS 100
 713/* Approx. back off time if restore fails due to lack of memory */
 714#define PROCESS_BACK_OFF_TIME_MS 100
 715/* Approx. time before evicting the process again */
 716#define PROCESS_ACTIVE_TIME_MS 10
 717
 718/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 719 * idr_handle in the least significant 4 bytes
 720 */
 721#define MAKE_HANDLE(gpu_id, idr_handle) \
 722	(((uint64_t)(gpu_id) << 32) + idr_handle)
 723#define GET_GPU_ID(handle) (handle >> 32)
 724#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
 725
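/*
 * Illustrative sketch, not part of the original header: packing a gpu_id and
 * an idr_handle into a 64-bit buffer handle and splitting it again with the
 * macros above (helper names are hypothetical).
 */
static inline uint64_t kfd_example_pack_handle(uint32_t gpu_id, uint32_t idr_handle)
{
	return MAKE_HANDLE(gpu_id, idr_handle);
}

static inline void kfd_example_unpack_handle(uint64_t handle, uint32_t *gpu_id,
					     uint32_t *idr_handle)
{
	*gpu_id = GET_GPU_ID(handle);
	*idr_handle = GET_IDR_HANDLE(handle);
}
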
 726enum kfd_pdd_bound {
 727	PDD_UNBOUND = 0,
 728	PDD_BOUND,
 729	PDD_BOUND_SUSPENDED,
 730};
 731
 732#define MAX_SYSFS_FILENAME_LEN 15
 733
 734/*
 735 * SDMA counter runs at 100MHz frequency.
 736 * We display SDMA activity in microsecond granularity in sysfs.
 737 * As a result, the divisor is 100.
 738 */
 739#define SDMA_ACTIVITY_DIVISOR  100
 740
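/*
 * Illustrative conversion, not part of the original header: with a 100 MHz
 * counter there are 100 ticks per microsecond, so dividing a raw SDMA
 * activity counter by SDMA_ACTIVITY_DIVISOR yields microseconds
 * (hypothetical helper; plain 64-bit division is kept for brevity).
 */
static inline uint64_t kfd_example_sdma_activity_us(uint64_t counter)
{
	return counter / SDMA_ACTIVITY_DIVISOR;
}
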
 741/* Data that is per-process-per device. */
 742struct kfd_process_device {
 743	/* The device that owns this data. */
 744	struct kfd_node *dev;
 745
 746	/* The process that owns this kfd_process_device. */
 747	struct kfd_process *process;
 748
 749	/* per-process-per device QCM data structure */
 750	struct qcm_process_device qpd;
 751
  752	/* Apertures */
 753	uint64_t lds_base;
 754	uint64_t lds_limit;
 755	uint64_t gpuvm_base;
 756	uint64_t gpuvm_limit;
 757	uint64_t scratch_base;
 758	uint64_t scratch_limit;
 759
 760	/* VM context for GPUVM allocations */
 761	struct file *drm_file;
 762	void *drm_priv;
 763
 764	/* GPUVM allocations storage */
 765	struct idr alloc_idr;
 766
  767	/* Flag used to tell whether the pdd has dequeued from the dqm.
  768	 * This is used to prevent dev->dqm->ops.process_termination() from
  769	 * being called twice when it has already been called in the IOMMU
  770	 * callback function.
  771	 */
 772	bool already_dequeued;
 773	bool runtime_inuse;
 774
 775	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
 776	enum kfd_pdd_bound bound;
 777
 778	/* VRAM usage */
 779	atomic64_t vram_usage;
 780	struct attribute attr_vram;
 781	char vram_filename[MAX_SYSFS_FILENAME_LEN];
 782
 783	/* SDMA activity tracking */
 784	uint64_t sdma_past_activity_counter;
 785	struct attribute attr_sdma;
 786	char sdma_filename[MAX_SYSFS_FILENAME_LEN];
 787
 788	/* Eviction activity tracking */
 789	uint64_t last_evict_timestamp;
 790	atomic64_t evict_duration_counter;
 791	struct attribute attr_evict;
 792
 793	struct kobject *kobj_stats;
 794
 795	/*
 796	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
 797	 * that is associated with device encoded by "this" struct instance. The
 798	 * value reflects CU usage by all of the waves launched by this process
 799	 * on this device. A very important property of occupancy parameter is
 800	 * that its value is a snapshot of current use.
 801	 *
 802	 * Following is to be noted regarding how this parameter is reported:
 803	 *
  804	 *  The number of waves that a CU can launch is limited by a couple of
  805	 *  parameters. These are encoded by the struct amdgpu_cu_info instance
 806	 *  that is part of every device definition. For GFX9 devices this
 807	 *  translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
 808	 *  do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
 809	 *  when they do use scratch memory. This could change for future
 810	 *  devices and therefore this example should be considered as a guide.
 811	 *
  812	 *  All CUs of a device are available to the process. This may not be true
  813	 *  under certain conditions - e.g. CU masking.
  814	 *
  815	 *  Finally, the number of CUs occupied by a process is affected both by the
  816	 *  number of CUs the device has and by the number of other competing processes.
 817	 */
 818	struct attribute attr_cu_occupancy;
 819
 820	/* sysfs counters for GPU retry fault and page migration tracking */
 821	struct kobject *kobj_counters;
 822	struct attribute attr_faults;
 823	struct attribute attr_page_in;
 824	struct attribute attr_page_out;
 825	uint64_t faults;
 826	uint64_t page_in;
 827	uint64_t page_out;
 828
  829	/* Exception code status */
 830	uint64_t exception_status;
 831	void *vm_fault_exc_data;
 832	size_t vm_fault_exc_data_size;
 833
 834	/* Tracks debug per-vmid request settings */
 835	uint32_t spi_dbg_override;
 836	uint32_t spi_dbg_launch_mode;
 837	uint32_t watch_points[4];
 838	uint32_t alloc_watch_ids;
 839
 840	/*
 841	 * If this process has been checkpointed before, then the user
 842	 * application will use the original gpu_id on the
 843	 * checkpointed node to refer to this device.
 844	 */
 845	uint32_t user_gpu_id;
 846
 847	void *proc_ctx_bo;
 848	uint64_t proc_ctx_gpu_addr;
 849	void *proc_ctx_cpu_ptr;
 850
 851	/* Tracks queue reset status */
 852	bool has_reset_queue;
 853};
 854
 855#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
 856
 857struct svm_range_list {
 858	struct mutex			lock;
 859	struct rb_root_cached		objects;
 860	struct list_head		list;
 861	struct work_struct		deferred_list_work;
 862	struct list_head		deferred_range_list;
 863	struct list_head                criu_svm_metadata_list;
 864	spinlock_t			deferred_list_lock;
 865	atomic_t			evicted_ranges;
 866	atomic_t			drain_pagefaults;
 867	struct delayed_work		restore_work;
 868	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
 869	struct task_struct		*faulting_task;
  870	/* check point ts decides if page fault recovery needs to be dropped */
 871	uint64_t			checkpoint_ts[MAX_GPU_INSTANCE];
 872
 873	/* Default granularity to use in buffer migration
 874	 * and restoration of backing memory while handling
 875	 * recoverable page faults
 876	 */
 877	uint8_t default_granularity;
 878};
 879
 880/* Process data */
 881struct kfd_process {
 882	/*
 883	 * kfd_process are stored in an mm_struct*->kfd_process*
 884	 * hash table (kfd_processes in kfd_process.c)
 885	 */
 886	struct hlist_node kfd_processes;
 887
 888	/*
 889	 * Opaque pointer to mm_struct. We don't hold a reference to
 890	 * it so it should never be dereferenced from here. This is
 891	 * only used for looking up processes by their mm.
 892	 */
 893	void *mm;
 894
 895	struct kref ref;
 896	struct work_struct release_work;
 897
 898	struct mutex mutex;
 899
 900	/*
 901	 * In any process, the thread that started main() is the lead
 902	 * thread and outlives the rest.
 903	 * It is here because amd_iommu_bind_pasid wants a task_struct.
 904	 * It can also be used for safely getting a reference to the
 905	 * mm_struct of the process.
 906	 */
 907	struct task_struct *lead_thread;
 908
 909	/* We want to receive a notification when the mm_struct is destroyed */
 910	struct mmu_notifier mmu_notifier;
 911
 912	u32 pasid;
 913
 914	/*
 915	 * Array of kfd_process_device pointers,
 916	 * one for each device the process is using.
 917	 */
 918	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
 919	uint32_t n_pdds;
 920
 921	struct process_queue_manager pqm;
 922
  923	/* Is the user space process 32 bit? */
 924	bool is_32bit_user_mode;
 925
 926	/* Event-related data */
 927	struct mutex event_mutex;
 928	/* Event ID allocator and lookup */
 929	struct idr event_idr;
 930	/* Event page */
 931	u64 signal_handle;
 932	struct kfd_signal_page *signal_page;
 933	size_t signal_mapped_size;
 934	size_t signal_event_count;
 935	bool signal_event_limit_reached;
 936
 937	/* Information used for memory eviction */
 938	void *kgd_process_info;
 939	/* Eviction fence that is attached to all the BOs of this process. The
  940	 * fence will be triggered during eviction and a new one will be created
 941	 * during restore
 942	 */
 943	struct dma_fence __rcu *ef;
 944
 945	/* Work items for evicting and restoring BOs */
 946	struct delayed_work eviction_work;
 947	struct delayed_work restore_work;
 948	/* seqno of the last scheduled eviction */
 949	unsigned int last_eviction_seqno;
 950	/* Approx. the last timestamp (in jiffies) when the process was
 951	 * restored after an eviction
 952	 */
 953	unsigned long last_restore_timestamp;
 954
 955	/* Indicates device process is debug attached with reserved vmid. */
 956	bool debug_trap_enabled;
 957
 958	/* per-process-per device debug event fd file */
 959	struct file *dbg_ev_file;
 960
 961	/* If the process is a kfd debugger, we need to know so we can clean
 962	 * up at exit time.  If a process enables debugging on itself, it does
 963	 * its own clean-up, so we don't set the flag here.  We track this by
 964	 * counting the number of processes this process is debugging.
 965	 */
 966	atomic_t debugged_process_count;
 967
  968	/* If the process is being debugged, this is the debugger process */
 969	struct kfd_process *debugger_process;
 970
 971	/* Kobj for our procfs */
 972	struct kobject *kobj;
 973	struct kobject *kobj_queues;
 974	struct attribute attr_pasid;
 975
  976	/* Keep track of cwsr init */
 977	bool has_cwsr;
 978
 979	/* Exception code enable mask and status */
 980	uint64_t exception_enable_mask;
 981	uint64_t exception_status;
 982
 983	/* Used to drain stale interrupts */
 984	wait_queue_head_t wait_irq_drain;
 985	bool irq_drain_is_open;
 986
 987	/* shared virtual memory registered by this process */
 988	struct svm_range_list svms;
 989
 990	bool xnack_enabled;
 991
 992	/* Work area for debugger event writer worker. */
 993	struct work_struct debug_event_workarea;
 994
 995	/* Tracks debug per-vmid request for debug flags */
 996	u32 dbg_flags;
 997
 998	atomic_t poison;
  999	/* Queues are in paused state because we are in the process of doing a CRIU checkpoint */
1000	bool queues_paused;
1001
1002	/* Tracks runtime enable status */
1003	struct semaphore runtime_enable_sema;
1004	bool is_runtime_retry;
1005	struct kfd_runtime_info runtime_info;
1006};
1007
1008#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
1009extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
1010extern struct srcu_struct kfd_processes_srcu;
1011
1012/**
1013 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
1014 *
1015 * @filep: pointer to file structure.
1016 * @p: amdkfd process pointer.
1017 * @data: pointer to arg that was copied from user.
1018 *
1019 * Return: returns ioctl completion code.
1020 */
1021typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
1022				void *data);
1023
1024struct amdkfd_ioctl_desc {
1025	unsigned int cmd;
1026	int flags;
1027	amdkfd_ioctl_t *func;
1028	unsigned int cmd_drv;
1029	const char *name;
1030};
1031bool kfd_dev_is_large_bar(struct kfd_node *dev);
1032
1033int kfd_process_create_wq(void);
1034void kfd_process_destroy_wq(void);
1035void kfd_cleanup_processes(void);
1036struct kfd_process *kfd_create_process(struct task_struct *thread);
1037struct kfd_process *kfd_get_process(const struct task_struct *task);
1038struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
1039struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
1040
1041int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
1042int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
1043				uint32_t *gpuid, uint32_t *gpuidx);
1044static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
1045				uint32_t gpuidx, uint32_t *gpuid) {
1046	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
1047}
1048static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
1049				struct kfd_process *p, uint32_t gpuidx) {
1050	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
1051}
1052
1053void kfd_unref_process(struct kfd_process *p);
1054int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger);
1055int kfd_process_restore_queues(struct kfd_process *p);
1056void kfd_suspend_all_processes(void);
1057int kfd_resume_all_processes(void);
1058
1059struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
1060							 uint32_t gpu_id);
1061
1062int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);
1063
1064int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1065			       struct file *drm_file);
1066struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
1067						struct kfd_process *p);
1068struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
1069							struct kfd_process *p);
1070struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
1071							struct kfd_process *p);
1072
1073bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);
1074
1075int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
1076			  struct vm_area_struct *vma);
1077
1078/* KFD process API for creating and translating handles */
1079int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1080					void *mem);
1081void *kfd_process_device_translate_handle(struct kfd_process_device *p,
1082					int handle);
1083void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1084					int handle);
1085struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);
1086
1087/* PASIDs */
1088int kfd_pasid_init(void);
1089void kfd_pasid_exit(void);
1090bool kfd_set_pasid_limit(unsigned int new_limit);
1091unsigned int kfd_get_pasid_limit(void);
1092u32 kfd_pasid_alloc(void);
1093void kfd_pasid_free(u32 pasid);
1094
1095/* Doorbells */
1096size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
1097int kfd_doorbell_init(struct kfd_dev *kfd);
1098void kfd_doorbell_fini(struct kfd_dev *kfd);
1099int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
1100		      struct vm_area_struct *vma);
1101void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
1102					unsigned int *doorbell_off);
1103void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
1104u32 read_kernel_doorbell(u32 __iomem *db);
1105void write_kernel_doorbell(void __iomem *db, u32 value);
1106void write_kernel_doorbell64(void __iomem *db, u64 value);
1107unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
1108					struct kfd_process_device *pdd,
1109					unsigned int doorbell_id);
1110phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
1111int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
1112				struct kfd_process_device *pdd);
1113void kfd_free_process_doorbells(struct kfd_dev *kfd,
1114				struct kfd_process_device *pdd);
1115/* GTT Sub-Allocator */
1116
1117int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
1118			struct kfd_mem_obj **mem_obj);
1119
1120int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);
1121
1122extern struct device *kfd_device;
1123
1124/* KFD's procfs */
1125void kfd_procfs_init(void);
1126void kfd_procfs_shutdown(void);
1127int kfd_procfs_add_queue(struct queue *q);
1128void kfd_procfs_del_queue(struct queue *q);
1129
1130/* Topology */
1131int kfd_topology_init(void);
1132void kfd_topology_shutdown(void);
1133int kfd_topology_add_device(struct kfd_node *gpu);
1134int kfd_topology_remove_device(struct kfd_node *gpu);
1135struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
1136						uint32_t proximity_domain);
1137struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
1138						uint32_t proximity_domain);
1139struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
1140struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
1141struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
1142static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
1143					uint32_t vmid)
1144{
1145	return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
1146	       (node->compute_vmid_bitmap & (1 << vmid)) != 0;
1147}
1148static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
1149					uint32_t node_id, uint32_t vmid) {
1150	struct kfd_dev *dev = adev->kfd.dev;
1151	uint32_t i;
1152
1153	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
1154	    KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4))
1155		return dev->nodes[0];
1156
1157	for (i = 0; i < dev->num_nodes; i++)
1158		if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
1159			return dev->nodes[i];
1160
1161	return NULL;
1162}
1163int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
1164int kfd_numa_node_to_apic_id(int numa_node_id);
1165
1166/* Interrupts */
1167#define	KFD_IRQ_FENCE_CLIENTID	0xff
1168#define	KFD_IRQ_FENCE_SOURCEID	0xff
1169#define	KFD_IRQ_IS_FENCE(client, source)				\
1170				((client) == KFD_IRQ_FENCE_CLIENTID &&	\
1171				(source) == KFD_IRQ_FENCE_SOURCEID)
1172int kfd_interrupt_init(struct kfd_node *dev);
1173void kfd_interrupt_exit(struct kfd_node *dev);
1174bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
1175bool interrupt_is_wanted(struct kfd_node *dev,
1176				const uint32_t *ih_ring_entry,
1177				uint32_t *patched_ihre, bool *flag);
1178int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
1179void kfd_process_close_interrupt_drain(unsigned int pasid);
1180
1181/* amdkfd Apertures */
1182int kfd_init_apertures(struct kfd_process *process);
1183
1184void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1185				  uint64_t tba_addr,
1186				  uint64_t tma_addr);
1187void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
1188				     bool enabled);
1189
1190/* CWSR initialization */
1191int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);
1192
1193/* CRIU */
1194/*
1195 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of the CRIU private
1196 * structures:
1197 * kfd_criu_process_priv_data
1198 * kfd_criu_device_priv_data
1199 * kfd_criu_bo_priv_data
1200 * kfd_criu_queue_priv_data
1201 * kfd_criu_event_priv_data
1202 * kfd_criu_svm_range_priv_data
1203 */
1204
1205#define KFD_CRIU_PRIV_VERSION 1
1206
1207struct kfd_criu_process_priv_data {
1208	uint32_t version;
1209	uint32_t xnack_mode;
1210};
1211
1212struct kfd_criu_device_priv_data {
1213	/* For future use */
1214	uint64_t reserved;
1215};
1216
1217struct kfd_criu_bo_priv_data {
1218	uint64_t user_addr;
1219	uint32_t idr_handle;
1220	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
1221};
1222
1223/*
 1224 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data,
 1225 * and kfd_criu_svm_range_priv_data are the object type
1226 */
1227enum kfd_criu_object_type {
1228	KFD_CRIU_OBJECT_TYPE_QUEUE,
1229	KFD_CRIU_OBJECT_TYPE_EVENT,
1230	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
1231};
1232
1233struct kfd_criu_svm_range_priv_data {
1234	uint32_t object_type;
1235	uint64_t start_addr;
1236	uint64_t size;
1237	/* Variable length array of attributes */
1238	struct kfd_ioctl_svm_attribute attrs[];
1239};
1240
1241struct kfd_criu_queue_priv_data {
1242	uint32_t object_type;
1243	uint64_t q_address;
1244	uint64_t q_size;
1245	uint64_t read_ptr_addr;
1246	uint64_t write_ptr_addr;
1247	uint64_t doorbell_off;
1248	uint64_t eop_ring_buffer_address;
1249	uint64_t ctx_save_restore_area_address;
1250	uint32_t gpu_id;
1251	uint32_t type;
1252	uint32_t format;
1253	uint32_t q_id;
1254	uint32_t priority;
1255	uint32_t q_percent;
1256	uint32_t doorbell_id;
1257	uint32_t gws;
1258	uint32_t sdma_id;
1259	uint32_t eop_ring_buffer_size;
1260	uint32_t ctx_save_restore_area_size;
1261	uint32_t ctl_stack_size;
1262	uint32_t mqd_size;
1263};
1264
1265struct kfd_criu_event_priv_data {
1266	uint32_t object_type;
1267	uint64_t user_handle;
1268	uint32_t event_id;
1269	uint32_t auto_reset;
1270	uint32_t type;
1271	uint32_t signaled;
1272
1273	union {
1274		struct kfd_hsa_memory_exception_data memory_exception_data;
1275		struct kfd_hsa_hw_exception_data hw_exception_data;
1276	};
1277};
1278
1279int kfd_process_get_queue_info(struct kfd_process *p,
1280			       uint32_t *num_queues,
1281			       uint64_t *priv_data_sizes);
1282
1283int kfd_criu_checkpoint_queues(struct kfd_process *p,
1284			 uint8_t __user *user_priv_data,
1285			 uint64_t *priv_data_offset);
1286
1287int kfd_criu_restore_queue(struct kfd_process *p,
1288			   uint8_t __user *user_priv_data,
1289			   uint64_t *priv_data_offset,
1290			   uint64_t max_priv_data_size);
1291
1292int kfd_criu_checkpoint_events(struct kfd_process *p,
1293			 uint8_t __user *user_priv_data,
1294			 uint64_t *priv_data_offset);
1295
1296int kfd_criu_restore_event(struct file *devkfd,
1297			   struct kfd_process *p,
1298			   uint8_t __user *user_priv_data,
1299			   uint64_t *priv_data_offset,
1300			   uint64_t max_priv_data_size);
1301/* CRIU - End */
1302
1303/* Queue Context Management */
1304int init_queue(struct queue **q, const struct queue_properties *properties);
1305void uninit_queue(struct queue *q);
1306void print_queue_properties(struct queue_properties *q);
1307void print_queue(struct queue *q);
1308int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
1309			 u64 expected_size);
1310void kfd_queue_buffer_put(struct amdgpu_bo **bo);
1311int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
1312int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
1313void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo);
1314int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd,
1315			   struct queue_properties *properties);
1316void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev);
1317
1318struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
1319		struct kfd_node *dev);
1320struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
1321		struct kfd_node *dev);
1322struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
1323		struct kfd_node *dev);
1324struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
1325		struct kfd_node *dev);
1326struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
1327		struct kfd_node *dev);
1328struct mqd_manager *mqd_manager_init_v12(enum KFD_MQD_TYPE type,
1329		struct kfd_node *dev);
1330struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
1331void device_queue_manager_uninit(struct device_queue_manager *dqm);
1332struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
1333					enum kfd_queue_type type);
1334void kernel_queue_uninit(struct kernel_queue *kq);
1335int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
1336int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id);
1337
1338/* Process Queue Manager */
1339struct process_queue_node {
1340	struct queue *q;
1341	struct kernel_queue *kq;
1342	struct list_head process_queue_list;
1343};
1344
1345void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
1346void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
1347int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
1348void pqm_uninit(struct process_queue_manager *pqm);
1349int pqm_create_queue(struct process_queue_manager *pqm,
1350			    struct kfd_node *dev,
1351			    struct queue_properties *properties,
1352			    unsigned int *qid,
1353			    const struct kfd_criu_queue_priv_data *q_data,
1354			    const void *restore_mqd,
1355			    const void *restore_ctl_stack,
1356			    uint32_t *p_doorbell_offset_in_process);
1357int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
1358int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
1359			struct queue_properties *p);
1360int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
1361			struct mqd_update_info *minfo);
1362int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
1363			void *gws);
1364struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
1365						unsigned int qid);
1366struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
1367						unsigned int qid);
1368int pqm_get_wave_state(struct process_queue_manager *pqm,
1369		       unsigned int qid,
1370		       void __user *ctl_stack,
1371		       u32 *ctl_stack_used_size,
1372		       u32 *save_area_used_size);
1373int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
1374			   uint64_t exception_clear_mask,
1375			   void __user *buf,
1376			   int *num_qss_entries,
1377			   uint32_t *entry_size);
1378
1379int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
1380			      uint64_t fence_value,
1381			      unsigned int timeout_ms);
1382
1383int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
1384				  unsigned int qid,
1385				  u32 *mqd_size,
1386				  u32 *ctl_stack_size);
1387/* Packet Manager */
1388
1389#define KFD_FENCE_COMPLETED (100)
1390#define KFD_FENCE_INIT   (10)
1391
1392struct packet_manager {
1393	struct device_queue_manager *dqm;
1394	struct kernel_queue *priv_queue;
1395	struct mutex lock;
1396	bool allocated;
1397	struct kfd_mem_obj *ib_buffer_obj;
1398	unsigned int ib_size_bytes;
1399	bool is_over_subscription;
1400
1401	const struct packet_manager_funcs *pmf;
1402};
1403
1404struct packet_manager_funcs {
1405	/* Support ASIC-specific packet formats for PM4 packets */
1406	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
1407			struct qcm_process_device *qpd);
1408	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
1409			uint64_t ib, size_t ib_size_in_dwords, bool chain);
1410	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
1411			struct scheduling_resources *res);
1412	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
1413			struct queue *q, bool is_static);
1414	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
1415			enum kfd_unmap_queues_filter mode,
1416			uint32_t filter_param, bool reset);
1417	int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
1418			uint32_t grace_period);
1419	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
1420			uint64_t fence_address,	uint64_t fence_value);
1421	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
1422
1423	/* Packet sizes */
1424	int map_process_size;
1425	int runlist_size;
1426	int set_resources_size;
1427	int map_queues_size;
1428	int unmap_queues_size;
1429	int set_grace_period_size;
1430	int query_status_size;
1431	int release_mem_size;
1432};
1433
1434extern const struct packet_manager_funcs kfd_vi_pm_funcs;
1435extern const struct packet_manager_funcs kfd_v9_pm_funcs;
1436extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;
1437
1438int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
1439void pm_uninit(struct packet_manager *pm);
1440int pm_send_set_resources(struct packet_manager *pm,
1441				struct scheduling_resources *res);
1442int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
1443int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
1444				uint64_t fence_value);
1445
1446int pm_send_unmap_queue(struct packet_manager *pm,
1447			enum kfd_unmap_queues_filter mode,
1448			uint32_t filter_param, bool reset);
1449
1450void pm_release_ib(struct packet_manager *pm);
1451
1452int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
1453
1454/* Following PM funcs can be shared among VI and AI */
1455unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
1456
1457uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
1458
1459/* Events */
1460extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
1461extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
1462extern const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3;
1463extern const struct kfd_event_interrupt_class event_interrupt_class_v10;
1464extern const struct kfd_event_interrupt_class event_interrupt_class_v11;
1465
1466extern const struct kfd_device_global_init_class device_global_init_class_cik;
1467
1468int kfd_event_init_process(struct kfd_process *p);
1469void kfd_event_free_process(struct kfd_process *p);
1470int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
1471int kfd_wait_on_events(struct kfd_process *p,
1472		       uint32_t num_events, void __user *data,
1473		       bool all, uint32_t *user_timeout_ms,
1474		       uint32_t *wait_result);
1475void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
1476				uint32_t valid_id_bits);
1477void kfd_signal_hw_exception_event(u32 pasid);
1478int kfd_set_event(struct kfd_process *p, uint32_t event_id);
1479int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
1480int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);
1481
1482int kfd_event_create(struct file *devkfd, struct kfd_process *p,
1483		     uint32_t event_type, bool auto_reset, uint32_t node_id,
1484		     uint32_t *event_id, uint32_t *event_trigger_data,
1485		     uint64_t *event_page_offset, uint32_t *event_slot_index);
1486
1487int kfd_get_num_events(struct kfd_process *p);
1488int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
1489
1490void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
1491				struct kfd_vm_fault_info *info,
1492				struct kfd_hsa_memory_exception_data *data);
1493
1494void kfd_signal_reset_event(struct kfd_node *dev);
1495
1496void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
1497
1498static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
1499				 enum TLB_FLUSH_TYPE type)
1500{
1501	struct amdgpu_device *adev = pdd->dev->adev;
1502	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1503
1504	amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
1505}
1506
1507static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
1508{
1509	return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
1510	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
1511	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
1512}
1513
1514int kfd_send_exception_to_runtime(struct kfd_process *p,
1515				unsigned int queue_id,
1516				uint64_t error_reason);
1517bool kfd_is_locked(void);
1518
1519/* Compute profile */
1520void kfd_inc_compute_active(struct kfd_node *dev);
1521void kfd_dec_compute_active(struct kfd_node *dev);
1522
1523/* Cgroup Support */
1524/* Check with device cgroup if @kfd device is accessible */
1525static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
1526{
1527#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
1528	struct drm_device *ddev;
1529
1530	if (node->xcp)
1531		ddev = node->xcp->ddev;
1532	else
1533		ddev = adev_to_drm(node->adev);
1534
1535	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
1536					  ddev->render->index,
1537					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
1538#else
1539	return 0;
1540#endif
1541}
1542
1543static inline bool kfd_is_first_node(struct kfd_node *node)
1544{
1545	return (node == node->kfd->nodes[0]);
1546}
1547
1548/* Debugfs */
1549#if defined(CONFIG_DEBUG_FS)
1550
1551void kfd_debugfs_init(void);
1552void kfd_debugfs_fini(void);
1553int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
1554int pqm_debugfs_mqds(struct seq_file *m, void *data);
1555int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
1556int dqm_debugfs_hqds(struct seq_file *m, void *data);
1557int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
1558int pm_debugfs_runlist(struct seq_file *m, void *data);
1559
1560int kfd_debugfs_hang_hws(struct kfd_node *dev);
1561int pm_debugfs_hang_hws(struct packet_manager *pm);
1562int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
1563
1564#else
1565
1566static inline void kfd_debugfs_init(void) {}
1567static inline void kfd_debugfs_fini(void) {}
1568
1569#endif
1570
1571#endif