   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 */
  22
  23#include <linux/device.h>
  24#include <linux/export.h>
  25#include <linux/err.h>
  26#include <linux/fs.h>
  27#include <linux/file.h>
  28#include <linux/sched.h>
  29#include <linux/slab.h>
  30#include <linux/uaccess.h>
  31#include <linux/compat.h>
  32#include <uapi/linux/kfd_ioctl.h>
  33#include <linux/time.h>
  34#include <linux/mm.h>
  35#include <linux/mman.h>
 
  36#include <linux/dma-buf.h>
  37#include <asm/processor.h>
  38#include "kfd_priv.h"
  39#include "kfd_device_queue_manager.h"
  40#include "kfd_dbgmgr.h"
  41#include "amdgpu_amdkfd.h"
  42#include "kfd_smi_events.h"
  43
  44static long kfd_ioctl(struct file *, unsigned int, unsigned long);
  45static int kfd_open(struct inode *, struct file *);
  46static int kfd_release(struct inode *, struct file *);
  47static int kfd_mmap(struct file *, struct vm_area_struct *);
  48
  49static const char kfd_dev_name[] = "kfd";
  50
  51static const struct file_operations kfd_fops = {
  52	.owner = THIS_MODULE,
  53	.unlocked_ioctl = kfd_ioctl,
  54	.compat_ioctl = compat_ptr_ioctl,
  55	.open = kfd_open,
  56	.release = kfd_release,
  57	.mmap = kfd_mmap,
  58};
  59
  60static int kfd_char_dev_major = -1;
  61static struct class *kfd_class;
  62struct device *kfd_device;
  63
  64int kfd_chardev_init(void)
  65{
  66	int err = 0;
  67
  68	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
  69	err = kfd_char_dev_major;
  70	if (err < 0)
  71		goto err_register_chrdev;
  72
  73	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
  74	err = PTR_ERR(kfd_class);
  75	if (IS_ERR(kfd_class))
  76		goto err_class_create;
  77
  78	kfd_device = device_create(kfd_class, NULL,
  79					MKDEV(kfd_char_dev_major, 0),
  80					NULL, kfd_dev_name);
  81	err = PTR_ERR(kfd_device);
  82	if (IS_ERR(kfd_device))
  83		goto err_device_create;
  84
  85	return 0;
  86
  87err_device_create:
  88	class_destroy(kfd_class);
  89err_class_create:
  90	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
  91err_register_chrdev:
  92	return err;
  93}
  94
  95void kfd_chardev_exit(void)
  96{
  97	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
  98	class_destroy(kfd_class);
  99	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
 100}
 101
 102struct device *kfd_chardev(void)
 103{
 104	return kfd_device;
 105}
 106
 107
 108static int kfd_open(struct inode *inode, struct file *filep)
 109{
 110	struct kfd_process *process;
 111	bool is_32bit_user_mode;
 112
 113	if (iminor(inode) != 0)
 114		return -ENODEV;
 115
 116	is_32bit_user_mode = in_compat_syscall();
 117
 118	if (is_32bit_user_mode) {
 119		dev_warn(kfd_device,
 120			"Process %d (32-bit) failed to open /dev/kfd\n"
 121			"32-bit processes are not supported by amdkfd\n",
 122			current->pid);
 123		return -EPERM;
 124	}
 125
 126	process = kfd_create_process(filep);
 127	if (IS_ERR(process))
 128		return PTR_ERR(process);
 129
 130	if (kfd_is_locked()) {
 131		dev_dbg(kfd_device, "kfd is locked!\n"
 132				"process %d unreferenced", process->pasid);
 133		kfd_unref_process(process);
 134		return -EAGAIN;
 135	}
 136
 137	/* filep now owns the reference returned by kfd_create_process */
 138	filep->private_data = process;
 139
 140	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
 141		process->pasid, process->is_32bit_user_mode);
 142
 143	return 0;
 144}
 145
 146static int kfd_release(struct inode *inode, struct file *filep)
 147{
 148	struct kfd_process *process = filep->private_data;
 149
 150	if (process)
 151		kfd_unref_process(process);
 152
 153	return 0;
 154}
 155
 156static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
 157					void *data)
 158{
 159	struct kfd_ioctl_get_version_args *args = data;
 160
 161	args->major_version = KFD_IOCTL_MAJOR_VERSION;
 162	args->minor_version = KFD_IOCTL_MINOR_VERSION;
 163
 164	return 0;
 165}
 166
 167static int set_queue_properties_from_user(struct queue_properties *q_properties,
 168				struct kfd_ioctl_create_queue_args *args)
 169{
 170	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
  171		pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
 172		return -EINVAL;
 173	}
 174
 175	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
  176		pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
 177		return -EINVAL;
 178	}
 179
 180	if ((args->ring_base_address) &&
 181		(!access_ok((const void __user *) args->ring_base_address,
 182			sizeof(uint64_t)))) {
 183		pr_err("Can't access ring base address\n");
 184		return -EFAULT;
 185	}
 186
 187	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
 188		pr_err("Ring size must be a power of 2 or 0\n");
 189		return -EINVAL;
 190	}
 191
 192	if (!access_ok((const void __user *) args->read_pointer_address,
 193			sizeof(uint32_t))) {
 194		pr_err("Can't access read pointer\n");
 195		return -EFAULT;
 196	}
 197
 198	if (!access_ok((const void __user *) args->write_pointer_address,
 199			sizeof(uint32_t))) {
 200		pr_err("Can't access write pointer\n");
 201		return -EFAULT;
 202	}
 203
 204	if (args->eop_buffer_address &&
 205		!access_ok((const void __user *) args->eop_buffer_address,
 206			sizeof(uint32_t))) {
 207		pr_debug("Can't access eop buffer");
 208		return -EFAULT;
 209	}
 210
 211	if (args->ctx_save_restore_address &&
 212		!access_ok((const void __user *) args->ctx_save_restore_address,
 213			sizeof(uint32_t))) {
 214		pr_debug("Can't access ctx save restore buffer");
 215		return -EFAULT;
 216	}
 217
 218	q_properties->is_interop = false;
 219	q_properties->is_gws = false;
 220	q_properties->queue_percent = args->queue_percentage;
 221	q_properties->priority = args->queue_priority;
 222	q_properties->queue_address = args->ring_base_address;
 223	q_properties->queue_size = args->ring_size;
 224	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
 225	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
 226	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
 227	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
 228	q_properties->ctx_save_restore_area_address =
 229			args->ctx_save_restore_address;
 230	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
 231	q_properties->ctl_stack_size = args->ctl_stack_size;
 
 232	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
 233		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
 234		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
 235	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
 236		q_properties->type = KFD_QUEUE_TYPE_SDMA;
 237	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
 238		q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
 239	else
 240		return -ENOTSUPP;
 241
 242	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
 243		q_properties->format = KFD_QUEUE_FORMAT_AQL;
 244	else
 245		q_properties->format = KFD_QUEUE_FORMAT_PM4;
 246
 247	pr_debug("Queue Percentage: %d, %d\n",
 248			q_properties->queue_percent, args->queue_percentage);
 249
 250	pr_debug("Queue Priority: %d, %d\n",
 251			q_properties->priority, args->queue_priority);
 252
 253	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
 254			q_properties->queue_address, args->ring_base_address);
 255
 256	pr_debug("Queue Size: 0x%llX, %u\n",
 257			q_properties->queue_size, args->ring_size);
 258
 259	pr_debug("Queue r/w Pointers: %px, %px\n",
 260			q_properties->read_ptr,
 261			q_properties->write_ptr);
 262
 263	pr_debug("Queue Format: %d\n", q_properties->format);
 264
 265	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
 266
 267	pr_debug("Queue CTX save area: 0x%llX\n",
 268			q_properties->ctx_save_restore_area_address);
 269
 270	return 0;
 271}
 272
 273static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 274					void *data)
 275{
 276	struct kfd_ioctl_create_queue_args *args = data;
 277	struct kfd_dev *dev;
 278	int err = 0;
 279	unsigned int queue_id;
 280	struct kfd_process_device *pdd;
 281	struct queue_properties q_properties;
 282	uint32_t doorbell_offset_in_process = 0;
 283
 284	memset(&q_properties, 0, sizeof(struct queue_properties));
 285
 286	pr_debug("Creating queue ioctl\n");
 287
 288	err = set_queue_properties_from_user(&q_properties, args);
 289	if (err)
 290		return err;
 291
 292	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
 293	dev = kfd_device_by_id(args->gpu_id);
 294	if (!dev) {
 295		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
 296		return -EINVAL;
 297	}
 298
 299	mutex_lock(&p->mutex);
 300
 301	pdd = kfd_bind_process_to_device(dev, p);
 302	if (IS_ERR(pdd)) {
 303		err = -ESRCH;
 304		goto err_bind_process;
 305	}
 306
 307	pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
 308			p->pasid,
 309			dev->id);
 310
 311	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
 312			&doorbell_offset_in_process);
 313	if (err != 0)
 314		goto err_create_queue;
 315
 316	args->queue_id = queue_id;
 317
 318
 319	/* Return gpu_id as doorbell offset for mmap usage */
 320	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
 321	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
 322	if (KFD_IS_SOC15(dev->device_info->asic_family))
 323		/* On SOC15 ASICs, include the doorbell offset within the
 324		 * process doorbell frame, which is 2 pages.
 325		 */
 326		args->doorbell_offset |= doorbell_offset_in_process;
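	/*
	 * Illustrative sketch (not part of the upstream code): user space is
	 * expected to hand this value back as the mmap() offset on /dev/kfd
	 * to map its doorbell page, roughly:
	 *
	 *   doorbells = mmap(NULL, doorbell_slice_size, PROT_READ | PROT_WRITE,
	 *                    MAP_SHARED, kfd_fd, args.doorbell_offset);
	 *
	 * doorbell_slice_size and kfd_fd are assumed names; the authoritative
	 * usage lives in the user-mode runtime.
	 */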
 327
 328	mutex_unlock(&p->mutex);
 329
 330	pr_debug("Queue id %d was created successfully\n", args->queue_id);
 331
 332	pr_debug("Ring buffer address == 0x%016llX\n",
 333			args->ring_base_address);
 334
 335	pr_debug("Read ptr address    == 0x%016llX\n",
 336			args->read_pointer_address);
 337
 338	pr_debug("Write ptr address   == 0x%016llX\n",
 339			args->write_pointer_address);
 340
 
 341	return 0;
 342
 343err_create_queue:
 344err_bind_process:
 
 345	mutex_unlock(&p->mutex);
 346	return err;
 347}
 348
 349static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
 350					void *data)
 351{
 352	int retval;
 353	struct kfd_ioctl_destroy_queue_args *args = data;
 354
 355	pr_debug("Destroying queue id %d for pasid 0x%x\n",
 356				args->queue_id,
 357				p->pasid);
 358
 359	mutex_lock(&p->mutex);
 360
 361	retval = pqm_destroy_queue(&p->pqm, args->queue_id);
 362
 363	mutex_unlock(&p->mutex);
 364	return retval;
 365}
 366
 367static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 368					void *data)
 369{
 370	int retval;
 371	struct kfd_ioctl_update_queue_args *args = data;
 372	struct queue_properties properties;
 373
 374	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
  375		pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
 376		return -EINVAL;
 377	}
 378
 379	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
  380		pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
 381		return -EINVAL;
 382	}
 383
 384	if ((args->ring_base_address) &&
 385		(!access_ok((const void __user *) args->ring_base_address,
 386			sizeof(uint64_t)))) {
 387		pr_err("Can't access ring base address\n");
 388		return -EFAULT;
 389	}
 390
 391	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
 392		pr_err("Ring size must be a power of 2 or 0\n");
 393		return -EINVAL;
 394	}
 395
 396	properties.queue_address = args->ring_base_address;
 397	properties.queue_size = args->ring_size;
 398	properties.queue_percent = args->queue_percentage;
 399	properties.priority = args->queue_priority;
 400
 401	pr_debug("Updating queue id %d for pasid 0x%x\n",
 402			args->queue_id, p->pasid);
 403
 404	mutex_lock(&p->mutex);
 405
 406	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
 407
 408	mutex_unlock(&p->mutex);
 409
 410	return retval;
 411}
 412
 413static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
 414					void *data)
 415{
 416	int retval;
 417	const int max_num_cus = 1024;
 418	struct kfd_ioctl_set_cu_mask_args *args = data;
 419	struct queue_properties properties;
 420	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
 421	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
 422
 423	if ((args->num_cu_mask % 32) != 0) {
 424		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
 425				args->num_cu_mask);
 426		return -EINVAL;
 427	}
 428
 429	properties.cu_mask_count = args->num_cu_mask;
 430	if (properties.cu_mask_count == 0) {
 431		pr_debug("CU mask cannot be 0");
 432		return -EINVAL;
 433	}
 434
  435	/* To prevent an unreasonably large CU mask size, set an arbitrary
  436	 * limit of max_num_cus bits.  Any CU mask bits past max_num_cus are
  437	 * simply dropped and only the first max_num_cus bits are used.
  438	 */
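	/*
	 * Illustrative example (the values are assumptions, not from this
	 * file): num_cu_mask = 64 means user space passes two 32-bit words,
	 * e.g. {0xffffffff, 0x0000ffff}, where each set bit enables one CU.
	 */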
 439	if (properties.cu_mask_count > max_num_cus) {
 440		pr_debug("CU mask cannot be greater than 1024 bits");
 441		properties.cu_mask_count = max_num_cus;
 442		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
 443	}
 444
 445	properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
 446	if (!properties.cu_mask)
 447		return -ENOMEM;
 448
 449	retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
 450	if (retval) {
 451		pr_debug("Could not copy CU mask from userspace");
 452		kfree(properties.cu_mask);
 453		return -EFAULT;
 454	}
 455
 456	mutex_lock(&p->mutex);
 457
 458	retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
 459
 460	mutex_unlock(&p->mutex);
 461
 462	if (retval)
 463		kfree(properties.cu_mask);
 464
 465	return retval;
 466}
 467
 468static int kfd_ioctl_get_queue_wave_state(struct file *filep,
 469					  struct kfd_process *p, void *data)
 470{
 471	struct kfd_ioctl_get_queue_wave_state_args *args = data;
 472	int r;
 473
 474	mutex_lock(&p->mutex);
 475
 476	r = pqm_get_wave_state(&p->pqm, args->queue_id,
 477			       (void __user *)args->ctl_stack_address,
 478			       &args->ctl_stack_used_size,
 479			       &args->save_area_used_size);
 480
 481	mutex_unlock(&p->mutex);
 482
 483	return r;
 484}
 485
 486static int kfd_ioctl_set_memory_policy(struct file *filep,
 487					struct kfd_process *p, void *data)
 488{
 489	struct kfd_ioctl_set_memory_policy_args *args = data;
 490	struct kfd_dev *dev;
 491	int err = 0;
 492	struct kfd_process_device *pdd;
 493	enum cache_policy default_policy, alternate_policy;
 494
 495	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
 496	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
 497		return -EINVAL;
 498	}
 499
 500	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
 501	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
 502		return -EINVAL;
 503	}
 504
 505	dev = kfd_device_by_id(args->gpu_id);
 506	if (!dev)
 507		return -EINVAL;
 508
 509	mutex_lock(&p->mutex);
 510
 511	pdd = kfd_bind_process_to_device(dev, p);
 512	if (IS_ERR(pdd)) {
 513		err = -ESRCH;
 514		goto out;
 515	}
 516
 517	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
 518			 ? cache_policy_coherent : cache_policy_noncoherent;
 519
 520	alternate_policy =
 521		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
 522		   ? cache_policy_coherent : cache_policy_noncoherent;
 523
 524	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
 525				&pdd->qpd,
 526				default_policy,
 527				alternate_policy,
 528				(void __user *)args->alternate_aperture_base,
 529				args->alternate_aperture_size))
 530		err = -EINVAL;
 531
 532out:
 
 533	mutex_unlock(&p->mutex);
 534
 535	return err;
 536}
 537
 538static int kfd_ioctl_set_trap_handler(struct file *filep,
 539					struct kfd_process *p, void *data)
 540{
 541	struct kfd_ioctl_set_trap_handler_args *args = data;
 542	struct kfd_dev *dev;
 543	int err = 0;
 544	struct kfd_process_device *pdd;
 545
 546	dev = kfd_device_by_id(args->gpu_id);
 547	if (!dev)
 548		return -EINVAL;
 549
 550	mutex_lock(&p->mutex);
 551
 552	pdd = kfd_bind_process_to_device(dev, p);
 553	if (IS_ERR(pdd)) {
 554		err = -ESRCH;
 555		goto out;
 556	}
 557
 558	if (dev->dqm->ops.set_trap_handler(dev->dqm,
 559					&pdd->qpd,
 560					args->tba_addr,
 561					args->tma_addr))
 562		err = -EINVAL;
 563
 564out:
 
 565	mutex_unlock(&p->mutex);
 566
 567	return err;
 568}
 569
 570static int kfd_ioctl_dbg_register(struct file *filep,
 571				struct kfd_process *p, void *data)
 572{
 573	struct kfd_ioctl_dbg_register_args *args = data;
 574	struct kfd_dev *dev;
 575	struct kfd_dbgmgr *dbgmgr_ptr;
 576	struct kfd_process_device *pdd;
 577	bool create_ok;
 578	long status = 0;
 579
 580	dev = kfd_device_by_id(args->gpu_id);
 581	if (!dev)
 582		return -EINVAL;
 583
 584	if (dev->device_info->asic_family == CHIP_CARRIZO) {
 585		pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
 586		return -EINVAL;
 587	}
 588
 589	mutex_lock(&p->mutex);
 590	mutex_lock(kfd_get_dbgmgr_mutex());
 591
  592	/*
  593	 * make sure that we have a pdd, in case this is the first queue
  594	 * created for this process
  595	 */
 596	pdd = kfd_bind_process_to_device(dev, p);
 597	if (IS_ERR(pdd)) {
 598		status = PTR_ERR(pdd);
 599		goto out;
 600	}
 601
 602	if (!dev->dbgmgr) {
 603		/* In case of a legal call, we have no dbgmgr yet */
 604		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
 605		if (create_ok) {
 606			status = kfd_dbgmgr_register(dbgmgr_ptr, p);
 607			if (status != 0)
 608				kfd_dbgmgr_destroy(dbgmgr_ptr);
 609			else
 610				dev->dbgmgr = dbgmgr_ptr;
 611		}
 612	} else {
 613		pr_debug("debugger already registered\n");
 614		status = -EINVAL;
 615	}
 616
 617out:
 618	mutex_unlock(kfd_get_dbgmgr_mutex());
 619	mutex_unlock(&p->mutex);
 620
 621	return status;
 622}
 623
 624static int kfd_ioctl_dbg_unregister(struct file *filep,
 625				struct kfd_process *p, void *data)
 626{
 627	struct kfd_ioctl_dbg_unregister_args *args = data;
 628	struct kfd_dev *dev;
 629	long status;
 630
 631	dev = kfd_device_by_id(args->gpu_id);
 632	if (!dev || !dev->dbgmgr)
 633		return -EINVAL;
 634
 635	if (dev->device_info->asic_family == CHIP_CARRIZO) {
 636		pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
 637		return -EINVAL;
 638	}
 639
 640	mutex_lock(kfd_get_dbgmgr_mutex());
 641
 642	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
 643	if (!status) {
 644		kfd_dbgmgr_destroy(dev->dbgmgr);
 645		dev->dbgmgr = NULL;
 646	}
 647
 648	mutex_unlock(kfd_get_dbgmgr_mutex());
 649
 650	return status;
 651}
 652
  653/*
  654 * Parse and generate a variable-size data structure for address watch.
  655 * The total buffer size and the number of watch points are limited in
  656 * order to prevent kernel abuse. (This has no bearing on the much
  657 * smaller HW limitation, which is enforced by the dbgdev module.)
  658 * Note also that the watch addresses themselves are not "copied from
  659 * user", since they are programmed into the HW as user-mode values.
  660 *
  661 */
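/*
 * Sketch of the user buffer layout as parsed below (derived from this
 * function's parsing code, not from a separate spec):
 *
 *   uint32_t           num_watch_points;
 *   HSA_DBG_WATCH_MODE watch_mode[num_watch_points];
 *   uint64_t           watch_address[num_watch_points];
 *   uint64_t           watch_mask[num_watch_points];  - or a single 0 meaning "no masks"
 */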
 662static int kfd_ioctl_dbg_address_watch(struct file *filep,
 663					struct kfd_process *p, void *data)
 664{
 665	struct kfd_ioctl_dbg_address_watch_args *args = data;
 666	struct kfd_dev *dev;
 667	struct dbg_address_watch_info aw_info;
 668	unsigned char *args_buff;
 669	long status;
 670	void __user *cmd_from_user;
 671	uint64_t watch_mask_value = 0;
 672	unsigned int args_idx = 0;
 673
 674	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
 675
 676	dev = kfd_device_by_id(args->gpu_id);
 677	if (!dev)
 678		return -EINVAL;
 679
 680	if (dev->device_info->asic_family == CHIP_CARRIZO) {
 681		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
 682		return -EINVAL;
 683	}
 684
 685	cmd_from_user = (void __user *) args->content_ptr;
 686
 687	/* Validate arguments */
 688
 689	if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
 690		(args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
 691		(cmd_from_user == NULL))
 692		return -EINVAL;
 693
 694	/* this is the actual buffer to work with */
 695	args_buff = memdup_user(cmd_from_user,
 696				args->buf_size_in_bytes - sizeof(*args));
 697	if (IS_ERR(args_buff))
 698		return PTR_ERR(args_buff);
 699
 700	aw_info.process = p;
 701
 702	aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
 703	args_idx += sizeof(aw_info.num_watch_points);
 704
 705	aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
 706	args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;
 707
 708	/*
 709	 * set watch address base pointer to point on the array base
 710	 * within args_buff
 711	 */
 712	aw_info.watch_address = (uint64_t *) &args_buff[args_idx];
 713
 714	/* skip over the addresses buffer */
 715	args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
 716
 717	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
 718		status = -EINVAL;
 719		goto out;
 720	}
 721
 722	watch_mask_value = (uint64_t) args_buff[args_idx];
 723
 724	if (watch_mask_value > 0) {
 725		/*
 726		 * There is an array of masks.
 727		 * set watch mask base pointer to point on the array base
 728		 * within args_buff
 729		 */
 730		aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];
 731
 732		/* skip over the masks buffer */
 733		args_idx += sizeof(aw_info.watch_mask) *
 734				aw_info.num_watch_points;
 735	} else {
 736		/* just the NULL mask, set to NULL and skip over it */
 737		aw_info.watch_mask = NULL;
 738		args_idx += sizeof(aw_info.watch_mask);
 739	}
 740
 741	if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
 742		status = -EINVAL;
 743		goto out;
 744	}
 745
 746	/* Currently HSA Event is not supported for DBG */
 747	aw_info.watch_event = NULL;
 748
 749	mutex_lock(kfd_get_dbgmgr_mutex());
 750
 751	status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);
 752
 753	mutex_unlock(kfd_get_dbgmgr_mutex());
 754
 755out:
 756	kfree(args_buff);
 757
 758	return status;
 759}
 760
 761/* Parse and generate fixed size data structure for wave control */
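/*
 * Payload layout after the ioctl args header, as consumed below (a sketch
 * matching the parsing code, not a separate spec): operand
 * (HSA_DBG_WAVEOP), mode (HSA_DBG_WAVEMODE), trapId (uint32_t), then the
 * 32-bit DbgWaveMsg value; MemoryVA is always set to NULL here.
 */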
 762static int kfd_ioctl_dbg_wave_control(struct file *filep,
 763					struct kfd_process *p, void *data)
 764{
 765	struct kfd_ioctl_dbg_wave_control_args *args = data;
 766	struct kfd_dev *dev;
 767	struct dbg_wave_control_info wac_info;
 768	unsigned char *args_buff;
 769	uint32_t computed_buff_size;
 770	long status;
 771	void __user *cmd_from_user;
 772	unsigned int args_idx = 0;
 773
 774	memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));
 775
 776	/* we use compact form, independent of the packing attribute value */
 777	computed_buff_size = sizeof(*args) +
 778				sizeof(wac_info.mode) +
 779				sizeof(wac_info.operand) +
 780				sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
 781				sizeof(wac_info.dbgWave_msg.MemoryVA) +
 782				sizeof(wac_info.trapId);
 783
 784	dev = kfd_device_by_id(args->gpu_id);
 785	if (!dev)
 786		return -EINVAL;
 787
 788	if (dev->device_info->asic_family == CHIP_CARRIZO) {
 789		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
 790		return -EINVAL;
 791	}
 792
 793	/* input size must match the computed "compact" size */
 794	if (args->buf_size_in_bytes != computed_buff_size) {
 795		pr_debug("size mismatch, computed : actual %u : %u\n",
 796				args->buf_size_in_bytes, computed_buff_size);
 797		return -EINVAL;
 798	}
 799
 800	cmd_from_user = (void __user *) args->content_ptr;
 801
 802	if (cmd_from_user == NULL)
 803		return -EINVAL;
 804
 805	/* copy the entire buffer from user */
 806
 807	args_buff = memdup_user(cmd_from_user,
 808				args->buf_size_in_bytes - sizeof(*args));
 809	if (IS_ERR(args_buff))
 810		return PTR_ERR(args_buff);
 811
  812	/* move ptr to the start of the "payload" area */
 813	wac_info.process = p;
 814
 815	wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
 816	args_idx += sizeof(wac_info.operand);
 817
 818	wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
 819	args_idx += sizeof(wac_info.mode);
 820
 821	wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
 822	args_idx += sizeof(wac_info.trapId);
 823
 824	wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
 825					*((uint32_t *)(&args_buff[args_idx]));
 826	wac_info.dbgWave_msg.MemoryVA = NULL;
 827
 828	mutex_lock(kfd_get_dbgmgr_mutex());
 829
 830	pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
 831			wac_info.process, wac_info.operand,
 832			wac_info.mode, wac_info.trapId,
 833			wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
 834
 835	status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);
 836
 837	pr_debug("Returned status of dbg manager is %ld\n", status);
 838
 839	mutex_unlock(kfd_get_dbgmgr_mutex());
 840
 841	kfree(args_buff);
 842
 843	return status;
 844}
 845
 846static int kfd_ioctl_get_clock_counters(struct file *filep,
 847				struct kfd_process *p, void *data)
 848{
 849	struct kfd_ioctl_get_clock_counters_args *args = data;
 850	struct kfd_dev *dev;
 851
 852	dev = kfd_device_by_id(args->gpu_id);
 853	if (dev)
 854		/* Reading GPU clock counter from KGD */
 855		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
 856	else
 857		/* Node without GPU resource */
 858		args->gpu_clock_counter = 0;
 859
 860	/* No access to rdtsc. Using raw monotonic time */
 861	args->cpu_clock_counter = ktime_get_raw_ns();
 862	args->system_clock_counter = ktime_get_boottime_ns();
 863
  864	/* Since the counter is in nanoseconds we use a 1 GHz frequency */
 865	args->system_clock_freq = 1000000000;
 866
 867	return 0;
 868}
 869
 870
 871static int kfd_ioctl_get_process_apertures(struct file *filp,
 872				struct kfd_process *p, void *data)
 873{
 874	struct kfd_ioctl_get_process_apertures_args *args = data;
 875	struct kfd_process_device_apertures *pAperture;
 876	struct kfd_process_device *pdd;
 877
 878	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
 879
 880	args->num_of_nodes = 0;
 881
 882	mutex_lock(&p->mutex);
 883
  884	/* if the process-device list isn't empty */
 885	if (kfd_has_process_device_data(p)) {
 886		/* Run over all pdd of the process */
 887		pdd = kfd_get_first_process_device_data(p);
 888		do {
 889			pAperture =
 890				&args->process_apertures[args->num_of_nodes];
 891			pAperture->gpu_id = pdd->dev->id;
 892			pAperture->lds_base = pdd->lds_base;
 893			pAperture->lds_limit = pdd->lds_limit;
 894			pAperture->gpuvm_base = pdd->gpuvm_base;
 895			pAperture->gpuvm_limit = pdd->gpuvm_limit;
 896			pAperture->scratch_base = pdd->scratch_base;
 897			pAperture->scratch_limit = pdd->scratch_limit;
 898
 899			dev_dbg(kfd_device,
 900				"node id %u\n", args->num_of_nodes);
 901			dev_dbg(kfd_device,
 902				"gpu id %u\n", pdd->dev->id);
 903			dev_dbg(kfd_device,
 904				"lds_base %llX\n", pdd->lds_base);
 905			dev_dbg(kfd_device,
 906				"lds_limit %llX\n", pdd->lds_limit);
 907			dev_dbg(kfd_device,
 908				"gpuvm_base %llX\n", pdd->gpuvm_base);
 909			dev_dbg(kfd_device,
 910				"gpuvm_limit %llX\n", pdd->gpuvm_limit);
 911			dev_dbg(kfd_device,
 912				"scratch_base %llX\n", pdd->scratch_base);
 913			dev_dbg(kfd_device,
 914				"scratch_limit %llX\n", pdd->scratch_limit);
 915
 916			args->num_of_nodes++;
 917
 918			pdd = kfd_get_next_process_device_data(p, pdd);
 919		} while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
 920	}
 921
 922	mutex_unlock(&p->mutex);
 923
 924	return 0;
 925}
 926
 927static int kfd_ioctl_get_process_apertures_new(struct file *filp,
 928				struct kfd_process *p, void *data)
 929{
 930	struct kfd_ioctl_get_process_apertures_new_args *args = data;
 931	struct kfd_process_device_apertures *pa;
 932	struct kfd_process_device *pdd;
 933	uint32_t nodes = 0;
 934	int ret;
 
 935
 936	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
 937
 938	if (args->num_of_nodes == 0) {
  939		/* Return number of nodes, so that user space can allocate
 940		 * sufficient memory
 941		 */
 942		mutex_lock(&p->mutex);
 943
 944		if (!kfd_has_process_device_data(p))
 945			goto out_unlock;
 946
 947		/* Run over all pdd of the process */
 948		pdd = kfd_get_first_process_device_data(p);
 949		do {
 950			args->num_of_nodes++;
 951			pdd = kfd_get_next_process_device_data(p, pdd);
 952		} while (pdd);
 953
 954		goto out_unlock;
 955	}
 956
 957	/* Fill in process-aperture information for all available
 958	 * nodes, but not more than args->num_of_nodes as that is
 959	 * the amount of memory allocated by user
 960	 */
 961	pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
 962				args->num_of_nodes), GFP_KERNEL);
 963	if (!pa)
 964		return -ENOMEM;
 965
 966	mutex_lock(&p->mutex);
 967
 968	if (!kfd_has_process_device_data(p)) {
 969		args->num_of_nodes = 0;
 970		kfree(pa);
 971		goto out_unlock;
 972	}
 973
 974	/* Run over all pdd of the process */
 975	pdd = kfd_get_first_process_device_data(p);
 976	do {
 977		pa[nodes].gpu_id = pdd->dev->id;
 978		pa[nodes].lds_base = pdd->lds_base;
 979		pa[nodes].lds_limit = pdd->lds_limit;
 980		pa[nodes].gpuvm_base = pdd->gpuvm_base;
 981		pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
 982		pa[nodes].scratch_base = pdd->scratch_base;
 983		pa[nodes].scratch_limit = pdd->scratch_limit;
 
 984
 985		dev_dbg(kfd_device,
 986			"gpu id %u\n", pdd->dev->id);
 987		dev_dbg(kfd_device,
 988			"lds_base %llX\n", pdd->lds_base);
 989		dev_dbg(kfd_device,
 990			"lds_limit %llX\n", pdd->lds_limit);
 991		dev_dbg(kfd_device,
 992			"gpuvm_base %llX\n", pdd->gpuvm_base);
 993		dev_dbg(kfd_device,
 994			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
 995		dev_dbg(kfd_device,
 996			"scratch_base %llX\n", pdd->scratch_base);
 997		dev_dbg(kfd_device,
 998			"scratch_limit %llX\n", pdd->scratch_limit);
 999		nodes++;
1000
1001		pdd = kfd_get_next_process_device_data(p, pdd);
1002	} while (pdd && (nodes < args->num_of_nodes));
1003	mutex_unlock(&p->mutex);
1004
1005	args->num_of_nodes = nodes;
1006	ret = copy_to_user(
1007			(void __user *)args->kfd_process_device_apertures_ptr,
1008			pa,
1009			(nodes * sizeof(struct kfd_process_device_apertures)));
1010	kfree(pa);
1011	return ret ? -EFAULT : 0;
1012
1013out_unlock:
1014	mutex_unlock(&p->mutex);
1015	return 0;
1016}
1017
1018static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
1019					void *data)
1020{
1021	struct kfd_ioctl_create_event_args *args = data;
1022	int err;
1023
1024	/* For dGPUs the event page is allocated in user mode. The
1025	 * handle is passed to KFD with the first call to this IOCTL
1026	 * through the event_page_offset field.
1027	 */
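	/*
	 * Sketch of the expected encoding (as consumed below): the GPU id is
	 * recovered with GET_GPU_ID() and the per-process buffer handle with
	 * GET_IDR_HANDLE(), i.e. user space presumably passes back the handle
	 * it received from AMDKFD_IOC_ALLOC_MEMORY_OF_GPU for the event page.
	 */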
1028	if (args->event_page_offset) {
1029		struct kfd_dev *kfd;
1030		struct kfd_process_device *pdd;
1031		void *mem, *kern_addr;
1032		uint64_t size;
1033
1034		if (p->signal_page) {
1035			pr_err("Event page is already set\n");
1036			return -EINVAL;
1037		}
1038
1039		kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
1040		if (!kfd) {
1041			pr_err("Getting device by id failed in %s\n", __func__);
1042			return -EINVAL;
1043		}
1044
1045		mutex_lock(&p->mutex);
1046		pdd = kfd_bind_process_to_device(kfd, p);
1047		if (IS_ERR(pdd)) {
1048			err = PTR_ERR(pdd);
1049			goto out_unlock;
1050		}
1051
1052		mem = kfd_process_device_translate_handle(pdd,
1053				GET_IDR_HANDLE(args->event_page_offset));
1054		if (!mem) {
1055			pr_err("Can't find BO, offset is 0x%llx\n",
1056			       args->event_page_offset);
1057			err = -EINVAL;
1058			goto out_unlock;
1059		}
1060		mutex_unlock(&p->mutex);
1061
1062		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
1063						mem, &kern_addr, &size);
1064		if (err) {
1065			pr_err("Failed to map event page to kernel\n");
1066			return err;
1067		}
1068
1069		err = kfd_event_page_set(p, kern_addr, size);
1070		if (err) {
1071			pr_err("Failed to set event page\n");
1072			return err;
1073		}
1074	}
1075
1076	err = kfd_event_create(filp, p, args->event_type,
1077				args->auto_reset != 0, args->node_id,
1078				&args->event_id, &args->event_trigger_data,
1079				&args->event_page_offset,
1080				&args->event_slot_index);
1081
1082	return err;
1083
1084out_unlock:
1085	mutex_unlock(&p->mutex);
1086	return err;
1087}
1088
1089static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
1090					void *data)
1091{
1092	struct kfd_ioctl_destroy_event_args *args = data;
1093
1094	return kfd_event_destroy(p, args->event_id);
1095}
1096
1097static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
1098				void *data)
1099{
1100	struct kfd_ioctl_set_event_args *args = data;
1101
1102	return kfd_set_event(p, args->event_id);
1103}
1104
1105static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
1106				void *data)
1107{
1108	struct kfd_ioctl_reset_event_args *args = data;
1109
1110	return kfd_reset_event(p, args->event_id);
1111}
1112
1113static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
1114				void *data)
1115{
1116	struct kfd_ioctl_wait_events_args *args = data;
1117	int err;
1118
1119	err = kfd_wait_on_events(p, args->num_events,
1120			(void __user *)args->events_ptr,
1121			(args->wait_for_all != 0),
1122			args->timeout, &args->wait_result);
1123
1124	return err;
1125}
1126static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
1127					struct kfd_process *p, void *data)
1128{
1129	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
1130	struct kfd_process_device *pdd;
1131	struct kfd_dev *dev;
1132	long err;
1133
1134	dev = kfd_device_by_id(args->gpu_id);
1135	if (!dev)
1136		return -EINVAL;
1137
1138	mutex_lock(&p->mutex);
1139
1140	pdd = kfd_bind_process_to_device(dev, p);
1141	if (IS_ERR(pdd)) {
1142		err = PTR_ERR(pdd);
1143		goto bind_process_to_device_fail;
1144	}
1145
1146	pdd->qpd.sh_hidden_private_base = args->va_addr;
1147
1148	mutex_unlock(&p->mutex);
1149
1150	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
1151	    pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
1152		dev->kfd2kgd->set_scratch_backing_va(
1153			dev->kgd, args->va_addr, pdd->qpd.vmid);
1154
1155	return 0;
1156
1157bind_process_to_device_fail:
 
1158	mutex_unlock(&p->mutex);
1159	return err;
1160}
1161
1162static int kfd_ioctl_get_tile_config(struct file *filep,
1163		struct kfd_process *p, void *data)
1164{
1165	struct kfd_ioctl_get_tile_config_args *args = data;
1166	struct kfd_dev *dev;
1167	struct tile_config config;
1168	int err = 0;
1169
1170	dev = kfd_device_by_id(args->gpu_id);
1171	if (!dev)
1172		return -EINVAL;
1173
1174	amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
1175
1176	args->gb_addr_config = config.gb_addr_config;
1177	args->num_banks = config.num_banks;
1178	args->num_ranks = config.num_ranks;
1179
1180	if (args->num_tile_configs > config.num_tile_configs)
1181		args->num_tile_configs = config.num_tile_configs;
1182	err = copy_to_user((void __user *)args->tile_config_ptr,
1183			config.tile_config_ptr,
1184			args->num_tile_configs * sizeof(uint32_t));
1185	if (err) {
1186		args->num_tile_configs = 0;
1187		return -EFAULT;
1188	}
1189
1190	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
1191		args->num_macro_tile_configs =
1192				config.num_macro_tile_configs;
1193	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
1194			config.macro_tile_config_ptr,
1195			args->num_macro_tile_configs * sizeof(uint32_t));
1196	if (err) {
1197		args->num_macro_tile_configs = 0;
1198		return -EFAULT;
1199	}
1200
1201	return 0;
1202}
1203
1204static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
1205				void *data)
1206{
1207	struct kfd_ioctl_acquire_vm_args *args = data;
1208	struct kfd_process_device *pdd;
1209	struct kfd_dev *dev;
1210	struct file *drm_file;
1211	int ret;
1212
1213	dev = kfd_device_by_id(args->gpu_id);
1214	if (!dev)
1215		return -EINVAL;
1216
1217	drm_file = fget(args->drm_fd);
1218	if (!drm_file)
1219		return -EINVAL;
1220
1221	mutex_lock(&p->mutex);
1222
1223	pdd = kfd_get_process_device_data(dev, p);
1224	if (!pdd) {
1225		ret = -EINVAL;
1226		goto err_unlock;
1227	}
1228
1229	if (pdd->drm_file) {
1230		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
1231		goto err_unlock;
1232	}
1233
1234	ret = kfd_process_device_init_vm(pdd, drm_file);
1235	if (ret)
1236		goto err_unlock;
 
1237	/* On success, the PDD keeps the drm_file reference */
1238	mutex_unlock(&p->mutex);
1239
1240	return 0;
1241
1242err_unlock:
1243	mutex_unlock(&p->mutex);
1244	fput(drm_file);
1245	return ret;
1246}
1247
1248bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1249{
1250	struct kfd_local_mem_info mem_info;
1251
1252	if (debug_largebar) {
1253		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
1254		return true;
1255	}
1256
1257	if (dev->device_info->needs_iommu_device)
1258		return false;
 
1259
1260	amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
1261	if (mem_info.local_mem_size_private == 0 &&
1262			mem_info.local_mem_size_public > 0)
1263		return true;
1264	return false;
1265}
1266
1267static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1268					struct kfd_process *p, void *data)
1269{
1270	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1271	struct kfd_process_device *pdd;
1272	void *mem;
1273	struct kfd_dev *dev;
1274	int idr_handle;
1275	long err;
1276	uint64_t offset = args->mmap_offset;
1277	uint32_t flags = args->flags;
1278
1279	if (args->size == 0)
1280		return -EINVAL;
1281
1282	dev = kfd_device_by_id(args->gpu_id);
1283	if (!dev)
1284		return -EINVAL;
1285
1286	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1287		(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1288		!kfd_dev_is_large_bar(dev)) {
1289		pr_err("Alloc host visible vram on small bar is not allowed\n");
1290		return -EINVAL;
1291	}
1292
1293	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1294		if (args->size != kfd_doorbell_process_slice(dev))
1295			return -EINVAL;
1296		offset = kfd_get_process_doorbells(dev, p);
1297	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1298		if (args->size != PAGE_SIZE)
1299			return -EINVAL;
1300		offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
1301		if (!offset)
1302			return -ENOMEM;
1303	}
1304
1305	mutex_lock(&p->mutex);
1306
1307	pdd = kfd_bind_process_to_device(dev, p);
1308	if (IS_ERR(pdd)) {
1309		err = PTR_ERR(pdd);
1310		goto err_unlock;
1311	}
1312
1313	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1314		dev->kgd, args->va_addr, args->size,
1315		pdd->vm, (struct kgd_mem **) &mem, &offset,
1316		flags);
1317
1318	if (err)
1319		goto err_unlock;
1320
1321	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1322	if (idr_handle < 0) {
1323		err = -EFAULT;
1324		goto err_free;
1325	}
1326
1327	/* Update the VRAM usage count */
1328	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
1329		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
1330
1331	mutex_unlock(&p->mutex);
1332
1333	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1334	args->mmap_offset = offset;
1335
1336	/* MMIO is mapped through kfd device
1337	 * Generate a kfd mmap offset
1338	 */
1339	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1340		args->mmap_offset = KFD_MMAP_TYPE_MMIO
1341					| KFD_MMAP_GPU_ID(args->gpu_id);
1342
1343	return 0;
1344
1345err_free:
1346	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
 
1347err_unlock:
1348	mutex_unlock(&p->mutex);
1349	return err;
1350}
1351
1352static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1353					struct kfd_process *p, void *data)
1354{
1355	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1356	struct kfd_process_device *pdd;
1357	void *mem;
1358	struct kfd_dev *dev;
1359	int ret;
1360	uint64_t size = 0;
1361
1362	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1363	if (!dev)
1364		return -EINVAL;
1365
1366	mutex_lock(&p->mutex);
1367
1368	pdd = kfd_get_process_device_data(dev, p);
1369	if (!pdd) {
1370		pr_err("Process device data doesn't exist\n");
1371		ret = -EINVAL;
1372		goto err_unlock;
1373	}
1374
1375	mem = kfd_process_device_translate_handle(
1376		pdd, GET_IDR_HANDLE(args->handle));
1377	if (!mem) {
1378		ret = -EINVAL;
1379		goto err_unlock;
1380	}
1381
1382	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
1383						(struct kgd_mem *)mem, &size);
1384
1385	/* If freeing the buffer failed, leave the handle in place for
1386	 * clean-up during process tear-down.
1387	 */
1388	if (!ret)
1389		kfd_process_device_remove_obj_handle(
1390			pdd, GET_IDR_HANDLE(args->handle));
1391
1392	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
1393
1394err_unlock:
 
1395	mutex_unlock(&p->mutex);
1396	return ret;
1397}
1398
1399static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1400					struct kfd_process *p, void *data)
1401{
1402	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1403	struct kfd_process_device *pdd, *peer_pdd;
1404	void *mem;
1405	struct kfd_dev *dev, *peer;
1406	long err = 0;
1407	int i;
1408	uint32_t *devices_arr = NULL;
1409
1410	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1411	if (!dev)
1412		return -EINVAL;
1413
1414	if (!args->n_devices) {
1415		pr_debug("Device IDs array empty\n");
1416		return -EINVAL;
1417	}
1418	if (args->n_success > args->n_devices) {
1419		pr_debug("n_success exceeds n_devices\n");
1420		return -EINVAL;
1421	}
1422
1423	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1424				    GFP_KERNEL);
1425	if (!devices_arr)
1426		return -ENOMEM;
1427
1428	err = copy_from_user(devices_arr,
1429			     (void __user *)args->device_ids_array_ptr,
1430			     args->n_devices * sizeof(*devices_arr));
1431	if (err != 0) {
1432		err = -EFAULT;
1433		goto copy_from_user_failed;
1434	}
1435
1436	mutex_lock(&p->mutex);
1437
1438	pdd = kfd_bind_process_to_device(dev, p);
1439	if (IS_ERR(pdd)) {
1440		err = PTR_ERR(pdd);
1441		goto bind_process_to_device_failed;
1442	}
1443
1444	mem = kfd_process_device_translate_handle(pdd,
1445						GET_IDR_HANDLE(args->handle));
1446	if (!mem) {
1447		err = -ENOMEM;
1448		goto get_mem_obj_from_handle_failed;
1449	}
1450
1451	for (i = args->n_success; i < args->n_devices; i++) {
1452		peer = kfd_device_by_id(devices_arr[i]);
1453		if (!peer) {
1454			pr_debug("Getting device by id failed for 0x%x\n",
1455				 devices_arr[i]);
1456			err = -EINVAL;
1457			goto get_mem_obj_from_handle_failed;
1458		}
1459
1460		peer_pdd = kfd_bind_process_to_device(peer, p);
1461		if (IS_ERR(peer_pdd)) {
1462			err = PTR_ERR(peer_pdd);
1463			goto get_mem_obj_from_handle_failed;
1464		}
 
1465		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1466			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
 
1467		if (err) {
1468			pr_err("Failed to map to gpu %d/%d\n",
1469			       i, args->n_devices);
1470			goto map_memory_to_gpu_failed;
1471		}
1472		args->n_success = i+1;
1473	}
1474
1475	mutex_unlock(&p->mutex);
1476
1477	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
1478	if (err) {
1479		pr_debug("Sync memory failed, wait interrupted by user signal\n");
1480		goto sync_memory_failed;
1481	}
1482
1483	/* Flush TLBs after waiting for the page table updates to complete */
1484	for (i = 0; i < args->n_devices; i++) {
1485		peer = kfd_device_by_id(devices_arr[i]);
1486		if (WARN_ON_ONCE(!peer))
1487			continue;
1488		peer_pdd = kfd_get_process_device_data(peer, p);
1489		if (WARN_ON_ONCE(!peer_pdd))
1490			continue;
1491		kfd_flush_tlb(peer_pdd);
1492	}
1493
1494	kfree(devices_arr);
1495
1496	return err;
1497
 
1498bind_process_to_device_failed:
1499get_mem_obj_from_handle_failed:
1500map_memory_to_gpu_failed:
 
1501	mutex_unlock(&p->mutex);
1502copy_from_user_failed:
1503sync_memory_failed:
1504	kfree(devices_arr);
1505
1506	return err;
1507}
1508
1509static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1510					struct kfd_process *p, void *data)
1511{
1512	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1513	struct kfd_process_device *pdd, *peer_pdd;
1514	void *mem;
1515	struct kfd_dev *dev, *peer;
1516	long err = 0;
1517	uint32_t *devices_arr = NULL, i;
1518
1519	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1520	if (!dev)
1521		return -EINVAL;
1522
1523	if (!args->n_devices) {
1524		pr_debug("Device IDs array empty\n");
1525		return -EINVAL;
1526	}
1527	if (args->n_success > args->n_devices) {
1528		pr_debug("n_success exceeds n_devices\n");
1529		return -EINVAL;
1530	}
1531
1532	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1533				    GFP_KERNEL);
1534	if (!devices_arr)
1535		return -ENOMEM;
1536
1537	err = copy_from_user(devices_arr,
1538			     (void __user *)args->device_ids_array_ptr,
1539			     args->n_devices * sizeof(*devices_arr));
1540	if (err != 0) {
1541		err = -EFAULT;
1542		goto copy_from_user_failed;
1543	}
1544
1545	mutex_lock(&p->mutex);
1546
1547	pdd = kfd_get_process_device_data(dev, p);
1548	if (!pdd) {
1549		err = -EINVAL;
1550		goto bind_process_to_device_failed;
1551	}
1552
1553	mem = kfd_process_device_translate_handle(pdd,
1554						GET_IDR_HANDLE(args->handle));
1555	if (!mem) {
1556		err = -ENOMEM;
1557		goto get_mem_obj_from_handle_failed;
1558	}
1559
1560	for (i = args->n_success; i < args->n_devices; i++) {
1561		peer = kfd_device_by_id(devices_arr[i]);
1562		if (!peer) {
1563			err = -EINVAL;
1564			goto get_mem_obj_from_handle_failed;
1565		}
1566
1567		peer_pdd = kfd_get_process_device_data(peer, p);
1568		if (!peer_pdd) {
1569			err = -ENODEV;
1570			goto get_mem_obj_from_handle_failed;
1571		}
1572		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1573			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
1574		if (err) {
1575			pr_err("Failed to unmap from gpu %d/%d\n",
1576			       i, args->n_devices);
1577			goto unmap_memory_from_gpu_failed;
1578		}
1579		args->n_success = i+1;
1580	}
1581	kfree(devices_arr);
1582
1583	mutex_unlock(&p->mutex);
1584
1585	return 0;
1586
1587bind_process_to_device_failed:
1588get_mem_obj_from_handle_failed:
1589unmap_memory_from_gpu_failed:
 
1590	mutex_unlock(&p->mutex);
1591copy_from_user_failed:
1592	kfree(devices_arr);
1593	return err;
1594}
1595
1596static int kfd_ioctl_alloc_queue_gws(struct file *filep,
1597		struct kfd_process *p, void *data)
1598{
1599	int retval;
1600	struct kfd_ioctl_alloc_queue_gws_args *args = data;
1601	struct queue *q;
1602	struct kfd_dev *dev;
1603
1604	mutex_lock(&p->mutex);
1605	q = pqm_get_user_queue(&p->pqm, args->queue_id);
1606
1607	if (q) {
1608		dev = q->device;
1609	} else {
1610		retval = -EINVAL;
1611		goto out_unlock;
1612	}
1613
1614	if (!dev->gws) {
1615		retval = -ENODEV;
1616		goto out_unlock;
1617	}
1618
1619	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1620		retval = -ENODEV;
1621		goto out_unlock;
1622	}
1623
1624	retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
1625	mutex_unlock(&p->mutex);
1626
1627	args->first_gws = 0;
1628	return retval;
1629
1630out_unlock:
1631	mutex_unlock(&p->mutex);
1632	return retval;
1633}
1634
1635static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1636		struct kfd_process *p, void *data)
1637{
1638	struct kfd_ioctl_get_dmabuf_info_args *args = data;
1639	struct kfd_dev *dev = NULL;
1640	struct kgd_dev *dma_buf_kgd;
1641	void *metadata_buffer = NULL;
1642	uint32_t flags;
 
1643	unsigned int i;
1644	int r;
1645
1646	/* Find a KFD GPU device that supports the get_dmabuf_info query */
1647	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1648		if (dev)
1649			break;
1650	if (!dev)
1651		return -EINVAL;
1652
1653	if (args->metadata_ptr) {
1654		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1655		if (!metadata_buffer)
1656			return -ENOMEM;
1657	}
1658
1659	/* Get dmabuf info from KGD */
1660	r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
1661					  &dma_buf_kgd, &args->size,
1662					  metadata_buffer, args->metadata_size,
1663					  &args->metadata_size, &flags);
1664	if (r)
1665		goto exit;
1666
1667	/* Reverse-lookup gpu_id from kgd pointer */
1668	dev = kfd_device_by_kgd(dma_buf_kgd);
1669	if (!dev) {
1670		r = -EINVAL;
1671		goto exit;
1672	}
1673	args->gpu_id = dev->id;
1674	args->flags = flags;
1675
1676	/* Copy metadata buffer to user mode */
1677	if (metadata_buffer) {
1678		r = copy_to_user((void __user *)args->metadata_ptr,
1679				 metadata_buffer, args->metadata_size);
1680		if (r != 0)
1681			r = -EFAULT;
1682	}
1683
1684exit:
1685	kfree(metadata_buffer);
1686
1687	return r;
1688}
1689
1690static int kfd_ioctl_import_dmabuf(struct file *filep,
1691				   struct kfd_process *p, void *data)
1692{
1693	struct kfd_ioctl_import_dmabuf_args *args = data;
1694	struct kfd_process_device *pdd;
1695	struct dma_buf *dmabuf;
1696	struct kfd_dev *dev;
1697	int idr_handle;
1698	uint64_t size;
1699	void *mem;
1700	int r;
1701
1702	dev = kfd_device_by_id(args->gpu_id);
1703	if (!dev)
1704		return -EINVAL;
1705
1706	dmabuf = dma_buf_get(args->dmabuf_fd);
1707	if (IS_ERR(dmabuf))
1708		return PTR_ERR(dmabuf);
1709
1710	mutex_lock(&p->mutex);
1711
1712	pdd = kfd_bind_process_to_device(dev, p);
1713	if (IS_ERR(pdd)) {
1714		r = PTR_ERR(pdd);
1715		goto err_unlock;
1716	}
1717
1718	r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
1719					      args->va_addr, pdd->vm,
1720					      (struct kgd_mem **)&mem, &size,
1721					      NULL);
1722	if (r)
1723		goto err_unlock;
1724
1725	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1726	if (idr_handle < 0) {
1727		r = -EFAULT;
1728		goto err_free;
1729	}
1730
1731	mutex_unlock(&p->mutex);
1732
1733	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1734
1735	return 0;
1736
1737err_free:
1738	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
 
1739err_unlock:
1740	mutex_unlock(&p->mutex);
1741	return r;
1742}
1743
1744/* Handle requests for watching SMI events */
1745static int kfd_ioctl_smi_events(struct file *filep,
1746				struct kfd_process *p, void *data)
1747{
1748	struct kfd_ioctl_smi_events_args *args = data;
1749	struct kfd_dev *dev;
1750
1751	dev = kfd_device_by_id(args->gpuid);
1752	if (!dev)
1753		return -EINVAL;
1754
1755	return kfd_smi_event_open(dev, &args->anon_fd);
1756}
1757
1758#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
1759	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
1760			    .cmd_drv = 0, .name = #ioctl}
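/*
 * Illustrative expansion of one table entry below: because entries are
 * indexed by _IOC_NR(), e.g.
 *   AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, kfd_ioctl_get_version, 0)
 * expands to
 *   [_IOC_NR(AMDKFD_IOC_GET_VERSION)] = { .cmd = AMDKFD_IOC_GET_VERSION,
 *                                         .func = kfd_ioctl_get_version,
 *                                         .flags = 0, .cmd_drv = 0,
 *                                         .name = "AMDKFD_IOC_GET_VERSION" },
 * kfd_ioctl() further below can look the handler up directly by
 * _IOC_NR(cmd).
 */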
1761
1762/** Ioctl table */
1763static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
1764	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
1765			kfd_ioctl_get_version, 0),
1766
1767	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
1768			kfd_ioctl_create_queue, 0),
1769
1770	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
1771			kfd_ioctl_destroy_queue, 0),
1772
1773	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
1774			kfd_ioctl_set_memory_policy, 0),
1775
1776	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
1777			kfd_ioctl_get_clock_counters, 0),
1778
1779	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
1780			kfd_ioctl_get_process_apertures, 0),
1781
1782	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
1783			kfd_ioctl_update_queue, 0),
1784
1785	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
1786			kfd_ioctl_create_event, 0),
1787
1788	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
1789			kfd_ioctl_destroy_event, 0),
1790
1791	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
1792			kfd_ioctl_set_event, 0),
1793
1794	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
1795			kfd_ioctl_reset_event, 0),
1796
1797	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
1798			kfd_ioctl_wait_events, 0),
1799
1800	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
1801			kfd_ioctl_dbg_register, 0),
1802
1803	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
1804			kfd_ioctl_dbg_unregister, 0),
1805
1806	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
1807			kfd_ioctl_dbg_address_watch, 0),
1808
1809	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
1810			kfd_ioctl_dbg_wave_control, 0),
1811
1812	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
1813			kfd_ioctl_set_scratch_backing_va, 0),
1814
1815	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
1816			kfd_ioctl_get_tile_config, 0),
1817
1818	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
1819			kfd_ioctl_set_trap_handler, 0),
1820
1821	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
1822			kfd_ioctl_get_process_apertures_new, 0),
1823
1824	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
1825			kfd_ioctl_acquire_vm, 0),
1826
1827	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
1828			kfd_ioctl_alloc_memory_of_gpu, 0),
1829
1830	AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
1831			kfd_ioctl_free_memory_of_gpu, 0),
1832
1833	AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
1834			kfd_ioctl_map_memory_to_gpu, 0),
1835
1836	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
1837			kfd_ioctl_unmap_memory_from_gpu, 0),
1838
1839	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
1840			kfd_ioctl_set_cu_mask, 0),
1841
1842	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
1843			kfd_ioctl_get_queue_wave_state, 0),
1844
1845	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
1846				kfd_ioctl_get_dmabuf_info, 0),
1847
1848	AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
1849				kfd_ioctl_import_dmabuf, 0),
1850
1851	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
1852			kfd_ioctl_alloc_queue_gws, 0),
1853
1854	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
1855			kfd_ioctl_smi_events, 0),
1856};
1857
1858#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)
1859
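/*
 * Dispatcher for all amdkfd ioctls listed in the table above. A minimal
 * user-space sketch of how one of these commands is reached (hypothetical
 * example, not part of the driver):
 *
 *	int fd = open("/dev/kfd", O_RDWR);
 *	struct kfd_ioctl_get_version_args args = {};
 *
 *	if (fd >= 0 && !ioctl(fd, AMDKFD_IOC_GET_VERSION, &args))
 *		printf("KFD ioctl interface %u.%u\n",
 *		       args.major_version, args.minor_version);
 */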
1860static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
1861{
1862	struct kfd_process *process;
1863	amdkfd_ioctl_t *func;
1864	const struct amdkfd_ioctl_desc *ioctl = NULL;
1865	unsigned int nr = _IOC_NR(cmd);
1866	char stack_kdata[128];
1867	char *kdata = NULL;
1868	unsigned int usize, asize;
1869	int retcode = -EINVAL;
1870
1871	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
1872		goto err_i1;
1873
1874	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
1875		u32 amdkfd_size;
1876
1877		ioctl = &amdkfd_ioctls[nr];
1878
1879		amdkfd_size = _IOC_SIZE(ioctl->cmd);
1880		usize = asize = _IOC_SIZE(cmd);
1881		if (amdkfd_size > asize)
1882			asize = amdkfd_size;
1883
1884		cmd = ioctl->cmd;
1885	} else
1886		goto err_i1;
1887
1888	dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
1889
1890	/* Get the process struct from the filep. Only the process
1891	 * that opened /dev/kfd can use the file descriptor. Child
1892	 * processes need to create their own KFD device context.
1893	 */
1894	process = filep->private_data;
1895	if (process->lead_thread != current->group_leader) {
1896		dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
1897		retcode = -EBADF;
1898		goto err_i1;
1899	}
1900
1901	/* Do not trust userspace, use our own definition */
1902	func = ioctl->func;
1903
1904	if (unlikely(!func)) {
1905		dev_dbg(kfd_device, "no function\n");
1906		retcode = -EINVAL;
1907		goto err_i1;
1908	}
1909
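	/*
	 * Marshal the ioctl payload into kernel memory: small payloads use the
	 * on-stack buffer, larger ones are kmalloc'ed. If the kernel's
	 * definition of the command is larger than what user space passed in,
	 * the trailing bytes are zero-filled.
	 */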
1910	if (cmd & (IOC_IN | IOC_OUT)) {
1911		if (asize <= sizeof(stack_kdata)) {
1912			kdata = stack_kdata;
1913		} else {
1914			kdata = kmalloc(asize, GFP_KERNEL);
1915			if (!kdata) {
1916				retcode = -ENOMEM;
1917				goto err_i1;
1918			}
1919		}
1920		if (asize > usize)
1921			memset(kdata + usize, 0, asize - usize);
1922	}
1923
1924	if (cmd & IOC_IN) {
1925		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
1926			retcode = -EFAULT;
1927			goto err_i1;
1928		}
1929	} else if (cmd & IOC_OUT) {
1930		memset(kdata, 0, usize);
1931	}
1932
1933	retcode = func(filep, process, kdata);
1934
1935	if (cmd & IOC_OUT)
1936		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
1937			retcode = -EFAULT;
1938
1939err_i1:
1940	if (!ioctl)
1941		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
1942			  task_pid_nr(current), cmd, nr);
1943
1944	if (kdata != stack_kdata)
1945		kfree(kdata);
1946
1947	if (retcode)
1948		dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
1949				nr, arg, retcode);
1950
1951	return retcode;
1952}
1953
1954static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
1955		      struct vm_area_struct *vma)
1956{
1957	phys_addr_t address;
1958	int ret;
1959
1960	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1961		return -EINVAL;
1962
1963	address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
1964
1965	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
1966				VM_DONTDUMP | VM_PFNMAP;
1967
1968	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1969
1970	pr_debug("pasid 0x%x mapping mmio page\n"
1971		 "     target user address == 0x%08llX\n"
1972		 "     physical address    == 0x%08llX\n"
1973		 "     vm_flags            == 0x%04lX\n"
1974		 "     size                == 0x%04lX\n",
1975		 process->pasid, (unsigned long long) vma->vm_start,
1976		 address, vma->vm_flags, PAGE_SIZE);
1977
1978	ret = io_remap_pfn_range(vma,
1979				vma->vm_start,
1980				address >> PAGE_SHIFT,
1981				PAGE_SIZE,
1982				vma->vm_page_prot);
1983	return ret;
1984}
1985
1986
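/*
 * The mmap offset (vm_pgoff) encodes both the mapping type (doorbell,
 * events, reserved memory or the MMIO remap page) and the GPU ID of the
 * target device; kfd_mmap() decodes it and forwards to the matching handler.
 */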
1987static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
1988{
1989	struct kfd_process *process;
1990	struct kfd_dev *dev = NULL;
1991	unsigned long mmap_offset;
1992	unsigned int gpu_id;
1993
1994	process = kfd_get_process(current);
1995	if (IS_ERR(process))
1996		return PTR_ERR(process);
1997
1998	mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
1999	gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
2000	if (gpu_id)
2001		dev = kfd_device_by_id(gpu_id);
2002
2003	switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
2004	case KFD_MMAP_TYPE_DOORBELL:
2005		if (!dev)
2006			return -ENODEV;
2007		return kfd_doorbell_mmap(dev, process, vma);
2008
2009	case KFD_MMAP_TYPE_EVENTS:
2010		return kfd_event_mmap(process, vma);
2011
2012	case KFD_MMAP_TYPE_RESERVED_MEM:
2013		if (!dev)
2014			return -ENODEV;
2015		return kfd_reserved_mem_mmap(dev, process, vma);
2016	case KFD_MMAP_TYPE_MMIO:
2017		if (!dev)
2018			return -ENODEV;
2019		return kfd_mmio_mmap(dev, process, vma);
2020	}
2021
2022	return -EFAULT;
2023}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/*
   3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice shall be included in
  13 * all copies or substantial portions of the Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21 * OTHER DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <linux/device.h>
  25#include <linux/export.h>
  26#include <linux/err.h>
  27#include <linux/fs.h>
  28#include <linux/file.h>
  29#include <linux/sched.h>
  30#include <linux/slab.h>
  31#include <linux/uaccess.h>
  32#include <linux/compat.h>
  33#include <uapi/linux/kfd_ioctl.h>
  34#include <linux/time.h>
  35#include <linux/mm.h>
  36#include <linux/mman.h>
  37#include <linux/ptrace.h>
  38#include <linux/dma-buf.h>
  39#include <linux/processor.h>
  40#include "kfd_priv.h"
  41#include "kfd_device_queue_manager.h"
  42#include "kfd_svm.h"
  43#include "amdgpu_amdkfd.h"
  44#include "kfd_smi_events.h"
  45#include "amdgpu_dma_buf.h"
  46#include "kfd_debug.h"
  47
  48static long kfd_ioctl(struct file *, unsigned int, unsigned long);
  49static int kfd_open(struct inode *, struct file *);
  50static int kfd_release(struct inode *, struct file *);
  51static int kfd_mmap(struct file *, struct vm_area_struct *);
  52
  53static const char kfd_dev_name[] = "kfd";
  54
  55static const struct file_operations kfd_fops = {
  56	.owner = THIS_MODULE,
  57	.unlocked_ioctl = kfd_ioctl,
  58	.compat_ioctl = compat_ptr_ioctl,
  59	.open = kfd_open,
  60	.release = kfd_release,
  61	.mmap = kfd_mmap,
  62};
  63
  64static int kfd_char_dev_major = -1;
  65struct device *kfd_device;
  66static const struct class kfd_class = {
  67	.name = kfd_dev_name,
  68};
  69
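/*
 * Look up a process device by GPU ID. On success the process mutex is left
 * held and must be dropped with kfd_unlock_pdd(); on failure the mutex is
 * released and NULL is returned.
 */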
  70static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
  71{
  72	struct kfd_process_device *pdd;
  73
  74	mutex_lock(&p->mutex);
  75	pdd = kfd_process_device_data_by_id(p, gpu_id);
  76
  77	if (pdd)
  78		return pdd;
  79
  80	mutex_unlock(&p->mutex);
  81	return NULL;
  82}
  83
  84static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
  85{
  86	mutex_unlock(&pdd->process->mutex);
  87}
  88
  89int kfd_chardev_init(void)
  90{
  91	int err = 0;
  92
  93	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
  94	err = kfd_char_dev_major;
  95	if (err < 0)
  96		goto err_register_chrdev;
  97
  98	err = class_register(&kfd_class);
  99	if (err)
 100		goto err_class_create;
 101
 102	kfd_device = device_create(&kfd_class, NULL,
 103				   MKDEV(kfd_char_dev_major, 0),
 104				   NULL, kfd_dev_name);
 105	err = PTR_ERR(kfd_device);
 106	if (IS_ERR(kfd_device))
 107		goto err_device_create;
 108
 109	return 0;
 110
 111err_device_create:
 112	class_unregister(&kfd_class);
 113err_class_create:
 114	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
 115err_register_chrdev:
 116	return err;
 117}
 118
 119void kfd_chardev_exit(void)
 120{
 121	device_destroy(&kfd_class, MKDEV(kfd_char_dev_major, 0));
 122	class_unregister(&kfd_class);
 123	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
 124	kfd_device = NULL;
 125}
 126
 127
 128static int kfd_open(struct inode *inode, struct file *filep)
 129{
 130	struct kfd_process *process;
 131	bool is_32bit_user_mode;
 132
 133	if (iminor(inode) != 0)
 134		return -ENODEV;
 135
 136	is_32bit_user_mode = in_compat_syscall();
 137
 138	if (is_32bit_user_mode) {
 139		dev_warn(kfd_device,
 140			"Process %d (32-bit) failed to open /dev/kfd\n"
 141			"32-bit processes are not supported by amdkfd\n",
 142			current->pid);
 143		return -EPERM;
 144	}
 145
 146	process = kfd_create_process(current);
 147	if (IS_ERR(process))
 148		return PTR_ERR(process);
 149
 150	if (kfd_process_init_cwsr_apu(process, filep)) {
 151		kfd_unref_process(process);
 152		return -EFAULT;
 153	}
 154
 155	/* filep now owns the reference returned by kfd_create_process */
 156	filep->private_data = process;
 157
 158	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
 159		process->pasid, process->is_32bit_user_mode);
 160
 161	return 0;
 162}
 163
 164static int kfd_release(struct inode *inode, struct file *filep)
 165{
 166	struct kfd_process *process = filep->private_data;
 167
 168	if (process)
 169		kfd_unref_process(process);
 170
 171	return 0;
 172}
 173
 174static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
 175					void *data)
 176{
 177	struct kfd_ioctl_get_version_args *args = data;
 178
 179	args->major_version = KFD_IOCTL_MAJOR_VERSION;
 180	args->minor_version = KFD_IOCTL_MINOR_VERSION;
 181
 182	return 0;
 183}
 184
 185static int set_queue_properties_from_user(struct queue_properties *q_properties,
 186				struct kfd_ioctl_create_queue_args *args)
 187{
 188	/*
 189	 * Repurpose queue percentage to accommodate new features:
 190	 * bit 0-7: queue percentage
 191	 * bit 8-15: pm4_target_xcc
 192	 */
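	/* e.g. user space would pack (illustrative names only):
	 * queue_percentage = (pm4_target_xcc << 8) | queue_percent
	 */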
 193	if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
 194		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
 195		return -EINVAL;
 196	}
 197
 198	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
 199		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
 200		return -EINVAL;
 201	}
 202
 203	if ((args->ring_base_address) &&
 204		(!access_ok((const void __user *) args->ring_base_address,
 205			sizeof(uint64_t)))) {
 206		pr_err("Can't access ring base address\n");
 207		return -EFAULT;
 208	}
 209
 210	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
 211		pr_err("Ring size must be a power of 2 or 0\n");
 212		return -EINVAL;
 213	}
 214
 215	if (!access_ok((const void __user *) args->read_pointer_address,
 216			sizeof(uint32_t))) {
 217		pr_err("Can't access read pointer\n");
 218		return -EFAULT;
 219	}
 220
 221	if (!access_ok((const void __user *) args->write_pointer_address,
 222			sizeof(uint32_t))) {
 223		pr_err("Can't access write pointer\n");
 224		return -EFAULT;
 225	}
 226
 227	if (args->eop_buffer_address &&
 228		!access_ok((const void __user *) args->eop_buffer_address,
 229			sizeof(uint32_t))) {
 230		pr_debug("Can't access eop buffer");
 231		return -EFAULT;
 232	}
 233
 234	if (args->ctx_save_restore_address &&
 235		!access_ok((const void __user *) args->ctx_save_restore_address,
 236			sizeof(uint32_t))) {
 237		pr_debug("Can't access ctx save restore buffer");
 238		return -EFAULT;
 239	}
 240
 241	q_properties->is_interop = false;
 242	q_properties->is_gws = false;
 243	q_properties->queue_percent = args->queue_percentage & 0xFF;
 244	/* bit 8-15 are repurposed to be PM4 target XCC */
 245	q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
 246	q_properties->priority = args->queue_priority;
 247	q_properties->queue_address = args->ring_base_address;
 248	q_properties->queue_size = args->ring_size;
 249	q_properties->read_ptr = (void __user *)args->read_pointer_address;
 250	q_properties->write_ptr = (void __user *)args->write_pointer_address;
 251	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
 252	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
 253	q_properties->ctx_save_restore_area_address =
 254			args->ctx_save_restore_address;
 255	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
 256	q_properties->ctl_stack_size = args->ctl_stack_size;
 257	q_properties->sdma_engine_id = args->sdma_engine_id;
 258	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
 259		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
 260		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
 261	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
 262		q_properties->type = KFD_QUEUE_TYPE_SDMA;
 263	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
 264		q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
 265	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID)
 266		q_properties->type = KFD_QUEUE_TYPE_SDMA_BY_ENG_ID;
 267	else
 268		return -ENOTSUPP;
 269
 270	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
 271		q_properties->format = KFD_QUEUE_FORMAT_AQL;
 272	else
 273		q_properties->format = KFD_QUEUE_FORMAT_PM4;
 274
 275	pr_debug("Queue Percentage: %d, %d\n",
 276			q_properties->queue_percent, args->queue_percentage);
 277
 278	pr_debug("Queue Priority: %d, %d\n",
 279			q_properties->priority, args->queue_priority);
 280
 281	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
 282			q_properties->queue_address, args->ring_base_address);
 283
 284	pr_debug("Queue Size: 0x%llX, %u\n",
 285			q_properties->queue_size, args->ring_size);
 286
 287	pr_debug("Queue r/w Pointers: %px, %px\n",
 288			q_properties->read_ptr,
 289			q_properties->write_ptr);
 290
 291	pr_debug("Queue Format: %d\n", q_properties->format);
 292
 293	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
 294
 295	pr_debug("Queue CTX save area: 0x%llX\n",
 296			q_properties->ctx_save_restore_area_address);
 297
 298	return 0;
 299}
 300
 301static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 302					void *data)
 303{
 304	struct kfd_ioctl_create_queue_args *args = data;
 305	struct kfd_node *dev;
 306	int err = 0;
 307	unsigned int queue_id;
 308	struct kfd_process_device *pdd;
 309	struct queue_properties q_properties;
 310	uint32_t doorbell_offset_in_process = 0;
 311
 312	memset(&q_properties, 0, sizeof(struct queue_properties));
 313
 314	pr_debug("Creating queue ioctl\n");
 315
 316	err = set_queue_properties_from_user(&q_properties, args);
 317	if (err)
 318		return err;
 319
 320	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
 321
 322	mutex_lock(&p->mutex);
 323
 324	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
 325	if (!pdd) {
 326		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
 327		err = -EINVAL;
 328		goto err_pdd;
 329	}
 330	dev = pdd->dev;
 331
 332	pdd = kfd_bind_process_to_device(dev, p);
 333	if (IS_ERR(pdd)) {
 334		err = -ESRCH;
 335		goto err_bind_process;
 336	}
 337
 338	if (q_properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
 339		int max_sdma_eng_id = kfd_get_num_sdma_engines(dev) +
 340				      kfd_get_num_xgmi_sdma_engines(dev) - 1;
 341
 342		if (q_properties.sdma_engine_id > max_sdma_eng_id) {
 343			err = -EINVAL;
 344			pr_err("sdma_engine_id %i exceeds maximum id of %i\n",
 345			       q_properties.sdma_engine_id, max_sdma_eng_id);
 346			goto err_sdma_engine_id;
 347		}
 348	}
 349
 350	if (!pdd->qpd.proc_doorbells) {
 351		err = kfd_alloc_process_doorbells(dev->kfd, pdd);
 352		if (err) {
 353			pr_debug("failed to allocate process doorbells\n");
 354			goto err_bind_process;
 355		}
 356	}
 357
 358	err = kfd_queue_acquire_buffers(pdd, &q_properties);
 359	if (err) {
 360		pr_debug("failed to acquire user queue buffers\n");
 361		goto err_acquire_queue_buf;
 362	}
 363
 364	pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
 365			p->pasid,
 366			dev->id);
 367
 368	err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id,
 369			NULL, NULL, NULL, &doorbell_offset_in_process);
 370	if (err != 0)
 371		goto err_create_queue;
 372
 373	args->queue_id = queue_id;
 374
 375
 376	/* Return gpu_id as doorbell offset for mmap usage */
 377	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
 378	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
 379	if (KFD_IS_SOC15(dev))
 380		/* On SOC15 ASICs, include the doorbell offset within the
 381		 * process doorbell frame, which is 2 pages.
 382		 */
 383		args->doorbell_offset |= doorbell_offset_in_process;
 384
 385	mutex_unlock(&p->mutex);
 386
 387	pr_debug("Queue id %d was created successfully\n", args->queue_id);
 388
 389	pr_debug("Ring buffer address == 0x%016llX\n",
 390			args->ring_base_address);
 391
 392	pr_debug("Read ptr address    == 0x%016llX\n",
 393			args->read_pointer_address);
 394
 395	pr_debug("Write ptr address   == 0x%016llX\n",
 396			args->write_pointer_address);
 397
 398	kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
 399	return 0;
 400
 401err_create_queue:
 402	kfd_queue_unref_bo_vas(pdd, &q_properties);
 403	kfd_queue_release_buffers(pdd, &q_properties);
 404err_acquire_queue_buf:
 405err_sdma_engine_id:
 406err_bind_process:
 407err_pdd:
 408	mutex_unlock(&p->mutex);
 409	return err;
 410}
 411
 412static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
 413					void *data)
 414{
 415	int retval;
 416	struct kfd_ioctl_destroy_queue_args *args = data;
 417
 418	pr_debug("Destroying queue id %d for pasid 0x%x\n",
 419				args->queue_id,
 420				p->pasid);
 421
 422	mutex_lock(&p->mutex);
 423
 424	retval = pqm_destroy_queue(&p->pqm, args->queue_id);
 425
 426	mutex_unlock(&p->mutex);
 427	return retval;
 428}
 429
 430static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 431					void *data)
 432{
 433	int retval;
 434	struct kfd_ioctl_update_queue_args *args = data;
 435	struct queue_properties properties;
 436
 437	/*
 438	 * Repurpose queue percentage to accommodate new features:
 439	 * bit 0-7: queue percentage
 440	 * bit 8-15: pm4_target_xcc
 441	 */
 442	if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
 443		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
 444		return -EINVAL;
 445	}
 446
 447	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
 448		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
 449		return -EINVAL;
 450	}
 451
 452	if ((args->ring_base_address) &&
 453		(!access_ok((const void __user *) args->ring_base_address,
 454			sizeof(uint64_t)))) {
 455		pr_err("Can't access ring base address\n");
 456		return -EFAULT;
 457	}
 458
 459	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
 460		pr_err("Ring size must be a power of 2 or 0\n");
 461		return -EINVAL;
 462	}
 463
 464	properties.queue_address = args->ring_base_address;
 465	properties.queue_size = args->ring_size;
 466	properties.queue_percent = args->queue_percentage & 0xFF;
 467	/* bit 8-15 are repurposed to be PM4 target XCC */
 468	properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
 469	properties.priority = args->queue_priority;
 470
 471	pr_debug("Updating queue id %d for pasid 0x%x\n",
 472			args->queue_id, p->pasid);
 473
 474	mutex_lock(&p->mutex);
 475
 476	retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
 477
 478	mutex_unlock(&p->mutex);
 479
 480	return retval;
 481}
 482
 483static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
 484					void *data)
 485{
 486	int retval;
 487	const int max_num_cus = 1024;
 488	struct kfd_ioctl_set_cu_mask_args *args = data;
 489	struct mqd_update_info minfo = {0};
 490	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
 491	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
 492
 493	if ((args->num_cu_mask % 32) != 0) {
 494		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
 495				args->num_cu_mask);
 496		return -EINVAL;
 497	}
 498
 499	minfo.cu_mask.count = args->num_cu_mask;
 500	if (minfo.cu_mask.count == 0) {
 501		pr_debug("CU mask cannot be 0");
 502		return -EINVAL;
 503	}
 504
 505	/* To prevent an unreasonably large CU mask size, set an arbitrary
 506	 * limit of max_num_cus bits.  We can then just drop any CU mask bits
 507	 * past max_num_cus bits and just use the first max_num_cus bits.
 508	 */
 509	if (minfo.cu_mask.count > max_num_cus) {
 510		pr_debug("CU mask cannot be greater than 1024 bits");
 511		minfo.cu_mask.count = max_num_cus;
 512		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
 513	}
 514
 515	minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
 516	if (!minfo.cu_mask.ptr)
 517		return -ENOMEM;
 518
 519	retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
 520	if (retval) {
 521		pr_debug("Could not copy CU mask from userspace");
 522		retval = -EFAULT;
 523		goto out;
 524	}
 525
 526	mutex_lock(&p->mutex);
 527
 528	retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
 529
 530	mutex_unlock(&p->mutex);
 531
 532out:
 533	kfree(minfo.cu_mask.ptr);
 534	return retval;
 535}
 536
 537static int kfd_ioctl_get_queue_wave_state(struct file *filep,
 538					  struct kfd_process *p, void *data)
 539{
 540	struct kfd_ioctl_get_queue_wave_state_args *args = data;
 541	int r;
 542
 543	mutex_lock(&p->mutex);
 544
 545	r = pqm_get_wave_state(&p->pqm, args->queue_id,
 546			       (void __user *)args->ctl_stack_address,
 547			       &args->ctl_stack_used_size,
 548			       &args->save_area_used_size);
 549
 550	mutex_unlock(&p->mutex);
 551
 552	return r;
 553}
 554
 555static int kfd_ioctl_set_memory_policy(struct file *filep,
 556					struct kfd_process *p, void *data)
 557{
 558	struct kfd_ioctl_set_memory_policy_args *args = data;
 559	int err = 0;
 560	struct kfd_process_device *pdd;
 561	enum cache_policy default_policy, alternate_policy;
 562
 563	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
 564	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
 565		return -EINVAL;
 566	}
 567
 568	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
 569	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
 570		return -EINVAL;
 571	}
 572
 573	mutex_lock(&p->mutex);
 574	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
 575	if (!pdd) {
 576		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
 577		err = -EINVAL;
 578		goto err_pdd;
 579	}
 580
 581	pdd = kfd_bind_process_to_device(pdd->dev, p);
 582	if (IS_ERR(pdd)) {
 583		err = -ESRCH;
 584		goto out;
 585	}
 586
 587	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
 588			 ? cache_policy_coherent : cache_policy_noncoherent;
 589
 590	alternate_policy =
 591		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
 592		   ? cache_policy_coherent : cache_policy_noncoherent;
 593
 594	if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
 595				&pdd->qpd,
 596				default_policy,
 597				alternate_policy,
 598				(void __user *)args->alternate_aperture_base,
 599				args->alternate_aperture_size))
 600		err = -EINVAL;
 601
 602out:
 603err_pdd:
 604	mutex_unlock(&p->mutex);
 605
 606	return err;
 607}
 608
 609static int kfd_ioctl_set_trap_handler(struct file *filep,
 610					struct kfd_process *p, void *data)
 611{
 612	struct kfd_ioctl_set_trap_handler_args *args = data;
 613	int err = 0;
 614	struct kfd_process_device *pdd;
 615
 616	mutex_lock(&p->mutex);
 617
 618	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
 619	if (!pdd) {
 620		err = -EINVAL;
 621		goto err_pdd;
 622	}
 623
 624	pdd = kfd_bind_process_to_device(pdd->dev, p);
 625	if (IS_ERR(pdd)) {
 626		err = -ESRCH;
 627		goto out;
 628	}
 629
 630	kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
 631
 632out:
 633err_pdd:
 634	mutex_unlock(&p->mutex);
 635
 636	return err;
 637}
 638
 639static int kfd_ioctl_dbg_register(struct file *filep,
 640				struct kfd_process *p, void *data)
 641{
 642	return -EPERM;
 643}
 644
 645static int kfd_ioctl_dbg_unregister(struct file *filep,
 646				struct kfd_process *p, void *data)
 647{
 648	return -EPERM;
 649}
 650
 651static int kfd_ioctl_dbg_address_watch(struct file *filep,
 652					struct kfd_process *p, void *data)
 653{
 654	return -EPERM;
 655}
 656
 657/* Parse and generate fixed size data structure for wave control */
 658static int kfd_ioctl_dbg_wave_control(struct file *filep,
 659					struct kfd_process *p, void *data)
 660{
 661	return -EPERM;
 662}
 663
 664static int kfd_ioctl_get_clock_counters(struct file *filep,
 665				struct kfd_process *p, void *data)
 666{
 667	struct kfd_ioctl_get_clock_counters_args *args = data;
 668	struct kfd_process_device *pdd;
 669
 670	mutex_lock(&p->mutex);
 671	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
 672	mutex_unlock(&p->mutex);
 673	if (pdd)
 674		/* Reading GPU clock counter from KGD */
 675		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
 676	else
 677		/* Node without GPU resource */
 678		args->gpu_clock_counter = 0;
 679
 680	/* No access to rdtsc. Using raw monotonic time */
 681	args->cpu_clock_counter = ktime_get_raw_ns();
 682	args->system_clock_counter = ktime_get_boottime_ns();
 683
 684	/* Since the counter is in nano-seconds we use 1GHz frequency */
 685	args->system_clock_freq = 1000000000;
 686
 687	return 0;
 688}
 689
 690
 691static int kfd_ioctl_get_process_apertures(struct file *filp,
 692				struct kfd_process *p, void *data)
 693{
 694	struct kfd_ioctl_get_process_apertures_args *args = data;
 695	struct kfd_process_device_apertures *pAperture;
 696	int i;
 697
 698	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
 699
 700	args->num_of_nodes = 0;
 701
 702	mutex_lock(&p->mutex);
 703	/* Run over all pdd of the process */
 704	for (i = 0; i < p->n_pdds; i++) {
 705		struct kfd_process_device *pdd = p->pdds[i];
 706
 707		pAperture =
 708			&args->process_apertures[args->num_of_nodes];
 709		pAperture->gpu_id = pdd->dev->id;
 710		pAperture->lds_base = pdd->lds_base;
 711		pAperture->lds_limit = pdd->lds_limit;
 712		pAperture->gpuvm_base = pdd->gpuvm_base;
 713		pAperture->gpuvm_limit = pdd->gpuvm_limit;
 714		pAperture->scratch_base = pdd->scratch_base;
 715		pAperture->scratch_limit = pdd->scratch_limit;
 716
 717		dev_dbg(kfd_device,
 718			"node id %u\n", args->num_of_nodes);
 719		dev_dbg(kfd_device,
 720			"gpu id %u\n", pdd->dev->id);
 721		dev_dbg(kfd_device,
 722			"lds_base %llX\n", pdd->lds_base);
 723		dev_dbg(kfd_device,
 724			"lds_limit %llX\n", pdd->lds_limit);
 725		dev_dbg(kfd_device,
 726			"gpuvm_base %llX\n", pdd->gpuvm_base);
 727		dev_dbg(kfd_device,
 728			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
 729		dev_dbg(kfd_device,
 730			"scratch_base %llX\n", pdd->scratch_base);
 731		dev_dbg(kfd_device,
 732			"scratch_limit %llX\n", pdd->scratch_limit);
 733
 734		if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
 735			break;
 736	}
 737	mutex_unlock(&p->mutex);
 738
 739	return 0;
 740}
 741
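/*
 * Two-call protocol: a first call with num_of_nodes == 0 only reports how
 * many nodes exist so that user space can size its buffer; a second call
 * with a non-zero count copies out up to that many aperture descriptors.
 */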
 742static int kfd_ioctl_get_process_apertures_new(struct file *filp,
 743				struct kfd_process *p, void *data)
 744{
 745	struct kfd_ioctl_get_process_apertures_new_args *args = data;
 746	struct kfd_process_device_apertures *pa;
 747	int ret;
 748	int i;
 749
 750	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
 751
 752	if (args->num_of_nodes == 0) {
  753		/* Return number of nodes, so that user space can allocate
 754		 * sufficient memory
 755		 */
 756		mutex_lock(&p->mutex);
 757		args->num_of_nodes = p->n_pdds;
 758		goto out_unlock;
 759	}
 760
 761	/* Fill in process-aperture information for all available
 762	 * nodes, but not more than args->num_of_nodes as that is
 763	 * the amount of memory allocated by user
 764	 */
 765	pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
 766		     GFP_KERNEL);
 767	if (!pa)
 768		return -ENOMEM;
 769
 770	mutex_lock(&p->mutex);
 771
 772	if (!p->n_pdds) {
 773		args->num_of_nodes = 0;
 774		kfree(pa);
 775		goto out_unlock;
 776	}
 777
 778	/* Run over all pdd of the process */
 779	for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
 780		struct kfd_process_device *pdd = p->pdds[i];
 781
 782		pa[i].gpu_id = pdd->dev->id;
 783		pa[i].lds_base = pdd->lds_base;
 784		pa[i].lds_limit = pdd->lds_limit;
 785		pa[i].gpuvm_base = pdd->gpuvm_base;
 786		pa[i].gpuvm_limit = pdd->gpuvm_limit;
 787		pa[i].scratch_base = pdd->scratch_base;
 788		pa[i].scratch_limit = pdd->scratch_limit;
 789
 790		dev_dbg(kfd_device,
 791			"gpu id %u\n", pdd->dev->id);
 792		dev_dbg(kfd_device,
 793			"lds_base %llX\n", pdd->lds_base);
 794		dev_dbg(kfd_device,
 795			"lds_limit %llX\n", pdd->lds_limit);
 796		dev_dbg(kfd_device,
 797			"gpuvm_base %llX\n", pdd->gpuvm_base);
 798		dev_dbg(kfd_device,
 799			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
 800		dev_dbg(kfd_device,
 801			"scratch_base %llX\n", pdd->scratch_base);
 802		dev_dbg(kfd_device,
 803			"scratch_limit %llX\n", pdd->scratch_limit);
 804	}
 805	mutex_unlock(&p->mutex);
 806
 807	args->num_of_nodes = i;
 808	ret = copy_to_user(
 809			(void __user *)args->kfd_process_device_apertures_ptr,
 810			pa,
 811			(i * sizeof(struct kfd_process_device_apertures)));
 812	kfree(pa);
 813	return ret ? -EFAULT : 0;
 814
 815out_unlock:
 816	mutex_unlock(&p->mutex);
 817	return 0;
 818}
 819
 820static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
 821					void *data)
 822{
 823	struct kfd_ioctl_create_event_args *args = data;
 824	int err;
 825
 826	/* For dGPUs the event page is allocated in user mode. The
 827	 * handle is passed to KFD with the first call to this IOCTL
 828	 * through the event_page_offset field.
 829	 */
 830	if (args->event_page_offset) {
 831		mutex_lock(&p->mutex);
 832		err = kfd_kmap_event_page(p, args->event_page_offset);
 833		mutex_unlock(&p->mutex);
 834		if (err)
 835			return err;
 836	}
 837
 838	err = kfd_event_create(filp, p, args->event_type,
 839				args->auto_reset != 0, args->node_id,
 840				&args->event_id, &args->event_trigger_data,
 841				&args->event_page_offset,
 842				&args->event_slot_index);
 843
 844	pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
 845	return err;
 846}
 847
 848static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
 849					void *data)
 850{
 851	struct kfd_ioctl_destroy_event_args *args = data;
 852
 853	return kfd_event_destroy(p, args->event_id);
 854}
 855
 856static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
 857				void *data)
 858{
 859	struct kfd_ioctl_set_event_args *args = data;
 860
 861	return kfd_set_event(p, args->event_id);
 862}
 863
 864static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
 865				void *data)
 866{
 867	struct kfd_ioctl_reset_event_args *args = data;
 868
 869	return kfd_reset_event(p, args->event_id);
 870}
 871
 872static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
 873				void *data)
 874{
 875	struct kfd_ioctl_wait_events_args *args = data;
 876
 877	return kfd_wait_on_events(p, args->num_events,
 878			(void __user *)args->events_ptr,
 879			(args->wait_for_all != 0),
 880			&args->timeout, &args->wait_result);
 881}
 882static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
 883					struct kfd_process *p, void *data)
 884{
 885	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
 886	struct kfd_process_device *pdd;
 887	struct kfd_node *dev;
 888	long err;
 889
 890	mutex_lock(&p->mutex);
 891	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
 892	if (!pdd) {
 893		err = -EINVAL;
 894		goto err_pdd;
 895	}
 896	dev = pdd->dev;
 897
 898	pdd = kfd_bind_process_to_device(dev, p);
 899	if (IS_ERR(pdd)) {
 900		err = PTR_ERR(pdd);
 901		goto bind_process_to_device_fail;
 902	}
 903
 904	pdd->qpd.sh_hidden_private_base = args->va_addr;
 905
 906	mutex_unlock(&p->mutex);
 907
 908	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
 909	    pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
 910		dev->kfd2kgd->set_scratch_backing_va(
 911			dev->adev, args->va_addr, pdd->qpd.vmid);
 912
 913	return 0;
 914
 915bind_process_to_device_fail:
 916err_pdd:
 917	mutex_unlock(&p->mutex);
 918	return err;
 919}
 920
 921static int kfd_ioctl_get_tile_config(struct file *filep,
 922		struct kfd_process *p, void *data)
 923{
 924	struct kfd_ioctl_get_tile_config_args *args = data;
 925	struct kfd_process_device *pdd;
 926	struct tile_config config;
 927	int err = 0;
 928
 929	mutex_lock(&p->mutex);
 930	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
 931	mutex_unlock(&p->mutex);
 932	if (!pdd)
 933		return -EINVAL;
 934
 935	amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
 936
 937	args->gb_addr_config = config.gb_addr_config;
 938	args->num_banks = config.num_banks;
 939	args->num_ranks = config.num_ranks;
 940
 941	if (args->num_tile_configs > config.num_tile_configs)
 942		args->num_tile_configs = config.num_tile_configs;
 943	err = copy_to_user((void __user *)args->tile_config_ptr,
 944			config.tile_config_ptr,
 945			args->num_tile_configs * sizeof(uint32_t));
 946	if (err) {
 947		args->num_tile_configs = 0;
 948		return -EFAULT;
 949	}
 950
 951	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
 952		args->num_macro_tile_configs =
 953				config.num_macro_tile_configs;
 954	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
 955			config.macro_tile_config_ptr,
 956			args->num_macro_tile_configs * sizeof(uint32_t));
 957	if (err) {
 958		args->num_macro_tile_configs = 0;
 959		return -EFAULT;
 960	}
 961
 962	return 0;
 963}
 964
 965static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
 966				void *data)
 967{
 968	struct kfd_ioctl_acquire_vm_args *args = data;
 969	struct kfd_process_device *pdd;
 970	struct file *drm_file;
 971	int ret;
 972
 973	drm_file = fget(args->drm_fd);
 974	if (!drm_file)
 975		return -EINVAL;
 976
 977	mutex_lock(&p->mutex);
 978	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
 979	if (!pdd) {
 980		ret = -EINVAL;
 981		goto err_pdd;
 982	}
 983
 984	if (pdd->drm_file) {
 985		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
 986		goto err_drm_file;
 987	}
 988
 989	ret = kfd_process_device_init_vm(pdd, drm_file);
 990	if (ret)
 991		goto err_unlock;
 992
 993	/* On success, the PDD keeps the drm_file reference */
 994	mutex_unlock(&p->mutex);
 995
 996	return 0;
 997
 998err_unlock:
 999err_pdd:
1000err_drm_file:
1001	mutex_unlock(&p->mutex);
1002	fput(drm_file);
1003	return ret;
1004}
1005
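/*
 * A device counts as "large BAR" when all of its local memory is CPU
 * visible (or when debug_largebar forces that assumption). Only such
 * devices may serve host-accessible VRAM allocations below.
 */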
1006bool kfd_dev_is_large_bar(struct kfd_node *dev)
1007{
1008	if (dev->kfd->adev->debug_largebar) {
1009		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
1010		return true;
1011	}
1012
1013	if (dev->local_mem_info.local_mem_size_private == 0 &&
1014	    dev->local_mem_info.local_mem_size_public > 0)
1015		return true;
1016
1017	if (dev->local_mem_info.local_mem_size_public == 0 &&
1018	    dev->kfd->adev->gmc.is_app_apu) {
1019		pr_debug("APP APU, Consider like a large bar system\n");
1020		return true;
1021	}
1022
1023	return false;
1024}
1025
1026static int kfd_ioctl_get_available_memory(struct file *filep,
1027					  struct kfd_process *p, void *data)
1028{
1029	struct kfd_ioctl_get_available_memory_args *args = data;
1030	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
1031
1032	if (!pdd)
1033		return -EINVAL;
1034	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
1035							pdd->dev->node_id);
1036	kfd_unlock_pdd(pdd);
1037	return 0;
1038}
1039
1040static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1041					struct kfd_process *p, void *data)
1042{
1043	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1044	struct kfd_process_device *pdd;
1045	void *mem;
1046	struct kfd_node *dev;
1047	int idr_handle;
1048	long err;
1049	uint64_t offset = args->mmap_offset;
1050	uint32_t flags = args->flags;
1051
1052	if (args->size == 0)
1053		return -EINVAL;
1054
1055#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1056	/* Flush pending deferred work to avoid racing with deferred actions
1057	 * from previous memory map changes (e.g. munmap).
1058	 */
1059	svm_range_list_lock_and_flush_work(&p->svms, current->mm);
1060	mutex_lock(&p->svms.lock);
1061	mmap_write_unlock(current->mm);
1062	if (interval_tree_iter_first(&p->svms.objects,
1063				     args->va_addr >> PAGE_SHIFT,
1064				     (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
1065		pr_err("Address: 0x%llx already allocated by SVM\n",
1066			args->va_addr);
1067		mutex_unlock(&p->svms.lock);
1068		return -EADDRINUSE;
1069	}
1070
 1071	/* When registering a user buffer, check whether it has already been
 1072	 * registered by SVM via its CPU virtual address.
 1073	 */
1074	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
1075	    interval_tree_iter_first(&p->svms.objects,
1076				     args->mmap_offset >> PAGE_SHIFT,
1077				     (args->mmap_offset  + args->size - 1) >> PAGE_SHIFT)) {
1078		pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
1079			args->mmap_offset);
1080		mutex_unlock(&p->svms.lock);
1081		return -EADDRINUSE;
1082	}
1083
1084	mutex_unlock(&p->svms.lock);
1085#endif
1086	mutex_lock(&p->mutex);
1087	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1088	if (!pdd) {
1089		err = -EINVAL;
1090		goto err_pdd;
1091	}
1092
1093	dev = pdd->dev;
1094
1095	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1096		(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1097		!kfd_dev_is_large_bar(dev)) {
1098		pr_err("Alloc host visible vram on small bar is not allowed\n");
1099		err = -EINVAL;
1100		goto err_large_bar;
1101	}
1102
1103	pdd = kfd_bind_process_to_device(dev, p);
1104	if (IS_ERR(pdd)) {
1105		err = PTR_ERR(pdd);
1106		goto err_unlock;
1107	}
1108
1109	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1110		if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
1111			err = -EINVAL;
1112			goto err_unlock;
1113		}
1114		offset = kfd_get_process_doorbells(pdd);
1115		if (!offset) {
1116			err = -ENOMEM;
1117			goto err_unlock;
1118		}
1119	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1120		if (args->size != PAGE_SIZE) {
1121			err = -EINVAL;
1122			goto err_unlock;
1123		}
1124		offset = dev->adev->rmmio_remap.bus_addr;
1125		if (!offset || (PAGE_SIZE > 4096)) {
1126			err = -ENOMEM;
1127			goto err_unlock;
1128		}
1129	}
1130
1131	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1132		dev->adev, args->va_addr, args->size,
1133		pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
1134		flags, false);
1135
1136	if (err)
1137		goto err_unlock;
1138
1139	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1140	if (idr_handle < 0) {
1141		err = -EFAULT;
1142		goto err_free;
1143	}
1144
1145	/* Update the VRAM usage count */
1146	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1147		uint64_t size = args->size;
1148
1149		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
1150			size >>= 1;
1151		atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
1152	}
1153
1154	mutex_unlock(&p->mutex);
1155
1156	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1157	args->mmap_offset = offset;
1158
1159	/* MMIO is mapped through kfd device
1160	 * Generate a kfd mmap offset
1161	 */
1162	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1163		args->mmap_offset = KFD_MMAP_TYPE_MMIO
1164					| KFD_MMAP_GPU_ID(args->gpu_id);
1165
1166	return 0;
1167
1168err_free:
1169	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
1170					       pdd->drm_priv, NULL);
1171err_unlock:
1172err_pdd:
1173err_large_bar:
1174	mutex_unlock(&p->mutex);
1175	return err;
1176}
1177
1178static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1179					struct kfd_process *p, void *data)
1180{
1181	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1182	struct kfd_process_device *pdd;
1183	void *mem;
1184	int ret;
1185	uint64_t size = 0;
1186
1187	mutex_lock(&p->mutex);
1188	/*
1189	 * Safeguard to prevent user space from freeing signal BO.
1190	 * It will be freed at process termination.
1191	 */
1192	if (p->signal_handle && (p->signal_handle == args->handle)) {
1193		pr_err("Free signal BO is not allowed\n");
1194		ret = -EPERM;
1195		goto err_unlock;
1196	}
1197
1198	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1199	if (!pdd) {
1200		pr_err("Process device data doesn't exist\n");
1201		ret = -EINVAL;
1202		goto err_pdd;
1203	}
1204
1205	mem = kfd_process_device_translate_handle(
1206		pdd, GET_IDR_HANDLE(args->handle));
1207	if (!mem) {
1208		ret = -EINVAL;
1209		goto err_unlock;
1210	}
1211
1212	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
1213				(struct kgd_mem *)mem, pdd->drm_priv, &size);
1214
1215	/* If freeing the buffer failed, leave the handle in place for
1216	 * clean-up during process tear-down.
1217	 */
1218	if (!ret)
1219		kfd_process_device_remove_obj_handle(
1220			pdd, GET_IDR_HANDLE(args->handle));
1221
1222	atomic64_sub(size, &pdd->vram_usage);
1223
1224err_unlock:
1225err_pdd:
1226	mutex_unlock(&p->mutex);
1227	return ret;
1228}
1229
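/*
 * Map a buffer to the GPUVM of every device listed by user space. The loop
 * starts at args->n_success and bumps it after each successful mapping, so
 * a failed call can be retried and resumes with the remaining devices.
 */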
1230static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1231					struct kfd_process *p, void *data)
1232{
1233	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1234	struct kfd_process_device *pdd, *peer_pdd;
1235	void *mem;
1236	struct kfd_node *dev;
1237	long err = 0;
1238	int i;
1239	uint32_t *devices_arr = NULL;
1240
1241	if (!args->n_devices) {
1242		pr_debug("Device IDs array empty\n");
1243		return -EINVAL;
1244	}
1245	if (args->n_success > args->n_devices) {
1246		pr_debug("n_success exceeds n_devices\n");
1247		return -EINVAL;
1248	}
1249
1250	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1251				    GFP_KERNEL);
1252	if (!devices_arr)
1253		return -ENOMEM;
1254
1255	err = copy_from_user(devices_arr,
1256			     (void __user *)args->device_ids_array_ptr,
1257			     args->n_devices * sizeof(*devices_arr));
1258	if (err != 0) {
1259		err = -EFAULT;
1260		goto copy_from_user_failed;
1261	}
1262
1263	mutex_lock(&p->mutex);
1264	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1265	if (!pdd) {
1266		err = -EINVAL;
1267		goto get_process_device_data_failed;
1268	}
1269	dev = pdd->dev;
1270
1271	pdd = kfd_bind_process_to_device(dev, p);
1272	if (IS_ERR(pdd)) {
1273		err = PTR_ERR(pdd);
1274		goto bind_process_to_device_failed;
1275	}
1276
1277	mem = kfd_process_device_translate_handle(pdd,
1278						GET_IDR_HANDLE(args->handle));
1279	if (!mem) {
1280		err = -ENOMEM;
1281		goto get_mem_obj_from_handle_failed;
1282	}
1283
1284	for (i = args->n_success; i < args->n_devices; i++) {
1285		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1286		if (!peer_pdd) {
1287			pr_debug("Getting device by id failed for 0x%x\n",
1288				 devices_arr[i]);
1289			err = -EINVAL;
1290			goto get_mem_obj_from_handle_failed;
1291		}
1292
1293		peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
1294		if (IS_ERR(peer_pdd)) {
1295			err = PTR_ERR(peer_pdd);
1296			goto get_mem_obj_from_handle_failed;
1297		}
1298
1299		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1300			peer_pdd->dev->adev, (struct kgd_mem *)mem,
1301			peer_pdd->drm_priv);
1302		if (err) {
1303			struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
1304
1305			dev_err(dev->adev->dev,
1306			       "Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
1307			       pci_domain_nr(pdev->bus),
1308			       pdev->bus->number,
1309			       PCI_SLOT(pdev->devfn),
1310			       PCI_FUNC(pdev->devfn),
1311			       ((struct kgd_mem *)mem)->domain);
1312			goto map_memory_to_gpu_failed;
1313		}
1314		args->n_success = i+1;
1315	}
1316
1317	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
1318	if (err) {
1319		pr_debug("Sync memory failed, wait interrupted by user signal\n");
1320		goto sync_memory_failed;
1321	}
1322
1323	mutex_unlock(&p->mutex);
1324
1325	/* Flush TLBs after waiting for the page table updates to complete */
1326	for (i = 0; i < args->n_devices; i++) {
1327		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1328		if (WARN_ON_ONCE(!peer_pdd))
1329			continue;
1330		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
1331	}
1332	kfree(devices_arr);
1333
1334	return err;
1335
1336get_process_device_data_failed:
1337bind_process_to_device_failed:
1338get_mem_obj_from_handle_failed:
1339map_memory_to_gpu_failed:
1340sync_memory_failed:
1341	mutex_unlock(&p->mutex);
1342copy_from_user_failed:
1343	kfree(devices_arr);
1344
1345	return err;
1346}
1347
1348static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1349					struct kfd_process *p, void *data)
1350{
1351	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1352	struct kfd_process_device *pdd, *peer_pdd;
1353	void *mem;
1354	long err = 0;
1355	uint32_t *devices_arr = NULL, i;
1356	bool flush_tlb;
1357
1358	if (!args->n_devices) {
1359		pr_debug("Device IDs array empty\n");
1360		return -EINVAL;
1361	}
1362	if (args->n_success > args->n_devices) {
1363		pr_debug("n_success exceeds n_devices\n");
1364		return -EINVAL;
1365	}
1366
1367	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1368				    GFP_KERNEL);
1369	if (!devices_arr)
1370		return -ENOMEM;
1371
1372	err = copy_from_user(devices_arr,
1373			     (void __user *)args->device_ids_array_ptr,
1374			     args->n_devices * sizeof(*devices_arr));
1375	if (err != 0) {
1376		err = -EFAULT;
1377		goto copy_from_user_failed;
1378	}
1379
1380	mutex_lock(&p->mutex);
1381	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1382	if (!pdd) {
1383		err = -EINVAL;
1384		goto bind_process_to_device_failed;
1385	}
1386
1387	mem = kfd_process_device_translate_handle(pdd,
1388						GET_IDR_HANDLE(args->handle));
1389	if (!mem) {
1390		err = -ENOMEM;
1391		goto get_mem_obj_from_handle_failed;
1392	}
1393
1394	for (i = args->n_success; i < args->n_devices; i++) {
1395		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1396		if (!peer_pdd) {
1397			err = -EINVAL;
1398			goto get_mem_obj_from_handle_failed;
1399		}
1400		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1401			peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
1402		if (err) {
1403			pr_debug("Failed to unmap from gpu %d/%d\n", i, args->n_devices);
1404			goto unmap_memory_from_gpu_failed;
1405		}
1406		args->n_success = i+1;
1407	}
1408
1409	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
1410	if (flush_tlb) {
1411		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
1412				(struct kgd_mem *) mem, true);
1413		if (err) {
1414			pr_debug("Sync memory failed, wait interrupted by user signal\n");
1415			goto sync_memory_failed;
1416		}
1417	}
1418
1419	/* Flush TLBs after waiting for the page table updates to complete */
1420	for (i = 0; i < args->n_devices; i++) {
1421		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1422		if (WARN_ON_ONCE(!peer_pdd))
1423			continue;
1424		if (flush_tlb)
1425			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
1426
1427		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
1428		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
1429		if (err)
1430			goto sync_memory_failed;
1431	}
1432
1433	mutex_unlock(&p->mutex);
1434
1435	kfree(devices_arr);
1436
1437	return 0;
1438
1439bind_process_to_device_failed:
1440get_mem_obj_from_handle_failed:
1441unmap_memory_from_gpu_failed:
1442sync_memory_failed:
1443	mutex_unlock(&p->mutex);
1444copy_from_user_failed:
1445	kfree(devices_arr);
1446	return err;
1447}
1448
1449static int kfd_ioctl_alloc_queue_gws(struct file *filep,
1450		struct kfd_process *p, void *data)
1451{
1452	int retval;
1453	struct kfd_ioctl_alloc_queue_gws_args *args = data;
1454	struct queue *q;
1455	struct kfd_node *dev;
1456
1457	mutex_lock(&p->mutex);
1458	q = pqm_get_user_queue(&p->pqm, args->queue_id);
1459
1460	if (q) {
1461		dev = q->device;
1462	} else {
1463		retval = -EINVAL;
1464		goto out_unlock;
1465	}
1466
1467	if (!dev->gws) {
1468		retval = -ENODEV;
1469		goto out_unlock;
1470	}
1471
1472	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1473		retval = -ENODEV;
1474		goto out_unlock;
1475	}
1476
1477	if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
1478				      kfd_dbg_has_cwsr_workaround(dev))) {
1479		retval = -EBUSY;
1480		goto out_unlock;
1481	}
1482
1483	retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
1484	mutex_unlock(&p->mutex);
1485
1486	args->first_gws = 0;
1487	return retval;
1488
1489out_unlock:
1490	mutex_unlock(&p->mutex);
1491	return retval;
1492}
1493
1494static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1495		struct kfd_process *p, void *data)
1496{
1497	struct kfd_ioctl_get_dmabuf_info_args *args = data;
1498	struct kfd_node *dev = NULL;
1499	struct amdgpu_device *dmabuf_adev;
1500	void *metadata_buffer = NULL;
1501	uint32_t flags;
1502	int8_t xcp_id;
1503	unsigned int i;
1504	int r;
1505
1506	/* Find a KFD GPU device that supports the get_dmabuf_info query */
1507	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1508		if (dev && !kfd_devcgroup_check_permission(dev))
1509			break;
1510	if (!dev)
1511		return -EINVAL;
1512
1513	if (args->metadata_ptr) {
1514		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1515		if (!metadata_buffer)
1516			return -ENOMEM;
1517	}
1518
1519	/* Get dmabuf info from KGD */
1520	r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
1521					  &dmabuf_adev, &args->size,
1522					  metadata_buffer, args->metadata_size,
1523					  &args->metadata_size, &flags, &xcp_id);
1524	if (r)
1525		goto exit;
1526
1527	if (xcp_id >= 0)
1528		args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
1529	else
1530		args->gpu_id = dev->id;
1531	args->flags = flags;
1532
1533	/* Copy metadata buffer to user mode */
1534	if (metadata_buffer) {
1535		r = copy_to_user((void __user *)args->metadata_ptr,
1536				 metadata_buffer, args->metadata_size);
1537		if (r != 0)
1538			r = -EFAULT;
1539	}
1540
1541exit:
1542	kfree(metadata_buffer);
1543
1544	return r;
1545}
1546
1547static int kfd_ioctl_import_dmabuf(struct file *filep,
1548				   struct kfd_process *p, void *data)
1549{
1550	struct kfd_ioctl_import_dmabuf_args *args = data;
1551	struct kfd_process_device *pdd;
1552	int idr_handle;
1553	uint64_t size;
1554	void *mem;
1555	int r;
1556
1557	mutex_lock(&p->mutex);
1558	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1559	if (!pdd) {
1560		r = -EINVAL;
1561		goto err_unlock;
1562	}
1563
1564	pdd = kfd_bind_process_to_device(pdd->dev, p);
1565	if (IS_ERR(pdd)) {
1566		r = PTR_ERR(pdd);
1567		goto err_unlock;
1568	}
1569
1570	r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
1571						 args->va_addr, pdd->drm_priv,
1572						 (struct kgd_mem **)&mem, &size,
1573						 NULL);
1574	if (r)
1575		goto err_unlock;
1576
1577	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1578	if (idr_handle < 0) {
1579		r = -EFAULT;
1580		goto err_free;
1581	}
1582
1583	mutex_unlock(&p->mutex);
1584
1585	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1586
1587	return 0;
1588
1589err_free:
1590	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
1591					       pdd->drm_priv, NULL);
1592err_unlock:
1593	mutex_unlock(&p->mutex);
1594	return r;
1595}
1596
1597static int kfd_ioctl_export_dmabuf(struct file *filep,
1598				   struct kfd_process *p, void *data)
1599{
1600	struct kfd_ioctl_export_dmabuf_args *args = data;
1601	struct kfd_process_device *pdd;
1602	struct dma_buf *dmabuf;
1603	struct kfd_node *dev;
1604	void *mem;
1605	int ret = 0;
1606
1607	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1608	if (!dev)
1609		return -EINVAL;
1610
1611	mutex_lock(&p->mutex);
1612
1613	pdd = kfd_get_process_device_data(dev, p);
1614	if (!pdd) {
1615		ret = -EINVAL;
1616		goto err_unlock;
1617	}
1618
1619	mem = kfd_process_device_translate_handle(pdd,
1620						GET_IDR_HANDLE(args->handle));
1621	if (!mem) {
1622		ret = -EINVAL;
1623		goto err_unlock;
1624	}
1625
1626	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
1627	mutex_unlock(&p->mutex);
1628	if (ret)
1629		goto err_out;
1630
1631	ret = dma_buf_fd(dmabuf, args->flags);
1632	if (ret < 0) {
1633		dma_buf_put(dmabuf);
1634		goto err_out;
1635	}
1636	/* dma_buf_fd assigns the reference count to the fd, no need to
1637	 * put the reference here.
1638	 */
1639	args->dmabuf_fd = ret;
1640
1641	return 0;
1642
1643err_unlock:
1644	mutex_unlock(&p->mutex);
1645err_out:
1646	return ret;
1647}
1648
1649/* Handle requests for watching SMI events */
1650static int kfd_ioctl_smi_events(struct file *filep,
1651				struct kfd_process *p, void *data)
1652{
1653	struct kfd_ioctl_smi_events_args *args = data;
1654	struct kfd_process_device *pdd;
1655
1656	mutex_lock(&p->mutex);
1657
1658	pdd = kfd_process_device_data_by_id(p, args->gpuid);
1659	mutex_unlock(&p->mutex);
1660	if (!pdd)
1661		return -EINVAL;
1662
1663	return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1664}
1665
1666#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1667
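/*
 * Set or query the process XNACK mode: a negative xnack_enabled value only
 * reads back the current mode, while 0/1 disables/enables it; changing the
 * mode is refused while the process still has user queues.
 */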
1668static int kfd_ioctl_set_xnack_mode(struct file *filep,
1669				    struct kfd_process *p, void *data)
1670{
1671	struct kfd_ioctl_set_xnack_mode_args *args = data;
1672	int r = 0;
1673
1674	mutex_lock(&p->mutex);
1675	if (args->xnack_enabled >= 0) {
1676		if (!list_empty(&p->pqm.queues)) {
1677			pr_debug("Process has user queues running\n");
1678			r = -EBUSY;
1679			goto out_unlock;
1680		}
1681
1682		if (p->xnack_enabled == args->xnack_enabled)
1683			goto out_unlock;
1684
1685		if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
1686			r = -EPERM;
1687			goto out_unlock;
1688		}
1689
1690		r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
1691	} else {
1692		args->xnack_enabled = p->xnack_enabled;
1693	}
1694
1695out_unlock:
1696	mutex_unlock(&p->mutex);
1697
1698	return r;
1699}
1700
1701static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1702{
1703	struct kfd_ioctl_svm_args *args = data;
1704	int r = 0;
1705
1706	pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
1707		 args->start_addr, args->size, args->op, args->nattr);
1708
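	/* Start address and size must be non-zero and page-aligned */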
1709	if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
1710		return -EINVAL;
1711	if (!args->start_addr || !args->size)
1712		return -EINVAL;
1713
1714	r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
1715		      args->attrs);
1716
1717	return r;
1718}
1719#else
1720static int kfd_ioctl_set_xnack_mode(struct file *filep,
1721				    struct kfd_process *p, void *data)
1722{
1723	return -EPERM;
1724}
1725static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1726{
1727	return -EPERM;
1728}
1729#endif
1730
1731static int criu_checkpoint_process(struct kfd_process *p,
1732			     uint8_t __user *user_priv_data,
1733			     uint64_t *priv_offset)
1734{
1735	struct kfd_criu_process_priv_data process_priv;
1736	int ret;
1737
1738	memset(&process_priv, 0, sizeof(process_priv));
1739
1740	process_priv.version = KFD_CRIU_PRIV_VERSION;
1741	/* For CR, we don't consider the negative xnack mode that is used only
1742	 * for querying without changing it. Here, 0 simply means disabled and 1
1743	 * means enabled, i.e. faults are retried to find a valid PTE.
1744	 */
1745	process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;
1746
1747	ret = copy_to_user(user_priv_data + *priv_offset,
1748				&process_priv, sizeof(process_priv));
1749
1750	if (ret) {
1751		pr_err("Failed to copy process information to user\n");
1752		ret = -EFAULT;
1753	}
1754
1755	*priv_offset += sizeof(process_priv);
1756	return ret;
1757}
1758
1759static int criu_checkpoint_devices(struct kfd_process *p,
1760			     uint32_t num_devices,
1761			     uint8_t __user *user_addr,
1762			     uint8_t __user *user_priv_data,
1763			     uint64_t *priv_offset)
1764{
1765	struct kfd_criu_device_priv_data *device_priv = NULL;
1766	struct kfd_criu_device_bucket *device_buckets = NULL;
1767	int ret = 0, i;
1768
1769	device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
1770	if (!device_buckets) {
1771		ret = -ENOMEM;
1772		goto exit;
1773	}
1774
1775	device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
1776	if (!device_priv) {
1777		ret = -ENOMEM;
1778		goto exit;
1779	}
1780
1781	for (i = 0; i < num_devices; i++) {
1782		struct kfd_process_device *pdd = p->pdds[i];
1783
1784		device_buckets[i].user_gpu_id = pdd->user_gpu_id;
1785		device_buckets[i].actual_gpu_id = pdd->dev->id;
1786
1787		/*
1788		 * priv_data does not contain useful information for now and is reserved for
1789		 * future use, so we do not set its contents.
1790		 */
1791	}
1792
1793	ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
1794	if (ret) {
1795		pr_err("Failed to copy device information to user\n");
1796		ret = -EFAULT;
1797		goto exit;
1798	}
1799
1800	ret = copy_to_user(user_priv_data + *priv_offset,
1801			   device_priv,
1802			   num_devices * sizeof(*device_priv));
1803	if (ret) {
1804		pr_err("Failed to copy device information to user\n");
1805		ret = -EFAULT;
1806	}
1807	*priv_offset += num_devices * sizeof(*device_priv);
1808
1809exit:
1810	kvfree(device_buckets);
1811	kvfree(device_priv);
1812	return ret;
1813}
1814
1815static uint32_t get_process_num_bos(struct kfd_process *p)
1816{
1817	uint32_t num_of_bos = 0;
1818	int i;
1819
1820	/* Run over all PDDs of the process */
1821	for (i = 0; i < p->n_pdds; i++) {
1822		struct kfd_process_device *pdd = p->pdds[i];
1823		void *mem;
1824		int id;
1825
1826		idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1827			struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
1828
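			/* Count all BOs except trap handler BOs, which have a VA at or below the GPUVM base */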
1829			if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
1830				num_of_bos++;
1831		}
1832	}
1833	return num_of_bos;
1834}
1835
1836static int criu_get_prime_handle(struct kgd_mem *mem,
1837				 int flags, u32 *shared_fd,
1838				 struct file **file)
1839{
1840	struct dma_buf *dmabuf;
1841	int ret;
1842
1843	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
1844	if (ret) {
1845		pr_err("dmabuf export failed for the BO\n");
1846		return ret;
1847	}
1848
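	/* Reserve an fd now; it is only installed later by commit_files() */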
1849	ret = get_unused_fd_flags(flags);
1850	if (ret < 0) {
1851		pr_err("dmabuf create fd failed, ret:%d\n", ret);
1852		goto out_free_dmabuf;
1853	}
1854
1855	*shared_fd = ret;
1856	*file = dmabuf->file;
1857	return 0;
1858
1859out_free_dmabuf:
1860	dma_buf_put(dmabuf);
1861	return ret;
1862}
1863
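/* Install the reserved dmabuf fds into the file table on success, or drop the
 * file references and release the unused fds on error.
 */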
1864static void commit_files(struct file **files,
1865			 struct kfd_criu_bo_bucket *bo_buckets,
1866			 unsigned int count,
1867			 int err)
1868{
1869	while (count--) {
1870		struct file *file = files[count];
1871
1872		if (!file)
1873			continue;
1874		if (err) {
1875			fput(file);
1876			put_unused_fd(bo_buckets[count].dmabuf_fd);
1877		} else {
1878			fd_install(bo_buckets[count].dmabuf_fd, file);
1879		}
1880	}
1881}
1882
1883static int criu_checkpoint_bos(struct kfd_process *p,
1884			       uint32_t num_bos,
1885			       uint8_t __user *user_bos,
1886			       uint8_t __user *user_priv_data,
1887			       uint64_t *priv_offset)
1888{
1889	struct kfd_criu_bo_bucket *bo_buckets;
1890	struct kfd_criu_bo_priv_data *bo_privs;
1891	struct file **files = NULL;
1892	int ret = 0, pdd_index, bo_index = 0, id;
1893	void *mem;
1894
1895	bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
1896	if (!bo_buckets)
1897		return -ENOMEM;
1898
1899	bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
1900	if (!bo_privs) {
1901		ret = -ENOMEM;
1902		goto exit;
1903	}
1904
1905	files = kvzalloc(num_bos * sizeof(struct file *), GFP_KERNEL);
1906	if (!files) {
1907		ret = -ENOMEM;
1908		goto exit;
1909	}
1910
1911	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
1912		struct kfd_process_device *pdd = p->pdds[pdd_index];
1913		struct amdgpu_bo *dumper_bo;
1914		struct kgd_mem *kgd_mem;
1915
1916		idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1917			struct kfd_criu_bo_bucket *bo_bucket;
1918			struct kfd_criu_bo_priv_data *bo_priv;
1919			int i, dev_idx = 0;
1920
1921			kgd_mem = (struct kgd_mem *)mem;
1922			dumper_bo = kgd_mem->bo;
1923
1924			/* Skip checkpointing BOs that are used for trap handler
1925			 * code and state. Currently, these BOs have a VA that
1926			 * is less than the GPUVM base.
1927			 */
1928			if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
1929				continue;
1930
1931			bo_bucket = &bo_buckets[bo_index];
1932			bo_priv = &bo_privs[bo_index];
1933
1934			bo_bucket->gpu_id = pdd->user_gpu_id;
1935			bo_bucket->addr = (uint64_t)kgd_mem->va;
1936			bo_bucket->size = amdgpu_bo_size(dumper_bo);
1937			bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
1938			bo_priv->idr_handle = id;
1939
1940			if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1941				ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
1942								&bo_priv->user_addr);
1943				if (ret) {
1944					pr_err("Failed to obtain user address for user-pointer bo\n");
1945					goto exit;
1946				}
1947			}
1948			if (bo_bucket->alloc_flags
1949			    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
1950				ret = criu_get_prime_handle(kgd_mem,
1951						bo_bucket->alloc_flags &
1952						KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
1953						&bo_bucket->dmabuf_fd, &files[bo_index]);
1954				if (ret)
1955					goto exit;
1956			} else {
1957				bo_bucket->dmabuf_fd = KFD_INVALID_FD;
1958			}
1959
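			/* Record the offset used to mmap this BO (doorbell, MMIO remap, or regular BO) */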
1960			if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
1961				bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
1962					KFD_MMAP_GPU_ID(pdd->dev->id);
1963			else if (bo_bucket->alloc_flags &
1964				KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1965				bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
1966					KFD_MMAP_GPU_ID(pdd->dev->id);
1967			else
1968				bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
1969
1970			for (i = 0; i < p->n_pdds; i++) {
1971				if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->drm_priv, kgd_mem))
1972					bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
1973			}
1974
1975			pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
1976					"gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
1977					bo_bucket->size,
1978					bo_bucket->addr,
1979					bo_bucket->offset,
1980					bo_bucket->gpu_id,
1981					bo_bucket->alloc_flags,
1982					bo_priv->idr_handle);
1983			bo_index++;
1984		}
1985	}
1986
1987	ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
1988	if (ret) {
1989		pr_err("Failed to copy BO information to user\n");
1990		ret = -EFAULT;
1991		goto exit;
1992	}
1993
1994	ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
1995	if (ret) {
1996		pr_err("Failed to copy BO priv information to user\n");
1997		ret = -EFAULT;
1998		goto exit;
1999	}
2000
2001	*priv_offset += num_bos * sizeof(*bo_privs);
2002
2003exit:
2004	commit_files(files, bo_buckets, bo_index, ret);
2005	kvfree(files);
2006	kvfree(bo_buckets);
2007	kvfree(bo_privs);
2008	return ret;
2009}
2010
2011static int criu_get_process_object_info(struct kfd_process *p,
2012					uint32_t *num_devices,
2013					uint32_t *num_bos,
2014					uint32_t *num_objects,
2015					uint64_t *objs_priv_size)
2016{
2017	uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
2018	uint32_t num_queues, num_events, num_svm_ranges;
2019	int ret;
2020
2021	*num_devices = p->n_pdds;
2022	*num_bos = get_process_num_bos(p);
2023
2024	ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
2025	if (ret)
2026		return ret;
2027
2028	num_events = kfd_get_num_events(p);
2029
2030	ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
2031	if (ret)
2032		return ret;
2033
2034	*num_objects = num_queues + num_events + num_svm_ranges;
2035
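	/* Total size of the private data blob the caller must reserve for checkpointing */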
2036	if (objs_priv_size) {
2037		priv_size = sizeof(struct kfd_criu_process_priv_data);
2038		priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
2039		priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
2040		priv_size += queues_priv_data_size;
2041		priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
2042		priv_size += svm_priv_data_size;
2043		*objs_priv_size = priv_size;
2044	}
2045	return 0;
2046}
2047
2048static int criu_checkpoint(struct file *filep,
2049			   struct kfd_process *p,
2050			   struct kfd_ioctl_criu_args *args)
2051{
2052	int ret;
2053	uint32_t num_devices, num_bos, num_objects;
2054	uint64_t priv_size, priv_offset = 0, bo_priv_offset;
2055
2056	if (!args->devices || !args->bos || !args->priv_data)
2057		return -EINVAL;
2058
2059	mutex_lock(&p->mutex);
2060
2061	if (!p->n_pdds) {
2062		pr_err("No pdd for given process\n");
2063		ret = -ENODEV;
2064		goto exit_unlock;
2065	}
2066
2067	/* Confirm all process queues are evicted */
2068	if (!p->queues_paused) {
2069		pr_err("Cannot dump process when queues are not in evicted state\n");
2070		/* CRIU plugin did not call op PROCESS_INFO before checkpointing */
2071		ret = -EINVAL;
2072		goto exit_unlock;
2073	}
2074
2075	ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
2076	if (ret)
2077		goto exit_unlock;
2078
2079	if (num_devices != args->num_devices ||
2080	    num_bos != args->num_bos ||
2081	    num_objects != args->num_objects ||
2082	    priv_size != args->priv_data_size) {
2083
2084		ret = -EINVAL;
2085		goto exit_unlock;
2086	}
2087
2088	/* each function will store private data inside priv_data and adjust priv_offset */
2089	ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
2090	if (ret)
2091		goto exit_unlock;
2092
2093	ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
2094				(uint8_t __user *)args->priv_data, &priv_offset);
2095	if (ret)
2096		goto exit_unlock;
2097
2098	/* Leave room for BOs in the private data. They need to be restored
2099	 * before events, but we checkpoint them last to simplify the error
2100	 * handling.
2101	 */
2102	bo_priv_offset = priv_offset;
2103	priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
2104
2105	if (num_objects) {
2106		ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
2107						 &priv_offset);
2108		if (ret)
2109			goto exit_unlock;
2110
2111		ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
2112						 &priv_offset);
2113		if (ret)
2114			goto exit_unlock;
2115
2116		ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
2117		if (ret)
2118			goto exit_unlock;
2119	}
2120
2121	/* This must be the last thing in this function that can fail.
2122	 * Otherwise we leak dmabuf file descriptors.
2123	 */
2124	ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
2125			   (uint8_t __user *)args->priv_data, &bo_priv_offset);
2126
2127exit_unlock:
2128	mutex_unlock(&p->mutex);
2129	if (ret)
2130		pr_err("Failed to dump CRIU ret:%d\n", ret);
2131	else
2132		pr_debug("CRIU dump ret:%d\n", ret);
2133
2134	return ret;
2135}
2136
2137static int criu_restore_process(struct kfd_process *p,
2138				struct kfd_ioctl_criu_args *args,
2139				uint64_t *priv_offset,
2140				uint64_t max_priv_data_size)
2141{
2142	int ret = 0;
2143	struct kfd_criu_process_priv_data process_priv;
2144
2145	if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
2146		return -EINVAL;
2147
2148	ret = copy_from_user(&process_priv,
2149				(void __user *)(args->priv_data + *priv_offset),
2150				sizeof(process_priv));
2151	if (ret) {
2152		pr_err("Failed to copy process private information from user\n");
2153		ret = -EFAULT;
2154		goto exit;
2155	}
2156	*priv_offset += sizeof(process_priv);
2157
2158	if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
2159		pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
2160			process_priv.version, KFD_CRIU_PRIV_VERSION);
2161		return -EINVAL;
2162	}
2163
2164	pr_debug("Setting XNACK mode\n");
2165	if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
2166		pr_err("xnack mode cannot be set\n");
2167		ret = -EPERM;
2168		goto exit;
2169	} else {
2170		pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
2171		p->xnack_enabled = process_priv.xnack_mode;
2172	}
2173
2174exit:
2175	return ret;
2176}
2177
2178static int criu_restore_devices(struct kfd_process *p,
2179				struct kfd_ioctl_criu_args *args,
2180				uint64_t *priv_offset,
2181				uint64_t max_priv_data_size)
2182{
2183	struct kfd_criu_device_bucket *device_buckets;
2184	struct kfd_criu_device_priv_data *device_privs;
2185	int ret = 0;
2186	uint32_t i;
2187
2188	if (args->num_devices != p->n_pdds)
2189		return -EINVAL;
2190
2191	if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
2192		return -EINVAL;
2193
2194	device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
2195	if (!device_buckets)
2196		return -ENOMEM;
2197
2198	ret = copy_from_user(device_buckets, (void __user *)args->devices,
2199				args->num_devices * sizeof(*device_buckets));
2200	if (ret) {
2201		pr_err("Failed to copy devices buckets from user\n");
2202		ret = -EFAULT;
2203		goto exit;
2204	}
2205
2206	for (i = 0; i < args->num_devices; i++) {
2207		struct kfd_node *dev;
2208		struct kfd_process_device *pdd;
2209		struct file *drm_file;
2210
2211		/* device private data is not currently used */
2212
2213		if (!device_buckets[i].user_gpu_id) {
2214			pr_err("Invalid user gpu_id\n");
2215			ret = -EINVAL;
2216			goto exit;
2217		}
2218
2219		dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
2220		if (!dev) {
2221			pr_err("Failed to find device with gpu_id = %x\n",
2222				device_buckets[i].actual_gpu_id);
2223			ret = -EINVAL;
2224			goto exit;
2225		}
2226
2227		pdd = kfd_get_process_device_data(dev, p);
2228		if (!pdd) {
2229			pr_err("Failed to get pdd for gpu_id = %x\n",
2230					device_buckets[i].actual_gpu_id);
2231			ret = -EINVAL;
2232			goto exit;
2233		}
2234		pdd->user_gpu_id = device_buckets[i].user_gpu_id;
2235
2236		drm_file = fget(device_buckets[i].drm_fd);
2237		if (!drm_file) {
2238			pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
2239				device_buckets[i].drm_fd);
2240			ret = -EINVAL;
2241			goto exit;
2242		}
2243
2244		if (pdd->drm_file) {
2245			ret = -EINVAL;
2246			goto exit;
2247		}
2248
2249		/* create the vm using render nodes for kfd pdd */
2250		if (kfd_process_device_init_vm(pdd, drm_file)) {
2251			pr_err("could not init vm for given pdd\n");
2252			/* Drop our reference only on failure; on success the PDD keeps the drm_file reference */
2253			fput(drm_file);
2254			ret = -EINVAL;
2255			goto exit;
2256		}
2257		/*
2258		 * pdd already has the VM bound to the render node, so the call below won't
2259		 * create a new exclusive KFD mapping but will reuse the existing one with
2260		 * renderDXXX. It is still needed for IOMMU v2 binding and runtime PM.
2261		 */
2262		pdd = kfd_bind_process_to_device(dev, p);
2263		if (IS_ERR(pdd)) {
2264			ret = PTR_ERR(pdd);
2265			goto exit;
2266		}
2267
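		/* Allocate process doorbells for this pdd if not already allocated */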
2268		if (!pdd->qpd.proc_doorbells) {
2269			ret = kfd_alloc_process_doorbells(dev->kfd, pdd);
2270			if (ret)
2271				goto exit;
2272		}
2273	}
2274
2275	/*
2276	 * We are not copying device private data from user as we are not using the data for now,
2277	 * but we still adjust for its private data.
2278	 */
2279	*priv_offset += args->num_devices * sizeof(*device_privs);
2280
2281exit:
2282	kfree(device_buckets);
2283	return ret;
2284}
2285
2286static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
2287				      struct kfd_criu_bo_bucket *bo_bucket,
2288				      struct kfd_criu_bo_priv_data *bo_priv,
2289				      struct kgd_mem **kgd_mem)
2290{
2291	int idr_handle;
2292	int ret;
2293	const bool criu_resume = true;
2294	u64 offset;
2295
2296	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
2297		if (bo_bucket->size !=
2298				kfd_doorbell_process_slice(pdd->dev->kfd))
2299			return -EINVAL;
2300
2301		offset = kfd_get_process_doorbells(pdd);
2302		if (!offset)
2303			return -ENOMEM;
2304	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2305		/* MMIO BOs need remapped bus address */
2306		if (bo_bucket->size != PAGE_SIZE) {
2307			pr_err("Invalid page size\n");
2308			return -EINVAL;
2309		}
2310		offset = pdd->dev->adev->rmmio_remap.bus_addr;
2311		if (!offset || (PAGE_SIZE > 4096)) {
2312			pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
2313			return -ENOMEM;
2314		}
2315	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
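		/* Userptr BOs are restored at the original user address saved at checkpoint time */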
2316		offset = bo_priv->user_addr;
2317	}
2318	/* Create the BO */
2319	ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
2320						      bo_bucket->size, pdd->drm_priv, kgd_mem,
2321						      &offset, bo_bucket->alloc_flags, criu_resume);
2322	if (ret) {
2323		pr_err("Could not create the BO\n");
2324		return ret;
2325	}
2326	pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
2327		 bo_bucket->size, bo_bucket->addr, offset);
2328
2329	/* Restore previous IDR handle */
2330	pr_debug("Restoring old IDR handle for the BO");
2331	idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
2332			       bo_priv->idr_handle + 1, GFP_KERNEL);
2333
2334	if (idr_handle < 0) {
2335		pr_err("Could not allocate idr\n");
2336		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
2337						       NULL);
2338		return -ENOMEM;
2339	}
2340
2341	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
2342		bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
2343	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2344		bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
2345	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
2346		bo_bucket->restored_offset = offset;
2347	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
2348		bo_bucket->restored_offset = offset;
2349		/* Update the VRAM usage count */
2350		atomic64_add(bo_bucket->size, &pdd->vram_usage);
2351	}
2352	return 0;
2353}
2354
2355static int criu_restore_bo(struct kfd_process *p,
2356			   struct kfd_criu_bo_bucket *bo_bucket,
2357			   struct kfd_criu_bo_priv_data *bo_priv,
2358			   struct file **file)
2359{
2360	struct kfd_process_device *pdd;
2361	struct kgd_mem *kgd_mem;
2362	int ret;
2363	int j;
2364
2365	pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
2366		 bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
2367		 bo_priv->idr_handle);
2368
2369	pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
2370	if (!pdd) {
2371		pr_err("Failed to get pdd\n");
2372		return -ENODEV;
2373	}
2374
2375	ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
2376	if (ret)
2377		return ret;
2378
2379	/* now map these BOs to GPU/s */
2380	for (j = 0; j < p->n_pdds; j++) {
2381		struct kfd_node *peer;
2382		struct kfd_process_device *peer_pdd;
2383
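		/* A zero entry marks the end of the mapped GPU list */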
2384		if (!bo_priv->mapped_gpuids[j])
2385			break;
2386
2387		peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
2388		if (!peer_pdd)
2389			return -EINVAL;
2390
2391		peer = peer_pdd->dev;
2392
2393		peer_pdd = kfd_bind_process_to_device(peer, p);
2394		if (IS_ERR(peer_pdd))
2395			return PTR_ERR(peer_pdd);
2396
2397		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
2398							    peer_pdd->drm_priv);
2399		if (ret) {
2400			pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
2401			return ret;
2402		}
2403	}
2404
2405	pr_debug("map memory was successful for the BO\n");
2406	/* create the dmabuf object and export the bo */
2407	if (bo_bucket->alloc_flags
2408	    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
2409		ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
2410					    &bo_bucket->dmabuf_fd, file);
2411		if (ret)
2412			return ret;
2413	} else {
2414		bo_bucket->dmabuf_fd = KFD_INVALID_FD;
2415	}
2416
2417	return 0;
2418}
2419
2420static int criu_restore_bos(struct kfd_process *p,
2421			    struct kfd_ioctl_criu_args *args,
2422			    uint64_t *priv_offset,
2423			    uint64_t max_priv_data_size)
2424{
2425	struct kfd_criu_bo_bucket *bo_buckets = NULL;
2426	struct kfd_criu_bo_priv_data *bo_privs = NULL;
2427	struct file **files = NULL;
2428	int ret = 0;
2429	uint32_t i = 0;
2430
2431	if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
2432		return -EINVAL;
2433
2434	/* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
2435	amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);
2436
2437	bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
2438	if (!bo_buckets)
2439		return -ENOMEM;
2440
2441	files = kvzalloc(args->num_bos * sizeof(struct file *), GFP_KERNEL);
2442	if (!files) {
2443		ret = -ENOMEM;
2444		goto exit;
2445	}
2446
2447	ret = copy_from_user(bo_buckets, (void __user *)args->bos,
2448			     args->num_bos * sizeof(*bo_buckets));
2449	if (ret) {
2450		pr_err("Failed to copy BOs information from user\n");
2451		ret = -EFAULT;
2452		goto exit;
2453	}
2454
2455	bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
2456	if (!bo_privs) {
2457		ret = -ENOMEM;
2458		goto exit;
2459	}
2460
2461	ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
2462			     args->num_bos * sizeof(*bo_privs));
2463	if (ret) {
2464		pr_err("Failed to copy BOs information from user\n");
2465		ret = -EFAULT;
2466		goto exit;
2467	}
2468	*priv_offset += args->num_bos * sizeof(*bo_privs);
2469
2470	/* Create and map new BOs */
2471	for (; i < args->num_bos; i++) {
2472		ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i], &files[i]);
2473		if (ret) {
2474			pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
2475			goto exit;
2476		}
2477	} /* done */
2478
2479	/* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
2480	ret = copy_to_user((void __user *)args->bos,
2481				bo_buckets,
2482				(args->num_bos * sizeof(*bo_buckets)));
2483	if (ret)
2484		ret = -EFAULT;
2485
2486exit:
2487	commit_files(files, bo_buckets, i, ret);
2488	kvfree(files);
2489	kvfree(bo_buckets);
2490	kvfree(bo_privs);
2491	return ret;
2492}
2493
2494static int criu_restore_objects(struct file *filep,
2495				struct kfd_process *p,
2496				struct kfd_ioctl_criu_args *args,
2497				uint64_t *priv_offset,
2498				uint64_t max_priv_data_size)
2499{
2500	int ret = 0;
2501	uint32_t i;
2502
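	/* object_type must be the first member of each priv struct so it can be read before dispatching */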
2503	BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
2504	BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
2505	BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));
2506
2507	for (i = 0; i < args->num_objects; i++) {
2508		uint32_t object_type;
2509
2510		if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
2511			pr_err("Invalid private data size\n");
2512			return -EINVAL;
2513		}
2514
2515		ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
2516		if (ret) {
2517			pr_err("Failed to copy private information from user\n");
2518			goto exit;
2519		}
2520
2521		switch (object_type) {
2522		case KFD_CRIU_OBJECT_TYPE_QUEUE:
2523			ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
2524						     priv_offset, max_priv_data_size);
2525			if (ret)
2526				goto exit;
2527			break;
2528		case KFD_CRIU_OBJECT_TYPE_EVENT:
2529			ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
2530						     priv_offset, max_priv_data_size);
2531			if (ret)
2532				goto exit;
2533			break;
2534		case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
2535			ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
2536						     priv_offset, max_priv_data_size);
2537			if (ret)
2538				goto exit;
2539			break;
2540		default:
2541			pr_err("Invalid object type:%u at index:%d\n", object_type, i);
2542			ret = -EINVAL;
2543			goto exit;
2544		}
2545	}
2546exit:
2547	return ret;
2548}
2549
2550static int criu_restore(struct file *filep,
2551			struct kfd_process *p,
2552			struct kfd_ioctl_criu_args *args)
2553{
2554	uint64_t priv_offset = 0;
2555	int ret = 0;
2556
2557	pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
2558		 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
2559
2560	if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
2561	    !args->num_devices || !args->num_bos)
2562		return -EINVAL;
2563
2564	mutex_lock(&p->mutex);
2565
2566	/*
2567	 * Set the process to evicted state to avoid running any new queues before all the memory
2568	 * mappings are ready.
2569	 */
2570	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE);
2571	if (ret)
2572		goto exit_unlock;
2573
2574	/* Each function will adjust priv_offset based on how many bytes they consumed */
2575	ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
2576	if (ret)
2577		goto exit_unlock;
2578
2579	ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
2580	if (ret)
2581		goto exit_unlock;
2582
2583	ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
2584	if (ret)
2585		goto exit_unlock;
2586
2587	ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
2588	if (ret)
2589		goto exit_unlock;
2590
2591	if (priv_offset != args->priv_data_size) {
2592		pr_err("Invalid private data size\n");
2593		ret = -EINVAL;
2594	}
2595
2596exit_unlock:
2597	mutex_unlock(&p->mutex);
2598	if (ret)
2599		pr_err("Failed to restore CRIU ret:%d\n", ret);
2600	else
2601		pr_debug("CRIU restore successful\n");
2602
2603	return ret;
2604}
2605
2606static int criu_unpause(struct file *filep,
2607			struct kfd_process *p,
2608			struct kfd_ioctl_criu_args *args)
2609{
2610	int ret;
2611
2612	mutex_lock(&p->mutex);
2613
2614	if (!p->queues_paused) {
2615		mutex_unlock(&p->mutex);
2616		return -EINVAL;
2617	}
2618
2619	ret = kfd_process_restore_queues(p);
2620	if (ret)
2621		pr_err("Failed to unpause queues ret:%d\n", ret);
2622	else
2623		p->queues_paused = false;
2624
2625	mutex_unlock(&p->mutex);
2626
2627	return ret;
2628}
2629
2630static int criu_resume(struct file *filep,
2631			struct kfd_process *p,
2632			struct kfd_ioctl_criu_args *args)
2633{
2634	struct kfd_process *target = NULL;
2635	struct pid *pid = NULL;
2636	int ret = 0;
2637
2638	pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
2639		 args->pid);
2640
2641	pid = find_get_pid(args->pid);
2642	if (!pid) {
2643		pr_err("Cannot find pid info for %i\n", args->pid);
2644		return -ESRCH;
2645	}
2646
2647	pr_debug("calling kfd_lookup_process_by_pid\n");
2648	target = kfd_lookup_process_by_pid(pid);
2649
2650	put_pid(pid);
2651
2652	if (!target) {
2653		pr_debug("Cannot find process info for %i\n", args->pid);
2654		return -ESRCH;
2655	}
2656
2657	mutex_lock(&target->mutex);
2658	ret = kfd_criu_resume_svm(target);
2659	if (ret) {
2660		pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
2661		goto exit;
2662	}
2663
2664	ret =  amdgpu_amdkfd_criu_resume(target->kgd_process_info);
2665	if (ret)
2666		pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
2667
2668exit:
2669	mutex_unlock(&target->mutex);
2670
2671	kfd_unref_process(target);
2672	return ret;
2673}
2674
2675static int criu_process_info(struct file *filep,
2676				struct kfd_process *p,
2677				struct kfd_ioctl_criu_args *args)
2678{
2679	int ret = 0;
2680
2681	mutex_lock(&p->mutex);
2682
2683	if (!p->n_pdds) {
2684		pr_err("No pdd for given process\n");
2685		ret = -ENODEV;
2686		goto err_unlock;
2687	}
2688
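	/* Pause (evict) all queues so the checkpoint that follows sees a consistent state */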
2689	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT);
2690	if (ret)
2691		goto err_unlock;
2692
2693	p->queues_paused = true;
2694
2695	args->pid = task_pid_nr_ns(p->lead_thread,
2696					task_active_pid_ns(p->lead_thread));
2697
2698	ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
2699					   &args->num_objects, &args->priv_data_size);
2700	if (ret)
2701		goto err_unlock;
2702
2703	dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
2704				args->num_devices, args->num_bos, args->num_objects,
2705				args->priv_data_size);
2706
2707err_unlock:
2708	if (ret) {
2709		kfd_process_restore_queues(p);
2710		p->queues_paused = false;
2711	}
2712	mutex_unlock(&p->mutex);
2713	return ret;
2714}
2715
2716static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
2717{
2718	struct kfd_ioctl_criu_args *args = data;
2719	int ret;
2720
2721	dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
2722	switch (args->op) {
2723	case KFD_CRIU_OP_PROCESS_INFO:
2724		ret = criu_process_info(filep, p, args);
2725		break;
2726	case KFD_CRIU_OP_CHECKPOINT:
2727		ret = criu_checkpoint(filep, p, args);
2728		break;
2729	case KFD_CRIU_OP_UNPAUSE:
2730		ret = criu_unpause(filep, p, args);
2731		break;
2732	case KFD_CRIU_OP_RESTORE:
2733		ret = criu_restore(filep, p, args);
2734		break;
2735	case KFD_CRIU_OP_RESUME:
2736		ret = criu_resume(filep, p, args);
2737		break;
2738	default:
2739		dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
2740		ret = -EINVAL;
2741		break;
2742	}
2743
2744	if (ret)
2745		dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
2746
2747	return ret;
2748}
2749
2750static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
2751			bool enable_ttmp_setup)
2752{
2753	int i = 0, ret = 0;
2754
2755	if (p->is_runtime_retry)
2756		goto retry;
2757
2758	if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
2759		return -EBUSY;
2760
2761	for (i = 0; i < p->n_pdds; i++) {
2762		struct kfd_process_device *pdd = p->pdds[i];
2763
2764		if (pdd->qpd.queue_count)
2765			return -EEXIST;
2766
2767		/*
2768		 * Set up TTMPs by default.
2769		 * Note that this call must remain here so that MES ADD QUEUE can
2770		 * set skip_process_ctx_clear unconditionally, as the first call to
2771		 * SET_SHADER_DEBUGGER clears any stale process context data
2772		 * saved in MES.
2773		 */
2774		if (pdd->dev->kfd->shared_resources.enable_mes)
2775			kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
2776	}
2777
2778	p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
2779	p->runtime_info.r_debug = r_debug;
2780	p->runtime_info.ttmp_setup = enable_ttmp_setup;
2781
2782	if (p->runtime_info.ttmp_setup) {
2783		for (i = 0; i < p->n_pdds; i++) {
2784			struct kfd_process_device *pdd = p->pdds[i];
2785
2786			if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
2787				amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
2788				pdd->dev->kfd2kgd->enable_debug_trap(
2789						pdd->dev->adev,
2790						true,
2791						pdd->dev->vm_info.last_vmid_kfd);
2792			} else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2793				pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
2794						pdd->dev->adev,
2795						false,
2796						0);
2797			}
2798		}
2799	}
2800
2801retry:
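	/* If a debugger is attached, notify it and wait for it to acknowledge the runtime enable */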
2802	if (p->debug_trap_enabled) {
2803		if (!p->is_runtime_retry) {
2804			kfd_dbg_trap_activate(p);
2805			kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
2806					p, NULL, 0, false, NULL, 0);
2807		}
2808
2809		mutex_unlock(&p->mutex);
2810		ret = down_interruptible(&p->runtime_enable_sema);
2811		mutex_lock(&p->mutex);
2812
2813		p->is_runtime_retry = !!ret;
2814	}
2815
2816	return ret;
2817}
2818
2819static int runtime_disable(struct kfd_process *p)
2820{
2821	int i = 0, ret;
2822	bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
2823
2824	p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
2825	p->runtime_info.r_debug = 0;
2826
2827	if (p->debug_trap_enabled) {
2828		if (was_enabled)
2829			kfd_dbg_trap_deactivate(p, false, 0);
2830
2831		if (!p->is_runtime_retry)
2832			kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
2833					p, NULL, 0, false, NULL, 0);
2834
2835		mutex_unlock(&p->mutex);
2836		ret = down_interruptible(&p->runtime_enable_sema);
2837		mutex_lock(&p->mutex);
2838
2839		p->is_runtime_retry = !!ret;
2840		if (ret)
2841			return ret;
2842	}
2843
2844	if (was_enabled && p->runtime_info.ttmp_setup) {
2845		for (i = 0; i < p->n_pdds; i++) {
2846			struct kfd_process_device *pdd = p->pdds[i];
2847
2848			if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
2849				amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
2850		}
2851	}
2852
2853	p->runtime_info.ttmp_setup = false;
2854
2855	/* disable ttmp setup */
2856	for (i = 0; i < p->n_pdds; i++) {
2857		struct kfd_process_device *pdd = p->pdds[i];
2858
2859		if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2860			pdd->spi_dbg_override =
2861					pdd->dev->kfd2kgd->disable_debug_trap(
2862					pdd->dev->adev,
2863					false,
2864					pdd->dev->vm_info.last_vmid_kfd);
2865
2866			if (!pdd->dev->kfd->shared_resources.enable_mes)
2867				debug_refresh_runlist(pdd->dev->dqm);
2868			else
2869				kfd_dbg_set_mes_debug_mode(pdd,
2870							   !kfd_dbg_has_cwsr_workaround(pdd->dev));
2871		}
2872	}
2873
2874	return 0;
2875}
2876
2877static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
2878{
2879	struct kfd_ioctl_runtime_enable_args *args = data;
2880	int r;
2881
2882	mutex_lock(&p->mutex);
2883
2884	if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK)
2885		r = runtime_enable(p, args->r_debug,
2886				!!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK));
2887	else
2888		r = runtime_disable(p);
2889
2890	mutex_unlock(&p->mutex);
2891
2892	return r;
2893}
2894
2895static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
2896{
2897	struct kfd_ioctl_dbg_trap_args *args = data;
2898	struct task_struct *thread = NULL;
2899	struct mm_struct *mm = NULL;
2900	struct pid *pid = NULL;
2901	struct kfd_process *target = NULL;
2902	struct kfd_process_device *pdd = NULL;
2903	int r = 0;
2904
2905	if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
2906		pr_err("Debugging does not support sched_policy %i", sched_policy);
2907		return -EINVAL;
2908	}
2909
2910	pid = find_get_pid(args->pid);
2911	if (!pid) {
2912		pr_debug("Cannot find pid info for %i\n", args->pid);
2913		r = -ESRCH;
2914		goto out;
2915	}
2916
2917	thread = get_pid_task(pid, PIDTYPE_PID);
2918	if (!thread) {
2919		r = -ESRCH;
2920		goto out;
2921	}
2922
2923	mm = get_task_mm(thread);
2924	if (!mm) {
2925		r = -ESRCH;
2926		goto out;
2927	}
2928
2929	if (args->op == KFD_IOC_DBG_TRAP_ENABLE) {
2930		bool create_process;
2931
2932		rcu_read_lock();
2933		create_process = thread && thread != current && ptrace_parent(thread) == current;
2934		rcu_read_unlock();
2935
2936		target = create_process ? kfd_create_process(thread) :
2937					kfd_lookup_process_by_pid(pid);
2938	} else {
2939		target = kfd_lookup_process_by_pid(pid);
2940	}
2941
2942	if (IS_ERR_OR_NULL(target)) {
2943		pr_debug("Cannot find process PID %i to debug\n", args->pid);
2944		r = target ? PTR_ERR(target) : -ESRCH;
2945		target = NULL;
2946		goto out;
2947	}
2948
2949	/* Check if target is still PTRACED. */
2950	rcu_read_lock();
2951	if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE
2952				&& ptrace_parent(target->lead_thread) != current) {
2953		pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid);
2954		r = -EPERM;
2955	}
2956	rcu_read_unlock();
2957
2958	if (r)
2959		goto out;
2960
2961	mutex_lock(&target->mutex);
2962
2963	if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) {
2964		pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op);
2965		r = -EINVAL;
2966		goto unlock_out;
2967	}
2968
2969	if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED &&
2970			(args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE ||
2971			 args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE ||
2972			 args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ||
2973			 args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES ||
2974			 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
2975			 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH ||
2976			 args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) {
2977		r = -EPERM;
2978		goto unlock_out;
2979	}
2980
2981	if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
2982	    args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) {
2983		int user_gpu_id = kfd_process_get_user_gpu_id(target,
2984				args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ?
2985					args->set_node_address_watch.gpu_id :
2986					args->clear_node_address_watch.gpu_id);
2987
2988		pdd = kfd_process_device_data_by_id(target, user_gpu_id);
2989		if (user_gpu_id == -EINVAL || !pdd) {
2990			r = -ENODEV;
2991			goto unlock_out;
2992		}
2993	}
2994
2995	switch (args->op) {
2996	case KFD_IOC_DBG_TRAP_ENABLE:
2997		if (target != p)
2998			target->debugger_process = p;
2999
3000		r = kfd_dbg_trap_enable(target,
3001					args->enable.dbg_fd,
3002					(void __user *)args->enable.rinfo_ptr,
3003					&args->enable.rinfo_size);
3004		if (!r)
3005			target->exception_enable_mask = args->enable.exception_mask;
3006
3007		break;
3008	case KFD_IOC_DBG_TRAP_DISABLE:
3009		r = kfd_dbg_trap_disable(target);
3010		break;
3011	case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT:
3012		r = kfd_dbg_send_exception_to_runtime(target,
3013				args->send_runtime_event.gpu_id,
3014				args->send_runtime_event.queue_id,
3015				args->send_runtime_event.exception_mask);
3016		break;
3017	case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED:
3018		kfd_dbg_set_enabled_debug_exception_mask(target,
3019				args->set_exceptions_enabled.exception_mask);
3020		break;
3021	case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
3022		r = kfd_dbg_trap_set_wave_launch_override(target,
3023				args->launch_override.override_mode,
3024				args->launch_override.enable_mask,
3025				args->launch_override.support_request_mask,
3026				&args->launch_override.enable_mask,
3027				&args->launch_override.support_request_mask);
3028		break;
3029	case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
3030		r = kfd_dbg_trap_set_wave_launch_mode(target,
3031				args->launch_mode.launch_mode);
3032		break;
3033	case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES:
3034		r = suspend_queues(target,
3035				args->suspend_queues.num_queues,
3036				args->suspend_queues.grace_period,
3037				args->suspend_queues.exception_mask,
3038				(uint32_t *)args->suspend_queues.queue_array_ptr);
3039
3040		break;
3041	case KFD_IOC_DBG_TRAP_RESUME_QUEUES:
3042		r = resume_queues(target, args->resume_queues.num_queues,
3043				(uint32_t *)args->resume_queues.queue_array_ptr);
3044		break;
3045	case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH:
3046		r = kfd_dbg_trap_set_dev_address_watch(pdd,
3047				args->set_node_address_watch.address,
3048				args->set_node_address_watch.mask,
3049				&args->set_node_address_watch.id,
3050				args->set_node_address_watch.mode);
3051		break;
3052	case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH:
3053		r = kfd_dbg_trap_clear_dev_address_watch(pdd,
3054				args->clear_node_address_watch.id);
3055		break;
3056	case KFD_IOC_DBG_TRAP_SET_FLAGS:
3057		r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags);
3058		break;
3059	case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT:
3060		r = kfd_dbg_ev_query_debug_event(target,
3061				&args->query_debug_event.queue_id,
3062				&args->query_debug_event.gpu_id,
3063				args->query_debug_event.exception_mask,
3064				&args->query_debug_event.exception_mask);
3065		break;
3066	case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO:
3067		r = kfd_dbg_trap_query_exception_info(target,
3068				args->query_exception_info.source_id,
3069				args->query_exception_info.exception_code,
3070				args->query_exception_info.clear_exception,
3071				(void __user *)args->query_exception_info.info_ptr,
3072				&args->query_exception_info.info_size);
3073		break;
3074	case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT:
3075		r = pqm_get_queue_snapshot(&target->pqm,
3076				args->queue_snapshot.exception_mask,
3077				(void __user *)args->queue_snapshot.snapshot_buf_ptr,
3078				&args->queue_snapshot.num_queues,
3079				&args->queue_snapshot.entry_size);
3080		break;
3081	case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT:
3082		r = kfd_dbg_trap_device_snapshot(target,
3083				args->device_snapshot.exception_mask,
3084				(void __user *)args->device_snapshot.snapshot_buf_ptr,
3085				&args->device_snapshot.num_devices,
3086				&args->device_snapshot.entry_size);
3087		break;
3088	default:
3089		pr_err("Invalid option: %i\n", args->op);
3090		r = -EINVAL;
3091	}
3092
3093unlock_out:
3094	mutex_unlock(&target->mutex);
3095
3096out:
3097	if (thread)
3098		put_task_struct(thread);
3099
3100	if (mm)
3101		mmput(mm);
3102
3103	if (pid)
3104		put_pid(pid);
3105
3106	if (target)
3107		kfd_unref_process(target);
3108
3109	return r;
3110}
3111
3112#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
3113	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
3114			    .cmd_drv = 0, .name = #ioctl}
3115
3116/** Ioctl table */
3117static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
3118	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
3119			kfd_ioctl_get_version, 0),
3120
3121	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
3122			kfd_ioctl_create_queue, 0),
3123
3124	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
3125			kfd_ioctl_destroy_queue, 0),
3126
3127	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
3128			kfd_ioctl_set_memory_policy, 0),
3129
3130	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
3131			kfd_ioctl_get_clock_counters, 0),
3132
3133	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
3134			kfd_ioctl_get_process_apertures, 0),
3135
3136	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
3137			kfd_ioctl_update_queue, 0),
3138
3139	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
3140			kfd_ioctl_create_event, 0),
3141
3142	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
3143			kfd_ioctl_destroy_event, 0),
3144
3145	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
3146			kfd_ioctl_set_event, 0),
3147
3148	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
3149			kfd_ioctl_reset_event, 0),
3150
3151	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
3152			kfd_ioctl_wait_events, 0),
3153
3154	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
3155			kfd_ioctl_dbg_register, 0),
3156
3157	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
3158			kfd_ioctl_dbg_unregister, 0),
3159
3160	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
3161			kfd_ioctl_dbg_address_watch, 0),
3162
3163	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
3164			kfd_ioctl_dbg_wave_control, 0),
3165
3166	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
3167			kfd_ioctl_set_scratch_backing_va, 0),
3168
3169	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
3170			kfd_ioctl_get_tile_config, 0),
3171
3172	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
3173			kfd_ioctl_set_trap_handler, 0),
3174
3175	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
3176			kfd_ioctl_get_process_apertures_new, 0),
3177
3178	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
3179			kfd_ioctl_acquire_vm, 0),
3180
3181	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
3182			kfd_ioctl_alloc_memory_of_gpu, 0),
3183
3184	AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
3185			kfd_ioctl_free_memory_of_gpu, 0),
3186
3187	AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
3188			kfd_ioctl_map_memory_to_gpu, 0),
3189
3190	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
3191			kfd_ioctl_unmap_memory_from_gpu, 0),
3192
3193	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
3194			kfd_ioctl_set_cu_mask, 0),
3195
3196	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
3197			kfd_ioctl_get_queue_wave_state, 0),
3198
3199	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
3200				kfd_ioctl_get_dmabuf_info, 0),
3201
3202	AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
3203				kfd_ioctl_import_dmabuf, 0),
3204
3205	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
3206			kfd_ioctl_alloc_queue_gws, 0),
3207
3208	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
3209			kfd_ioctl_smi_events, 0),
3210
3211	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),
3212
3213	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
3214			kfd_ioctl_set_xnack_mode, 0),
3215
3216	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
3217			kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),
3218
3219	AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
3220			kfd_ioctl_get_available_memory, 0),
3221
3222	AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
3223				kfd_ioctl_export_dmabuf, 0),
3224
3225	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE,
3226			kfd_ioctl_runtime_enable, 0),
3227
3228	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
3229			kfd_ioctl_set_debug_trap, 0),
3230};
3231
3232#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)
3233
3234static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3235{
3236	struct kfd_process *process;
3237	amdkfd_ioctl_t *func;
3238	const struct amdkfd_ioctl_desc *ioctl = NULL;
3239	unsigned int nr = _IOC_NR(cmd);
3240	char stack_kdata[128];
3241	char *kdata = NULL;
3242	unsigned int usize, asize;
3243	int retcode = -EINVAL;
3244	bool ptrace_attached = false;
3245
3246	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
3247		goto err_i1;
3248
3249	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
3250		u32 amdkfd_size;
3251
3252		ioctl = &amdkfd_ioctls[nr];
3253
3254		amdkfd_size = _IOC_SIZE(ioctl->cmd);
3255		usize = asize = _IOC_SIZE(cmd);
3256		if (amdkfd_size > asize)
3257			asize = amdkfd_size;
3258
3259		cmd = ioctl->cmd;
3260	} else
3261		goto err_i1;
3262
3263	dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
3264
3265	/* Get the process struct from the filep. Only the process
3266	 * that opened /dev/kfd can use the file descriptor. Child
3267	 * processes need to create their own KFD device context.
3268	 */
3269	process = filep->private_data;
3270
3271	rcu_read_lock();
3272	if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
3273	    ptrace_parent(process->lead_thread) == current)
3274		ptrace_attached = true;
3275	rcu_read_unlock();
3276
3277	if (process->lead_thread != current->group_leader
3278	    && !ptrace_attached) {
3279		dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
3280		retcode = -EBADF;
3281		goto err_i1;
3282	}
3283
3284	/* Do not trust userspace, use our own definition */
3285	func = ioctl->func;
3286
3287	if (unlikely(!func)) {
3288		dev_dbg(kfd_device, "no function\n");
3289		retcode = -EINVAL;
3290		goto err_i1;
3291	}
3292
3293	/*
3294	 * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
3295	 * CAP_CHECKPOINT_RESTORE, so we also allow access if CAP_SYS_ADMIN is held,
3296	 * as CAP_SYS_ADMIN is the more privileged capability.
3297	 */
3298	if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
3299		if (!capable(CAP_CHECKPOINT_RESTORE) &&
3300						!capable(CAP_SYS_ADMIN)) {
3301			retcode = -EACCES;
3302			goto err_i1;
3303		}
3304	}
3305
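	/* Stage the ioctl payload in a kernel buffer; small payloads use the on-stack buffer */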
3306	if (cmd & (IOC_IN | IOC_OUT)) {
3307		if (asize <= sizeof(stack_kdata)) {
3308			kdata = stack_kdata;
3309		} else {
3310			kdata = kmalloc(asize, GFP_KERNEL);
3311			if (!kdata) {
3312				retcode = -ENOMEM;
3313				goto err_i1;
3314			}
3315		}
3316		if (asize > usize)
3317			memset(kdata + usize, 0, asize - usize);
3318	}
3319
3320	if (cmd & IOC_IN) {
3321		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
3322			retcode = -EFAULT;
3323			goto err_i1;
3324		}
3325	} else if (cmd & IOC_OUT) {
3326		memset(kdata, 0, usize);
3327	}
3328
3329	retcode = func(filep, process, kdata);
3330
3331	if (cmd & IOC_OUT)
3332		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
3333			retcode = -EFAULT;
3334
3335err_i1:
3336	if (!ioctl)
3337		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
3338			  task_pid_nr(current), cmd, nr);
3339
3340	if (kdata != stack_kdata)
3341		kfree(kdata);
3342
3343	if (retcode)
3344		dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
3345				nr, arg, retcode);
3346
3347	return retcode;
3348}
3349
3350static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
3351		      struct vm_area_struct *vma)
3352{
3353	phys_addr_t address;
3354
3355	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3356		return -EINVAL;
3357
3358	if (PAGE_SIZE > 4096)
3359		return -EINVAL;
3360
3361	address = dev->adev->rmmio_remap.bus_addr;
3362
3363	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
3364				VM_DONTDUMP | VM_PFNMAP);
3365
3366	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3367
3368	pr_debug("pasid 0x%x mapping mmio page\n"
3369		 "     target user address == 0x%08llX\n"
3370		 "     physical address    == 0x%08llX\n"
3371		 "     vm_flags            == 0x%04lX\n"
3372		 "     size                == 0x%04lX\n",
3373		 process->pasid, (unsigned long long) vma->vm_start,
3374		 address, vma->vm_flags, PAGE_SIZE);
3375
3376	return io_remap_pfn_range(vma,
3377				vma->vm_start,
3378				address >> PAGE_SHIFT,
3379				PAGE_SIZE,
3380				vma->vm_page_prot);
3381}
3382
3383
3384static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
3385{
3386	struct kfd_process *process;
3387	struct kfd_node *dev = NULL;
3388	unsigned long mmap_offset;
3389	unsigned int gpu_id;
3390
3391	process = kfd_get_process(current);
3392	if (IS_ERR(process))
3393		return PTR_ERR(process);
3394
3395	mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
3396	gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
3397	if (gpu_id)
3398		dev = kfd_device_by_id(gpu_id);
3399
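	/* The mmap offset encodes the mapping type and, for device mappings, the GPU id */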
3400	switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
3401	case KFD_MMAP_TYPE_DOORBELL:
3402		if (!dev)
3403			return -ENODEV;
3404		return kfd_doorbell_mmap(dev, process, vma);
3405
3406	case KFD_MMAP_TYPE_EVENTS:
3407		return kfd_event_mmap(process, vma);
3408
3409	case KFD_MMAP_TYPE_RESERVED_MEM:
3410		if (!dev)
3411			return -ENODEV;
3412		return kfd_reserved_mem_mmap(dev, process, vma);
3413	case KFD_MMAP_TYPE_MMIO:
3414		if (!dev)
3415			return -ENODEV;
3416		return kfd_mmio_mmap(dev, process, vma);
3417	}
3418
3419	return -EFAULT;
3420}