v6.8
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/*
   3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice shall be included in
  13 * all copies or substantial portions of the Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21 * OTHER DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <linux/bsearch.h>
  25#include <linux/pci.h>
  26#include <linux/slab.h>
  27#include "kfd_priv.h"
  28#include "kfd_device_queue_manager.h"
  29#include "kfd_pm4_headers_vi.h"
  30#include "kfd_pm4_headers_aldebaran.h"
  31#include "cwsr_trap_handler.h"
  32#include "amdgpu_amdkfd.h"
  33#include "kfd_smi_events.h"
  34#include "kfd_svm.h"
  35#include "kfd_migrate.h"
  36#include "amdgpu.h"
  37#include "amdgpu_xcp.h"
  38
  39#define MQD_SIZE_ALIGNED 768
  40
   41/*
   42 * kfd_locked is used to lock the kfd driver during suspend or reset.
   43 * Once locked, the kfd driver will stop any further GPU execution and
   44 * process creation (open) will return -EAGAIN.
   45 */
  46static int kfd_locked;
  47
  48#ifdef CONFIG_DRM_AMDGPU_CIK
  49extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
  50#endif
  51extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
  52extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
  53extern const struct kfd2kgd_calls arcturus_kfd2kgd;
  54extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
  55extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
  56extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
  57extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
  58extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
  59
  60static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
  61				unsigned int chunk_size);
  62static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
  63
  64static int kfd_resume(struct kfd_node *kfd);
  65
  66static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
  67{
  68	uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);
  69
  70	switch (sdma_version) {
  71	case IP_VERSION(4, 0, 0):/* VEGA10 */
  72	case IP_VERSION(4, 0, 1):/* VEGA12 */
  73	case IP_VERSION(4, 1, 0):/* RAVEN */
  74	case IP_VERSION(4, 1, 1):/* RAVEN */
  75	case IP_VERSION(4, 1, 2):/* RENOIR */
  76	case IP_VERSION(5, 2, 1):/* VANGOGH */
  77	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
  78	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
  79	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
  80		kfd->device_info.num_sdma_queues_per_engine = 2;
  81		break;
  82	case IP_VERSION(4, 2, 0):/* VEGA20 */
  83	case IP_VERSION(4, 2, 2):/* ARCTURUS */
  84	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
  85	case IP_VERSION(4, 4, 2):
  86	case IP_VERSION(5, 0, 0):/* NAVI10 */
  87	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
  88	case IP_VERSION(5, 0, 2):/* NAVI14 */
  89	case IP_VERSION(5, 0, 5):/* NAVI12 */
  90	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
  91	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
  92	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
  93	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
  94	case IP_VERSION(6, 0, 0):
  95	case IP_VERSION(6, 0, 1):
  96	case IP_VERSION(6, 0, 2):
  97	case IP_VERSION(6, 0, 3):
  98	case IP_VERSION(6, 1, 0):
  99		kfd->device_info.num_sdma_queues_per_engine = 8;
 100		break;
 101	default:
 102		dev_warn(kfd_device,
 103			"Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
 104			sdma_version);
 105		kfd->device_info.num_sdma_queues_per_engine = 8;
 106	}
 107
 108	bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
 109
 110	switch (sdma_version) {
 111	case IP_VERSION(6, 0, 0):
 112	case IP_VERSION(6, 0, 1):
 113	case IP_VERSION(6, 0, 2):
 114	case IP_VERSION(6, 0, 3):
 115	case IP_VERSION(6, 1, 0):
 116		/* Reserve 1 for paging and 1 for gfx */
 117		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
 118		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
 119		bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0,
 120			   kfd->adev->sdma.num_instances *
 121			   kfd->device_info.num_reserved_sdma_queues_per_engine);
 122		break;
 123	default:
 124		break;
 125	}
 126}
 127
 128static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
 129{
 130	uint32_t gc_version = KFD_GC_VERSION(kfd);
 131
 132	switch (gc_version) {
 133	case IP_VERSION(9, 0, 1): /* VEGA10 */
 134	case IP_VERSION(9, 1, 0): /* RAVEN */
 135	case IP_VERSION(9, 2, 1): /* VEGA12 */
 136	case IP_VERSION(9, 2, 2): /* RAVEN */
 137	case IP_VERSION(9, 3, 0): /* RENOIR */
 138	case IP_VERSION(9, 4, 0): /* VEGA20 */
 139	case IP_VERSION(9, 4, 1): /* ARCTURUS */
 140	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
 141		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
 142		break;
 143	case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
 144		kfd->device_info.event_interrupt_class =
 145						&event_interrupt_class_v9_4_3;
 146		break;
 147	case IP_VERSION(10, 3, 1): /* VANGOGH */
 148	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
 149	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
 150	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
 151	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
 152	case IP_VERSION(10, 1, 4):
 153	case IP_VERSION(10, 1, 10): /* NAVI10 */
 154	case IP_VERSION(10, 1, 2): /* NAVI12 */
 155	case IP_VERSION(10, 1, 1): /* NAVI14 */
 156	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
 157	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
 158	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
 159	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
 160		kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
 161		break;
 162	case IP_VERSION(11, 0, 0):
 163	case IP_VERSION(11, 0, 1):
 164	case IP_VERSION(11, 0, 2):
 165	case IP_VERSION(11, 0, 3):
 166	case IP_VERSION(11, 0, 4):
 167	case IP_VERSION(11, 5, 0):
 168		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
 169		break;
 170	default:
 171		dev_warn(kfd_device, "v9 event interrupt handler is set due to "
 172			"mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
 173		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
 174	}
 175}
 176
 177static void kfd_device_info_init(struct kfd_dev *kfd,
 178				 bool vf, uint32_t gfx_target_version)
 179{
 180	uint32_t gc_version = KFD_GC_VERSION(kfd);
 181	uint32_t asic_type = kfd->adev->asic_type;
 182
 183	kfd->device_info.max_pasid_bits = 16;
 184	kfd->device_info.max_no_of_hqd = 24;
 185	kfd->device_info.num_of_watch_points = 4;
 186	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
 187	kfd->device_info.gfx_target_version = gfx_target_version;
 188
 189	if (KFD_IS_SOC15(kfd)) {
 190		kfd->device_info.doorbell_size = 8;
 191		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
 192		kfd->device_info.supports_cwsr = true;
 193
 194		kfd_device_info_set_sdma_info(kfd);
 195
 196		kfd_device_info_set_event_interrupt_class(kfd);
 197
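		/* The checks below choose no_atomic_fw_version: the oldest MEC/CP
		 * firmware on this ASIC that can operate without PCIe atomics.
		 * kgd2kfd_device_init() later skips the device when atomics are
		 * unavailable and the firmware is older than this (or when the
		 * value is left at 0).
		 */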
 198		if (gc_version < IP_VERSION(11, 0, 0)) {
 199			/* Navi2x+, Navi1x+ */
 200			if (gc_version == IP_VERSION(10, 3, 6))
 201				kfd->device_info.no_atomic_fw_version = 14;
 202			else if (gc_version == IP_VERSION(10, 3, 7))
 203				kfd->device_info.no_atomic_fw_version = 3;
 204			else if (gc_version >= IP_VERSION(10, 3, 0))
 205				kfd->device_info.no_atomic_fw_version = 92;
 206			else if (gc_version >= IP_VERSION(10, 1, 1))
 207				kfd->device_info.no_atomic_fw_version = 145;
 208
 209			/* Navi1x+ */
 210			if (gc_version >= IP_VERSION(10, 1, 1))
 211				kfd->device_info.needs_pci_atomics = true;
 212		} else if (gc_version < IP_VERSION(12, 0, 0)) {
 213			/*
 214			 * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
 215			 * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
 216			 * PCIe atomics support.
 217			 */
 218			kfd->device_info.needs_pci_atomics = true;
 219			kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
 220		}
 221	} else {
 222		kfd->device_info.doorbell_size = 4;
 223		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
 224		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
 225		kfd->device_info.num_sdma_queues_per_engine = 2;
 226
 227		if (asic_type != CHIP_KAVERI &&
 228		    asic_type != CHIP_HAWAII &&
 229		    asic_type != CHIP_TONGA)
 230			kfd->device_info.supports_cwsr = true;
 231
 232		if (asic_type != CHIP_HAWAII && !vf)
 233			kfd->device_info.needs_pci_atomics = true;
 234	}
 235}
 236
 237struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 238{
 239	struct kfd_dev *kfd = NULL;
 240	const struct kfd2kgd_calls *f2g = NULL;
 241	uint32_t gfx_target_version = 0;
 242
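	/* The gfx_target_version values assigned below encode the ISA target as
	 * major * 10000 + minor * 100 + stepping, e.g. 100306 for GC 10.3.6
	 * (gfx1036).
	 */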
 243	switch (adev->asic_type) {
 244#ifdef CONFIG_DRM_AMDGPU_CIK
 245	case CHIP_KAVERI:
 246		gfx_target_version = 70000;
 247		if (!vf)
 248			f2g = &gfx_v7_kfd2kgd;
 249		break;
 250#endif
 251	case CHIP_CARRIZO:
 252		gfx_target_version = 80001;
 253		if (!vf)
 254			f2g = &gfx_v8_kfd2kgd;
 255		break;
 256#ifdef CONFIG_DRM_AMDGPU_CIK
 257	case CHIP_HAWAII:
 258		gfx_target_version = 70001;
 259		if (!amdgpu_exp_hw_support)
 260			pr_info(
 261	"KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
 262				);
 263		else if (!vf)
 264			f2g = &gfx_v7_kfd2kgd;
 265		break;
 266#endif
 267	case CHIP_TONGA:
 268		gfx_target_version = 80002;
 269		if (!vf)
 270			f2g = &gfx_v8_kfd2kgd;
 271		break;
 272	case CHIP_FIJI:
 273	case CHIP_POLARIS10:
 274		gfx_target_version = 80003;
 275		f2g = &gfx_v8_kfd2kgd;
 276		break;
 277	case CHIP_POLARIS11:
 278	case CHIP_POLARIS12:
 279	case CHIP_VEGAM:
 280		gfx_target_version = 80003;
 281		if (!vf)
 282			f2g = &gfx_v8_kfd2kgd;
 283		break;
 284	default:
 285		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 286		/* Vega 10 */
 287		case IP_VERSION(9, 0, 1):
 288			gfx_target_version = 90000;
 289			f2g = &gfx_v9_kfd2kgd;
 290			break;
 291		/* Raven */
 292		case IP_VERSION(9, 1, 0):
 293		case IP_VERSION(9, 2, 2):
 294			gfx_target_version = 90002;
 295			if (!vf)
 296				f2g = &gfx_v9_kfd2kgd;
 297			break;
 298		/* Vega12 */
 299		case IP_VERSION(9, 2, 1):
 300			gfx_target_version = 90004;
 301			if (!vf)
 302				f2g = &gfx_v9_kfd2kgd;
 303			break;
 304		/* Renoir */
 305		case IP_VERSION(9, 3, 0):
 306			gfx_target_version = 90012;
 307			if (!vf)
 308				f2g = &gfx_v9_kfd2kgd;
 309			break;
 310		/* Vega20 */
 311		case IP_VERSION(9, 4, 0):
 312			gfx_target_version = 90006;
 313			if (!vf)
 314				f2g = &gfx_v9_kfd2kgd;
 315			break;
 316		/* Arcturus */
 317		case IP_VERSION(9, 4, 1):
 318			gfx_target_version = 90008;
 319			f2g = &arcturus_kfd2kgd;
 320			break;
 321		/* Aldebaran */
 322		case IP_VERSION(9, 4, 2):
 323			gfx_target_version = 90010;
 324			f2g = &aldebaran_kfd2kgd;
 325			break;
 326		case IP_VERSION(9, 4, 3):
 327			gfx_target_version = adev->rev_id >= 1 ? 90402
 328					   : adev->flags & AMD_IS_APU ? 90400
 329					   : 90401;
 330			f2g = &gc_9_4_3_kfd2kgd;
 331			break;
 332		/* Navi10 */
 333		case IP_VERSION(10, 1, 10):
 334			gfx_target_version = 100100;
 335			if (!vf)
 336				f2g = &gfx_v10_kfd2kgd;
 337			break;
 338		/* Navi12 */
 339		case IP_VERSION(10, 1, 2):
 340			gfx_target_version = 100101;
 341			f2g = &gfx_v10_kfd2kgd;
 342			break;
 343		/* Navi14 */
 344		case IP_VERSION(10, 1, 1):
 345			gfx_target_version = 100102;
 346			if (!vf)
 347				f2g = &gfx_v10_kfd2kgd;
 348			break;
 349		/* Cyan Skillfish */
 350		case IP_VERSION(10, 1, 3):
 351		case IP_VERSION(10, 1, 4):
 352			gfx_target_version = 100103;
 353			if (!vf)
 354				f2g = &gfx_v10_kfd2kgd;
 355			break;
 356		/* Sienna Cichlid */
 357		case IP_VERSION(10, 3, 0):
 358			gfx_target_version = 100300;
 359			f2g = &gfx_v10_3_kfd2kgd;
 360			break;
 361		/* Navy Flounder */
 362		case IP_VERSION(10, 3, 2):
 363			gfx_target_version = 100301;
 364			f2g = &gfx_v10_3_kfd2kgd;
 365			break;
 366		/* Van Gogh */
 367		case IP_VERSION(10, 3, 1):
 368			gfx_target_version = 100303;
 369			if (!vf)
 370				f2g = &gfx_v10_3_kfd2kgd;
 371			break;
 372		/* Dimgrey Cavefish */
 373		case IP_VERSION(10, 3, 4):
 374			gfx_target_version = 100302;
 375			f2g = &gfx_v10_3_kfd2kgd;
 376			break;
 377		/* Beige Goby */
 378		case IP_VERSION(10, 3, 5):
 379			gfx_target_version = 100304;
 380			f2g = &gfx_v10_3_kfd2kgd;
 381			break;
 382		/* Yellow Carp */
 383		case IP_VERSION(10, 3, 3):
 384			gfx_target_version = 100305;
 385			if (!vf)
 386				f2g = &gfx_v10_3_kfd2kgd;
 387			break;
 388		case IP_VERSION(10, 3, 6):
 389		case IP_VERSION(10, 3, 7):
 390			gfx_target_version = 100306;
 391			if (!vf)
 392				f2g = &gfx_v10_3_kfd2kgd;
 393			break;
 394		case IP_VERSION(11, 0, 0):
 395			gfx_target_version = 110000;
 396			f2g = &gfx_v11_kfd2kgd;
 397			break;
 398		case IP_VERSION(11, 0, 1):
 399		case IP_VERSION(11, 0, 4):
 400			gfx_target_version = 110003;
 401			f2g = &gfx_v11_kfd2kgd;
 402			break;
 403		case IP_VERSION(11, 0, 2):
 404			gfx_target_version = 110002;
 405			f2g = &gfx_v11_kfd2kgd;
 406			break;
 407		case IP_VERSION(11, 0, 3):
 408			if ((adev->pdev->device == 0x7460 &&
 409			     adev->pdev->revision == 0x00) ||
 410			    (adev->pdev->device == 0x7461 &&
 411			     adev->pdev->revision == 0x00))
 412				/* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
 413				gfx_target_version = 110005;
 414			else
 415				/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
 416				gfx_target_version = 110001;
 417			f2g = &gfx_v11_kfd2kgd;
 418			break;
 419		case IP_VERSION(11, 5, 0):
 420			gfx_target_version = 110500;
 421			f2g = &gfx_v11_kfd2kgd;
 422			break;
 423		default:
 424			break;
 425		}
 426		break;
 427	}
 428
 429	if (!f2g) {
 430		if (amdgpu_ip_version(adev, GC_HWIP, 0))
 431			dev_err(kfd_device,
 432				"GC IP %06x %s not supported in kfd\n",
 433				amdgpu_ip_version(adev, GC_HWIP, 0),
 434				vf ? "VF" : "");
 435		else
 436			dev_err(kfd_device, "%s %s not supported in kfd\n",
 437				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
 438		return NULL;
 439	}
 440
 441	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
 442	if (!kfd)
 443		return NULL;
 444
 445	kfd->adev = adev;
 446	kfd_device_info_init(kfd, vf, gfx_target_version);
 447	kfd->init_complete = false;
 448	kfd->kfd2kgd = f2g;
 449	atomic_set(&kfd->compute_profile, 0);
 450
 451	mutex_init(&kfd->doorbell_mutex);
 452
 453	ida_init(&kfd->doorbell_ida);
 454
 455	return kfd;
 456}
 457
 458static void kfd_cwsr_init(struct kfd_dev *kfd)
 459{
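	/* Pick the CWSR (compute wave save/restore) trap handler binary that
	 * matches the GC IP version. The BUILD_BUG_ON checks guard that each
	 * handler fits within a single page.
	 */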
 460	if (cwsr_enable && kfd->device_info.supports_cwsr) {
 461		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
 462			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
 463			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
 464			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
 465		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
 466			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
 467			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
 468			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
 469		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
 470			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
 471			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
 472			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
 473		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
 474			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex) > PAGE_SIZE);
 475			kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
 476			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
 477		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
 478			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
 479			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
 480			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
 481		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
 482			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
 483			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
 484			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
 485		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
 486			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
 487			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
 488			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
 489		} else {
 490			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
 491			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
 492			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
 493		}
 494
 495		kfd->cwsr_enabled = true;
 496	}
 497}
 498
 499static int kfd_gws_init(struct kfd_node *node)
 500{
 501	int ret = 0;
 502	struct kfd_dev *kfd = node->kfd;
 503	uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
 504
 505	if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
 506		return 0;
 507
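	/* GWS (global wave sync) is only allocated under HWS scheduling, and
	 * only when it is forced on via the hws_gws_support modparam or the
	 * per-ASIC minimum MEC2/MES firmware versions checked below are met.
	 */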
 508	if (hws_gws_support || (KFD_IS_SOC15(node) &&
 509		((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
 510			&& kfd->mec2_fw_version >= 0x81b3) ||
 511		(KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
 512			&& kfd->mec2_fw_version >= 0x1b3)  ||
 513		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
 514			&& kfd->mec2_fw_version >= 0x30)   ||
 515		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
 516			&& kfd->mec2_fw_version >= 0x28) ||
 517		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) ||
 518		(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
 519			&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
 520			&& kfd->mec2_fw_version >= 0x6b) ||
 521		(KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
 522			&& KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
 523			&& mes_rev >= 68))))
 524		ret = amdgpu_amdkfd_alloc_gws(node->adev,
 525				node->adev->gds.gws_size, &node->gws);
 526
 527	return ret;
 528}
 529
 530static void kfd_smi_init(struct kfd_node *dev)
 531{
 532	INIT_LIST_HEAD(&dev->smi_clients);
 533	spin_lock_init(&dev->smi_lock);
 534}
 535
 536static int kfd_init_node(struct kfd_node *node)
 537{
 538	int err = -1;
 539
 540	if (kfd_interrupt_init(node)) {
 541		dev_err(kfd_device, "Error initializing interrupts\n");
 542		goto kfd_interrupt_error;
 543	}
 544
 545	node->dqm = device_queue_manager_init(node);
 546	if (!node->dqm) {
 547		dev_err(kfd_device, "Error initializing queue manager\n");
 548		goto device_queue_manager_error;
 549	}
 550
 551	if (kfd_gws_init(node)) {
 552		dev_err(kfd_device, "Could not allocate %d gws\n",
 553			node->adev->gds.gws_size);
 554		goto gws_error;
 555	}
 556
 557	if (kfd_resume(node))
 558		goto kfd_resume_error;
 559
 560	if (kfd_topology_add_device(node)) {
 561		dev_err(kfd_device, "Error adding device to topology\n");
 562		goto kfd_topology_add_device_error;
 563	}
 564
 565	kfd_smi_init(node);
 566
 567	return 0;
 568
 569kfd_topology_add_device_error:
 570kfd_resume_error:
 571gws_error:
 572	device_queue_manager_uninit(node->dqm);
 573device_queue_manager_error:
 574	kfd_interrupt_exit(node);
 575kfd_interrupt_error:
 576	if (node->gws)
 577		amdgpu_amdkfd_free_gws(node->adev, node->gws);
 578
 579	/* Cleanup the node memory here */
 580	kfree(node);
 581	return err;
 582}
 583
 584static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
 585{
 586	struct kfd_node *knode;
 587	unsigned int i;
 588
 589	for (i = 0; i < num_nodes; i++) {
 590		knode = kfd->nodes[i];
 591		device_queue_manager_uninit(knode->dqm);
 592		kfd_interrupt_exit(knode);
 593		kfd_topology_remove_device(knode);
 594		if (knode->gws)
 595			amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
 596		kfree(knode);
 597		kfd->nodes[i] = NULL;
 598	}
 599}
 600
 601static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
 602				       unsigned int kfd_node_idx)
 603{
 604	struct amdgpu_device *adev = node->adev;
 605	uint32_t xcc_mask = node->xcc_mask;
 606	uint32_t xcc, mapped_xcc;
 607	/*
 608	 * Interrupt bitmap is setup for processing interrupts from
 609	 * different XCDs and AIDs.
 610	 * Interrupt bitmap is defined as follows:
 611	 * 1. Bits 0-15 - correspond to the NodeId field.
 612	 *    Each bit corresponds to NodeId number. For example, if
 613	 *    a KFD node has interrupt bitmap set to 0x7, then this
 614	 *    KFD node will process interrupts with NodeId = 0, 1 and 2
 615	 *    in the IH cookie.
 616	 * 2. Bits 16-31 - unused.
 617	 *
 618	 * Please note that the kfd_node_idx argument passed to this
 619	 * function is not related to NodeId field received in the
 620	 * IH cookie.
 621	 *
 622	 * In CPX mode, a KFD node will process an interrupt if:
 623	 * - the Node Id matches the corresponding bit set in
 624	 *   Bits 0-15.
 625	 * - AND VMID reported in the interrupt lies within the
 626	 *   VMID range of the node.
 627	 */
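	/*
	 * Worked example of the mapping below: each pair of XCCs shares one
	 * group of four NodeIds. mapped_xcc 0 gets 0x3 (NodeIds 0 and 1),
	 * mapped_xcc 1 gets 0x5 (NodeIds 0 and 2), mapped_xcc 2 gets 0x3 << 4
	 * (NodeIds 4 and 5), mapped_xcc 3 gets 0x5 << 4 (NodeIds 4 and 6),
	 * and so on.
	 */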
 628	for_each_inst(xcc, xcc_mask) {
 629		mapped_xcc = GET_INST(GC, xcc);
 630		node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
 631	}
 632	dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx,
 633							node->interrupt_bitmap);
 634}
 635
 636bool kgd2kfd_device_init(struct kfd_dev *kfd,
 637			 const struct kgd2kfd_shared_resources *gpu_resources)
 638{
 639	unsigned int size, map_process_packet_size, i;
 640	struct kfd_node *node;
 641	uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
 642	unsigned int max_proc_per_quantum;
 643	int partition_mode;
 644	int xcp_idx;
 645
 646	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
 647			KGD_ENGINE_MEC1);
 648	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
 649			KGD_ENGINE_MEC2);
 650	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
 651			KGD_ENGINE_SDMA1);
 652	kfd->shared_resources = *gpu_resources;
 653
 654	kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);
 655
 656	if (kfd->num_nodes == 0) {
 657		dev_err(kfd_device,
 658			"KFD num nodes cannot be 0, num_xcc_in_node: %d\n",
 659			kfd->adev->gfx.num_xcc_per_xcp);
 660		goto out;
 661	}
 662
 663	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
 664	 * 32 and 64-bit requests are possible and must be
 665	 * supported.
 666	 */
 667	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
 668	if (!kfd->pci_atomic_requested &&
 669	    kfd->device_info.needs_pci_atomics &&
 670	    (!kfd->device_info.no_atomic_fw_version ||
 671	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
 672		dev_info(kfd_device,
 673			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
 674			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
 675			 kfd->mec_fw_version,
 676			 kfd->device_info.no_atomic_fw_version);
 677		return false;
 678	}
 679
 680	first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
 681	last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
 682	vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
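	/* For example, a compute_vmid_bitmap of 0xfff8 yields
	 * first_vmid_kfd = 3, last_vmid_kfd = 15 and vmid_num_kfd = 13.
	 */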
 683
 684	/* For GFX9.4.3, we need special handling for VMIDs depending on
 685	 * partition mode.
 686	 * In CPX mode, the VMID range needs to be shared between XCDs.
 687	 * Additionally, there are 13 VMIDs (3-15) available for KFD. To
 688	 * divide them equally, we change starting VMID to 4 and not use
 689	 * VMID 3.
 690	 * If the VMID range changes for GFX9.4.3, then this code MUST be
 691	 * revisited.
 692	 */
 693	if (kfd->adev->xcp_mgr) {
 694		partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
 695								 AMDGPU_XCP_FL_LOCKED);
 696		if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
 697		    kfd->num_nodes != 1) {
 698			vmid_num_kfd /= 2;
 699			first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
 700		}
 701	}
 702
  703	/* Verify module parameters regarding mapped process number */
 704	if (hws_max_conc_proc >= 0)
 705		max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
 706	else
 707		max_proc_per_quantum = vmid_num_kfd;
 708
 709	/* calculate max size of mqds needed for queues */
 710	size = max_num_of_queues_per_device *
 711			kfd->device_info.mqd_size_aligned;
 712
 713	/*
 714	 * calculate max size of runlist packet.
 715	 * There can be only 2 packets at once
 716	 */
 717	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
 718				sizeof(struct pm4_mes_map_process_aldebaran) :
 719				sizeof(struct pm4_mes_map_process);
 720	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
 721		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
 722		+ sizeof(struct pm4_mes_runlist)) * 2;
 723
 724	/* Add size of HIQ & DIQ */
 725	size += KFD_KERNEL_QUEUE_SIZE * 2;
 726
 727	/* add another 512KB for all other allocations on gart (HPD, fences) */
 728	size += 512 * 1024;
 729
 730	if (amdgpu_amdkfd_alloc_gtt_mem(
 731			kfd->adev, size, &kfd->gtt_mem,
 732			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
 733			false)) {
 734		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
 735		goto alloc_gtt_mem_failure;
 736	}
 737
 738	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
 739
 740	/* Initialize GTT sa with 512 byte chunk size */
 741	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
 742		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
 743		goto kfd_gtt_sa_init_error;
 744	}
 745
 746	if (kfd_doorbell_init(kfd)) {
 747		dev_err(kfd_device,
 748			"Error initializing doorbell aperture\n");
 749		goto kfd_doorbell_error;
 750	}
 751
 752	if (amdgpu_use_xgmi_p2p)
 753		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
 754
 755	/*
 756	 * For GFX9.4.3, the KFD abstracts all partitions within a socket as
 757	 * xGMI connected in the topology so assign a unique hive id per
 758	 * device based on the pci device location if device is in PCIe mode.
 759	 */
 760	if (!kfd->hive_id && (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) && kfd->num_nodes > 1)
 761		kfd->hive_id = pci_dev_id(kfd->adev->pdev);
 762
 763	kfd->noretry = kfd->adev->gmc.noretry;
 764
 765	kfd_cwsr_init(kfd);
 766
 767	dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
 768				kfd->num_nodes);
 769
 770	/* Allocate the KFD nodes */
 771	for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
 772		node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
 773		if (!node)
 774			goto node_alloc_error;
 775
 776		node->node_id = i;
 777		node->adev = kfd->adev;
 778		node->kfd = kfd;
 779		node->kfd2kgd = kfd->kfd2kgd;
 780		node->vm_info.vmid_num_kfd = vmid_num_kfd;
 781		node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
 782		/* TODO : Check if error handling is needed */
 783		if (node->xcp) {
 784			amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
 785						    &node->xcc_mask);
 786			++xcp_idx;
 787		} else {
 788			node->xcc_mask =
 789				(1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
 790		}
 791
 792		if (node->xcp) {
 793			dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
 794				node->node_id, node->xcp->mem_id,
 795				KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
 796		}
 797
 798		if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
 799		    partition_mode == AMDGPU_CPX_PARTITION_MODE &&
 800		    kfd->num_nodes != 1) {
 801			/* For GFX9.4.3 and CPX mode, first XCD gets VMID range
 802			 * 4-9 and second XCD gets VMID range 10-15.
 803			 */
 804
 805			node->vm_info.first_vmid_kfd = (i%2 == 0) ?
 806						first_vmid_kfd :
 807						first_vmid_kfd+vmid_num_kfd;
 808			node->vm_info.last_vmid_kfd = (i%2 == 0) ?
 809						last_vmid_kfd-vmid_num_kfd :
 810						last_vmid_kfd;
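			/* Mask with bits first_vmid_kfd..last_vmid_kfd set,
			 * e.g. first = 4, last = 9 gives 0x3f0 and
			 * first = 10, last = 15 gives 0xfc00.
			 */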
 811			node->compute_vmid_bitmap =
 812				((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
 813				((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
 814		} else {
 815			node->vm_info.first_vmid_kfd = first_vmid_kfd;
 816			node->vm_info.last_vmid_kfd = last_vmid_kfd;
 817			node->compute_vmid_bitmap =
 818				gpu_resources->compute_vmid_bitmap;
 819		}
 820		node->max_proc_per_quantum = max_proc_per_quantum;
 821		atomic_set(&node->sram_ecc_flag, 0);
 822
 823		amdgpu_amdkfd_get_local_mem_info(kfd->adev,
 824					&node->local_mem_info, node->xcp);
 825
 826		if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3))
 827			kfd_setup_interrupt_bitmap(node, i);
 828
 829		/* Initialize the KFD node */
 830		if (kfd_init_node(node)) {
 831			dev_err(kfd_device, "Error initializing KFD node\n");
 832			goto node_init_error;
 833		}
 834		kfd->nodes[i] = node;
 835	}
 836
 837	svm_range_set_max_pages(kfd->adev);
 838
 839	spin_lock_init(&kfd->watch_points_lock);
 840
 841	kfd->init_complete = true;
 842	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
 843		 kfd->adev->pdev->device);
 844
 845	pr_debug("Starting kfd with the following scheduling policy %d\n",
 846		node->dqm->sched_policy);
 847
 848	goto out;
 849
 850node_init_error:
 851node_alloc_error:
 852	kfd_cleanup_nodes(kfd, i);
 853	kfd_doorbell_fini(kfd);
 854kfd_doorbell_error:
 855	kfd_gtt_sa_fini(kfd);
 856kfd_gtt_sa_init_error:
 857	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
 858alloc_gtt_mem_failure:
 859	dev_err(kfd_device,
 860		"device %x:%x NOT added due to errors\n",
 861		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
 862out:
 863	return kfd->init_complete;
 864}
 865
 866void kgd2kfd_device_exit(struct kfd_dev *kfd)
 867{
 868	if (kfd->init_complete) {
 869		/* Cleanup KFD nodes */
 870		kfd_cleanup_nodes(kfd, kfd->num_nodes);
 871		/* Cleanup common/shared resources */
 872		kfd_doorbell_fini(kfd);
 873		ida_destroy(&kfd->doorbell_ida);
 874		kfd_gtt_sa_fini(kfd);
 875		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
 876	}
 877
 878	kfree(kfd);
 879}
 880
 881int kgd2kfd_pre_reset(struct kfd_dev *kfd)
 882{
 883	struct kfd_node *node;
 884	int i;
 885
 886	if (!kfd->init_complete)
 887		return 0;
 888
 889	for (i = 0; i < kfd->num_nodes; i++) {
 890		node = kfd->nodes[i];
 891		kfd_smi_event_update_gpu_reset(node, false);
 892		node->dqm->ops.pre_reset(node->dqm);
 893	}
 894
 895	kgd2kfd_suspend(kfd, false);
 896
 897	for (i = 0; i < kfd->num_nodes; i++)
 898		kfd_signal_reset_event(kfd->nodes[i]);
 899
 900	return 0;
 901}
 902
  903/*
  904 * FIXME: KFD is not able to resume existing processes for now.
  905 * All existing processes are kept in an evicted state and we
  906 * wait for them to be terminated.
  907 */
 908
 909int kgd2kfd_post_reset(struct kfd_dev *kfd)
 910{
 911	int ret;
 912	struct kfd_node *node;
 913	int i;
 914
 915	if (!kfd->init_complete)
 916		return 0;
 917
 918	for (i = 0; i < kfd->num_nodes; i++) {
 919		ret = kfd_resume(kfd->nodes[i]);
 920		if (ret)
 921			return ret;
 922	}
 923
 924	mutex_lock(&kfd_processes_mutex);
 925	--kfd_locked;
 926	mutex_unlock(&kfd_processes_mutex);
 927
 928	for (i = 0; i < kfd->num_nodes; i++) {
 929		node = kfd->nodes[i];
 930		atomic_set(&node->sram_ecc_flag, 0);
 931		kfd_smi_event_update_gpu_reset(node, true);
 932	}
 933
 934	return 0;
 935}
 936
 937bool kfd_is_locked(void)
 938{
 939	lockdep_assert_held(&kfd_processes_mutex);
 940	return  (kfd_locked > 0);
 941}
 942
 943void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 944{
 945	struct kfd_node *node;
 946	int i;
 947	int count;
 948
 949	if (!kfd->init_complete)
 950		return;
 951
 952	/* for runtime suspend, skip locking kfd */
 953	if (!run_pm) {
 954		mutex_lock(&kfd_processes_mutex);
 955		count = ++kfd_locked;
 956		mutex_unlock(&kfd_processes_mutex);
 957
 958		/* For first KFD device suspend all the KFD processes */
 959		if (count == 1)
 960			kfd_suspend_all_processes();
 961	}
 962
 963	for (i = 0; i < kfd->num_nodes; i++) {
 964		node = kfd->nodes[i];
 965		node->dqm->ops.stop(node->dqm);
 966	}
 967}
 968
 969int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 970{
 971	int ret, count, i;
 972
 973	if (!kfd->init_complete)
 974		return 0;
 975
 976	for (i = 0; i < kfd->num_nodes; i++) {
 977		ret = kfd_resume(kfd->nodes[i]);
 978		if (ret)
 979			return ret;
 980	}
 981
 982	/* for runtime resume, skip unlocking kfd */
 983	if (!run_pm) {
 984		mutex_lock(&kfd_processes_mutex);
 985		count = --kfd_locked;
 986		mutex_unlock(&kfd_processes_mutex);
 987
 988		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
 989		if (count == 0)
 990			ret = kfd_resume_all_processes();
 991	}
 992
 993	return ret;
 994}
 995
 996static int kfd_resume(struct kfd_node *node)
 997{
 998	int err = 0;
 999
1000	err = node->dqm->ops.start(node->dqm);
1001	if (err)
1002		dev_err(kfd_device,
1003			"Error starting queue manager for device %x:%x\n",
1004			node->adev->pdev->vendor, node->adev->pdev->device);
1005
1006	return err;
1007}
1008
1009static inline void kfd_queue_work(struct workqueue_struct *wq,
1010				  struct work_struct *work)
1011{
1012	int cpu, new_cpu;
1013
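	/* Run the interrupt worker on an online CPU in the current NUMA node:
	 * walk cpu_online_mask starting from this CPU and stop at the first
	 * CPU whose node matches, falling back to the current CPU if the walk
	 * wraps all the way around.
	 */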
1014	cpu = new_cpu = smp_processor_id();
1015	do {
1016		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
1017		if (cpu_to_node(new_cpu) == numa_node_id())
1018			break;
1019	} while (cpu != new_cpu);
1020
1021	queue_work_on(new_cpu, wq, work);
1022}
1023
1024/* This is called directly from KGD at ISR. */
1025void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1026{
1027	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
1028	bool is_patched = false;
1029	unsigned long flags;
1030	struct kfd_node *node;
1031
1032	if (!kfd->init_complete)
1033		return;
1034
1035	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
1036		dev_err_once(kfd_device, "Ring entry too small\n");
1037		return;
1038	}
1039
1040	for (i = 0; i < kfd->num_nodes; i++) {
1041		node = kfd->nodes[i];
1042		spin_lock_irqsave(&node->interrupt_lock, flags);
1043
1044		if (node->interrupts_active
1045		    && interrupt_is_wanted(node, ih_ring_entry,
1046			    	patched_ihre, &is_patched)
1047		    && enqueue_ih_ring_entry(node,
1048			    	is_patched ? patched_ihre : ih_ring_entry)) {
1049			kfd_queue_work(node->ih_wq, &node->interrupt_work);
1050			spin_unlock_irqrestore(&node->interrupt_lock, flags);
1051			return;
1052		}
1053		spin_unlock_irqrestore(&node->interrupt_lock, flags);
1054	}
1055
1056}
1057
1058int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
1059{
1060	struct kfd_process *p;
1061	int r;
1062
1063	/* Because we are called from arbitrary context (workqueue) as opposed
1064	 * to process context, kfd_process could attempt to exit while we are
1065	 * running so the lookup function increments the process ref count.
1066	 */
1067	p = kfd_lookup_process_by_mm(mm);
1068	if (!p)
1069		return -ESRCH;
1070
1071	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
1072	r = kfd_process_evict_queues(p, trigger);
1073
1074	kfd_unref_process(p);
1075	return r;
1076}
1077
1078int kgd2kfd_resume_mm(struct mm_struct *mm)
1079{
1080	struct kfd_process *p;
1081	int r;
1082
1083	/* Because we are called from arbitrary context (workqueue) as opposed
1084	 * to process context, kfd_process could attempt to exit while we are
1085	 * running so the lookup function increments the process ref count.
1086	 */
1087	p = kfd_lookup_process_by_mm(mm);
1088	if (!p)
1089		return -ESRCH;
1090
1091	r = kfd_process_restore_queues(p);
1092
1093	kfd_unref_process(p);
1094	return r;
1095}
1096
1097/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
1098 *   prepare for safe eviction of KFD BOs that belong to the specified
1099 *   process.
1100 *
1101 * @mm: mm_struct that identifies the specified KFD process
1102 * @fence: eviction fence attached to KFD process BOs
1103 *
1104 */
1105int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
1106					       struct dma_fence *fence)
1107{
1108	struct kfd_process *p;
1109	unsigned long active_time;
1110	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
1111
1112	if (!fence)
1113		return -EINVAL;
1114
1115	if (dma_fence_is_signaled(fence))
1116		return 0;
1117
1118	p = kfd_lookup_process_by_mm(mm);
1119	if (!p)
1120		return -ENODEV;
1121
1122	if (fence->seqno == p->last_eviction_seqno)
1123		goto out;
1124
1125	p->last_eviction_seqno = fence->seqno;
1126
1127	/* Avoid KFD process starvation. Wait for at least
1128	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
1129	 */
1130	active_time = get_jiffies_64() - p->last_restore_timestamp;
1131	if (delay_jiffies > active_time)
1132		delay_jiffies -= active_time;
1133	else
1134		delay_jiffies = 0;
1135
1136	/* During process initialization eviction_work.dwork is initialized
1137	 * to kfd_evict_bo_worker
1138	 */
1139	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
1140	     p->lead_thread->pid, delay_jiffies);
1141	schedule_delayed_work(&p->eviction_work, delay_jiffies);
1142out:
1143	kfd_unref_process(p);
1144	return 0;
1145}
1146
1147static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
1148				unsigned int chunk_size)
1149{
1150	if (WARN_ON(buf_size < chunk_size))
1151		return -EINVAL;
1152	if (WARN_ON(buf_size == 0))
1153		return -EINVAL;
1154	if (WARN_ON(chunk_size == 0))
1155		return -EINVAL;
1156
1157	kfd->gtt_sa_chunk_size = chunk_size;
1158	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
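	/* For example, a 2 MiB buffer with the 512 byte chunk size used by
	 * kgd2kfd_device_init() gives 4096 chunks.
	 */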
1159
1160	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
1161					   GFP_KERNEL);
1162	if (!kfd->gtt_sa_bitmap)
1163		return -ENOMEM;
1164
1165	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
1166			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
1167
1168	mutex_init(&kfd->gtt_sa_lock);
1169
1170	return 0;
1171}
1172
1173static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
1174{
1175	mutex_destroy(&kfd->gtt_sa_lock);
1176	bitmap_free(kfd->gtt_sa_bitmap);
1177}
1178
1179static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
1180						unsigned int bit_num,
1181						unsigned int chunk_size)
1182{
1183	return start_addr + bit_num * chunk_size;
1184}
1185
1186static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
1187						unsigned int bit_num,
1188						unsigned int chunk_size)
1189{
1190	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
1191}
1192
1193int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
1194			struct kfd_mem_obj **mem_obj)
1195{
1196	unsigned int found, start_search, cur_size;
1197	struct kfd_dev *kfd = node->kfd;
1198
1199	if (size == 0)
1200		return -EINVAL;
1201
1202	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
1203		return -ENOMEM;
1204
1205	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
1206	if (!(*mem_obj))
1207		return -ENOMEM;
1208
1209	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
1210
1211	start_search = 0;
1212
1213	mutex_lock(&kfd->gtt_sa_lock);
1214
1215kfd_gtt_restart_search:
1216	/* Find the first chunk that is free */
1217	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
1218					kfd->gtt_sa_num_of_chunks,
1219					start_search);
1220
1221	pr_debug("Found = %d\n", found);
1222
1223	/* If there wasn't any free chunk, bail out */
1224	if (found == kfd->gtt_sa_num_of_chunks)
1225		goto kfd_gtt_no_free_chunk;
1226
1227	/* Update fields of mem_obj */
1228	(*mem_obj)->range_start = found;
1229	(*mem_obj)->range_end = found;
1230	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
1231					kfd->gtt_start_gpu_addr,
1232					found,
1233					kfd->gtt_sa_chunk_size);
1234	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
1235					kfd->gtt_start_cpu_ptr,
1236					found,
1237					kfd->gtt_sa_chunk_size);
1238
1239	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
1240			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
1241
1242	/* If we need only one chunk, mark it as allocated and get out */
1243	if (size <= kfd->gtt_sa_chunk_size) {
1244		pr_debug("Single bit\n");
1245		__set_bit(found, kfd->gtt_sa_bitmap);
1246		goto kfd_gtt_out;
1247	}
1248
1249	/* Otherwise, try to see if we have enough contiguous chunks */
1250	cur_size = size - kfd->gtt_sa_chunk_size;
1251	do {
1252		(*mem_obj)->range_end =
1253			find_next_zero_bit(kfd->gtt_sa_bitmap,
1254					kfd->gtt_sa_num_of_chunks, ++found);
 1255		/*
 1256		 * If the next free chunk is not contiguous then we need to
 1257		 * restart our search from the last free chunk we found (which
 1258		 * wasn't contiguous to the previous ones).
 1259		 */
1260		if ((*mem_obj)->range_end != found) {
1261			start_search = found;
1262			goto kfd_gtt_restart_search;
1263		}
1264
1265		/*
1266		 * If we reached end of buffer, bail out with error
1267		 */
1268		if (found == kfd->gtt_sa_num_of_chunks)
1269			goto kfd_gtt_no_free_chunk;
1270
1271		/* Check if we don't need another chunk */
1272		if (cur_size <= kfd->gtt_sa_chunk_size)
1273			cur_size = 0;
1274		else
1275			cur_size -= kfd->gtt_sa_chunk_size;
1276
1277	} while (cur_size > 0);
1278
1279	pr_debug("range_start = %d, range_end = %d\n",
1280		(*mem_obj)->range_start, (*mem_obj)->range_end);
1281
1282	/* Mark the chunks as allocated */
1283	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
1284		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);
1285
1286kfd_gtt_out:
1287	mutex_unlock(&kfd->gtt_sa_lock);
1288	return 0;
1289
1290kfd_gtt_no_free_chunk:
1291	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
1292	mutex_unlock(&kfd->gtt_sa_lock);
1293	kfree(*mem_obj);
1294	return -ENOMEM;
1295}
1296
1297int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
1298{
1299	struct kfd_dev *kfd = node->kfd;
1300
1301	/* Act like kfree when trying to free a NULL object */
1302	if (!mem_obj)
1303		return 0;
1304
1305	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
1306			mem_obj, mem_obj->range_start, mem_obj->range_end);
1307
1308	mutex_lock(&kfd->gtt_sa_lock);
1309
1310	/* Mark the chunks as free */
1311	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
1312		     mem_obj->range_end - mem_obj->range_start + 1);
1313
1314	mutex_unlock(&kfd->gtt_sa_lock);
1315
1316	kfree(mem_obj);
1317	return 0;
1318}
1319
1320void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
1321{
1322	/*
1323	 * TODO: Currently update SRAM ECC flag for first node.
1324	 * This needs to be updated later when we can
1325	 * identify SRAM ECC error on other nodes also.
1326	 */
1327	if (kfd)
1328		atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
1329}
1330
1331void kfd_inc_compute_active(struct kfd_node *node)
1332{
1333	if (atomic_inc_return(&node->kfd->compute_profile) == 1)
1334		amdgpu_amdkfd_set_compute_idle(node->adev, false);
1335}
1336
1337void kfd_dec_compute_active(struct kfd_node *node)
1338{
1339	int count = atomic_dec_return(&node->kfd->compute_profile);
1340
1341	if (count == 0)
1342		amdgpu_amdkfd_set_compute_idle(node->adev, true);
1343	WARN_ONCE(count < 0, "Compute profile ref. count error");
1344}
1345
1346void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
1347{
1348	/*
1349	 * TODO: For now, raise the throttling event only on first node.
1350	 * This will need to change after we are able to determine
1351	 * which node raised the throttling event.
1352	 */
1353	if (kfd && kfd->init_complete)
1354		kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
1355							throttle_bitmask);
1356}
1357
 1358/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
 1359 * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
 1360 * When the device has more than two engines, we reserve two for PCIe to enable
 1361 * full-duplex and the rest are used as XGMI.
 1362 */
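/* For example, a single-node device with XGMI support and 8 SDMA instances
 * ends up with 2 PCIe-optimized engines; the remaining 6 are used as XGMI
 * engines.
 */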
1363unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
1364{
1365	/* If XGMI is not supported, all SDMA engines are PCIe */
1366	if (!node->adev->gmc.xgmi.supported)
1367		return node->adev->sdma.num_instances/(int)node->kfd->num_nodes;
1368
1369	return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2);
1370}
1371
1372unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
1373{
 1374	/* After reserving engines for PCIe, the rest are used as XGMI */
1375	return node->adev->sdma.num_instances/(int)node->kfd->num_nodes -
1376		kfd_get_num_sdma_engines(node);
1377}
1378
1379int kgd2kfd_check_and_lock_kfd(void)
1380{
1381	mutex_lock(&kfd_processes_mutex);
1382	if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
1383		mutex_unlock(&kfd_processes_mutex);
1384		return -EBUSY;
1385	}
1386
1387	++kfd_locked;
1388	mutex_unlock(&kfd_processes_mutex);
1389
1390	return 0;
1391}
1392
1393void kgd2kfd_unlock_kfd(void)
1394{
1395	mutex_lock(&kfd_processes_mutex);
1396	--kfd_locked;
1397	mutex_unlock(&kfd_processes_mutex);
1398}
1399
1400#if defined(CONFIG_DEBUG_FS)
1401
1402/* This function will send a package to HIQ to hang the HWS
1403 * which will trigger a GPU reset and bring the HWS back to normal state
1404 */
1405int kfd_debugfs_hang_hws(struct kfd_node *dev)
1406{
1407	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
1408		pr_err("HWS is not enabled");
1409		return -EINVAL;
1410	}
1411
1412	return dqm_debugfs_hang_hws(dev->dqm);
1413}
1414
1415#endif
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/*
   3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice shall be included in
  13 * all copies or substantial portions of the Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21 * OTHER DEALINGS IN THE SOFTWARE.
  22 */
  23
  24#include <linux/bsearch.h>
  25#include <linux/pci.h>
  26#include <linux/slab.h>
  27#include "kfd_priv.h"
  28#include "kfd_device_queue_manager.h"
  29#include "kfd_pm4_headers_vi.h"
  30#include "kfd_pm4_headers_aldebaran.h"
  31#include "cwsr_trap_handler.h"
  32#include "kfd_iommu.h"
  33#include "amdgpu_amdkfd.h"
  34#include "kfd_smi_events.h"
 
  35#include "kfd_migrate.h"
  36#include "amdgpu.h"
 
  37
  38#define MQD_SIZE_ALIGNED 768
  39
  40/*
  41 * kfd_locked is used to lock the kfd driver during suspend or reset
  42 * once locked, kfd driver will stop any further GPU execution.
  43 * create process (open) will return -EAGAIN.
  44 */
  45static atomic_t kfd_locked = ATOMIC_INIT(0);
  46
  47#ifdef CONFIG_DRM_AMDGPU_CIK
  48extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
  49#endif
  50extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
  51extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
  52extern const struct kfd2kgd_calls arcturus_kfd2kgd;
  53extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
 
  54extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
  55extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
  56extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
  57
  58static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
  59				unsigned int chunk_size);
  60static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
  61
  62static int kfd_resume(struct kfd_dev *kfd);
  63
  64static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
  65{
  66	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
  67
  68	switch (sdma_version) {
  69	case IP_VERSION(4, 0, 0):/* VEGA10 */
  70	case IP_VERSION(4, 0, 1):/* VEGA12 */
  71	case IP_VERSION(4, 1, 0):/* RAVEN */
  72	case IP_VERSION(4, 1, 1):/* RAVEN */
  73	case IP_VERSION(4, 1, 2):/* RENOIR */
  74	case IP_VERSION(5, 2, 1):/* VANGOGH */
  75	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
  76	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
  77	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
  78		kfd->device_info.num_sdma_queues_per_engine = 2;
  79		break;
  80	case IP_VERSION(4, 2, 0):/* VEGA20 */
  81	case IP_VERSION(4, 2, 2):/* ARCTURUS */
  82	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
 
  83	case IP_VERSION(5, 0, 0):/* NAVI10 */
  84	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
  85	case IP_VERSION(5, 0, 2):/* NAVI14 */
  86	case IP_VERSION(5, 0, 5):/* NAVI12 */
  87	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
  88	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
  89	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
  90	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
  91	case IP_VERSION(6, 0, 0):
  92	case IP_VERSION(6, 0, 1):
  93	case IP_VERSION(6, 0, 2):
  94	case IP_VERSION(6, 0, 3):
 
  95		kfd->device_info.num_sdma_queues_per_engine = 8;
  96		break;
  97	default:
  98		dev_warn(kfd_device,
  99			"Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
 100			sdma_version);
 101		kfd->device_info.num_sdma_queues_per_engine = 8;
 102	}
 103
 
 
 104	switch (sdma_version) {
 105	case IP_VERSION(6, 0, 0):
 
 106	case IP_VERSION(6, 0, 2):
 107	case IP_VERSION(6, 0, 3):
 
 108		/* Reserve 1 for paging and 1 for gfx */
 109		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
 110		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
 111		kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
 112		break;
 113	case IP_VERSION(6, 0, 1):
 114		/* Reserve 1 for paging and 1 for gfx */
 115		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
 116		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
 117		kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
 118		break;
 119	default:
 120		break;
 121	}
 122}
 123
 124static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
 125{
 126	uint32_t gc_version = KFD_GC_VERSION(kfd);
 127
 128	switch (gc_version) {
 129	case IP_VERSION(9, 0, 1): /* VEGA10 */
 130	case IP_VERSION(9, 1, 0): /* RAVEN */
 131	case IP_VERSION(9, 2, 1): /* VEGA12 */
 132	case IP_VERSION(9, 2, 2): /* RAVEN */
 133	case IP_VERSION(9, 3, 0): /* RENOIR */
 134	case IP_VERSION(9, 4, 0): /* VEGA20 */
 135	case IP_VERSION(9, 4, 1): /* ARCTURUS */
 136	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
 
 
 
 
 
 
 137	case IP_VERSION(10, 3, 1): /* VANGOGH */
 138	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
 139	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
 140	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
 141	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
 142	case IP_VERSION(10, 1, 4):
 143	case IP_VERSION(10, 1, 10): /* NAVI10 */
 144	case IP_VERSION(10, 1, 2): /* NAVI12 */
 145	case IP_VERSION(10, 1, 1): /* NAVI14 */
 146	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
 147	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
 148	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
 149	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
 150		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
 151		break;
 152	case IP_VERSION(11, 0, 0):
 153	case IP_VERSION(11, 0, 1):
 154	case IP_VERSION(11, 0, 2):
 155	case IP_VERSION(11, 0, 3):
 156	case IP_VERSION(11, 0, 4):
 
 157		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
 158		break;
 159	default:
 160		dev_warn(kfd_device, "v9 event interrupt handler is set due to "
 161			"mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
 162		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
 163	}
 164}
 165
 166static void kfd_device_info_init(struct kfd_dev *kfd,
 167				 bool vf, uint32_t gfx_target_version)
 168{
 169	uint32_t gc_version = KFD_GC_VERSION(kfd);
 170	uint32_t asic_type = kfd->adev->asic_type;
 171
 172	kfd->device_info.max_pasid_bits = 16;
 173	kfd->device_info.max_no_of_hqd = 24;
 174	kfd->device_info.num_of_watch_points = 4;
 175	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
 176	kfd->device_info.gfx_target_version = gfx_target_version;
 177
 178	if (KFD_IS_SOC15(kfd)) {
 179		kfd->device_info.doorbell_size = 8;
 180		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
 181		kfd->device_info.supports_cwsr = true;
 182
 183		kfd_device_info_set_sdma_info(kfd);
 184
 185		kfd_device_info_set_event_interrupt_class(kfd);
 186
 187		/* Raven */
 188		if (gc_version == IP_VERSION(9, 1, 0) ||
 189		    gc_version == IP_VERSION(9, 2, 2))
 190			kfd->device_info.needs_iommu_device = true;
 191
 192		if (gc_version < IP_VERSION(11, 0, 0)) {
 193			/* Navi2x+, Navi1x+ */
 194			if (gc_version == IP_VERSION(10, 3, 6))
 195				kfd->device_info.no_atomic_fw_version = 14;
 196			else if (gc_version == IP_VERSION(10, 3, 7))
 197				kfd->device_info.no_atomic_fw_version = 3;
 198			else if (gc_version >= IP_VERSION(10, 3, 0))
 199				kfd->device_info.no_atomic_fw_version = 92;
 200			else if (gc_version >= IP_VERSION(10, 1, 1))
 201				kfd->device_info.no_atomic_fw_version = 145;
 202
 203			/* Navi1x+ */
 204			if (gc_version >= IP_VERSION(10, 1, 1))
 205				kfd->device_info.needs_pci_atomics = true;
 
 
 
 
 
 
 
 
 206		}
 207	} else {
 208		kfd->device_info.doorbell_size = 4;
 209		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
 210		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
 211		kfd->device_info.num_sdma_queues_per_engine = 2;
 212
 213		if (asic_type != CHIP_KAVERI &&
 214		    asic_type != CHIP_HAWAII &&
 215		    asic_type != CHIP_TONGA)
 216			kfd->device_info.supports_cwsr = true;
 217
 218		if (asic_type == CHIP_KAVERI ||
 219		    asic_type == CHIP_CARRIZO)
 220			kfd->device_info.needs_iommu_device = true;
 221
 222		if (asic_type != CHIP_HAWAII && !vf)
 223			kfd->device_info.needs_pci_atomics = true;
 224	}
 225}
 226
 227struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 228{
 229	struct kfd_dev *kfd = NULL;
 230	const struct kfd2kgd_calls *f2g = NULL;
 231	uint32_t gfx_target_version = 0;
 232
 233	switch (adev->asic_type) {
 234#ifdef KFD_SUPPORT_IOMMU_V2
 235#ifdef CONFIG_DRM_AMDGPU_CIK
 236	case CHIP_KAVERI:
 237		gfx_target_version = 70000;
 238		if (!vf)
 239			f2g = &gfx_v7_kfd2kgd;
 240		break;
 241#endif
 242	case CHIP_CARRIZO:
 243		gfx_target_version = 80001;
 244		if (!vf)
 245			f2g = &gfx_v8_kfd2kgd;
 246		break;
 247#endif
 248#ifdef CONFIG_DRM_AMDGPU_CIK
 249	case CHIP_HAWAII:
 250		gfx_target_version = 70001;
 251		if (!amdgpu_exp_hw_support)
 252			pr_info(
 253	"KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
 254				);
 255		else if (!vf)
 256			f2g = &gfx_v7_kfd2kgd;
 257		break;
 258#endif
 259	case CHIP_TONGA:
 260		gfx_target_version = 80002;
 261		if (!vf)
 262			f2g = &gfx_v8_kfd2kgd;
 263		break;
 264	case CHIP_FIJI:
 265		gfx_target_version = 80003;
 266		f2g = &gfx_v8_kfd2kgd;
 267		break;
 268	case CHIP_POLARIS10:
 269		gfx_target_version = 80003;
 270		f2g = &gfx_v8_kfd2kgd;
 271		break;
 272	case CHIP_POLARIS11:
 273		gfx_target_version = 80003;
 274		if (!vf)
 275			f2g = &gfx_v8_kfd2kgd;
 276		break;
 277	case CHIP_POLARIS12:
 278		gfx_target_version = 80003;
 279		if (!vf)
 280			f2g = &gfx_v8_kfd2kgd;
 281		break;
 282	case CHIP_VEGAM:
 283		gfx_target_version = 80003;
 284		if (!vf)
 285			f2g = &gfx_v8_kfd2kgd;
 286		break;
 287	default:
 288		switch (adev->ip_versions[GC_HWIP][0]) {
 289		/* Vega 10 */
 290		case IP_VERSION(9, 0, 1):
 291			gfx_target_version = 90000;
 292			f2g = &gfx_v9_kfd2kgd;
 293			break;
 294#ifdef KFD_SUPPORT_IOMMU_V2
 295		/* Raven */
 296		case IP_VERSION(9, 1, 0):
 297		case IP_VERSION(9, 2, 2):
 298			gfx_target_version = 90002;
 299			if (!vf)
 300				f2g = &gfx_v9_kfd2kgd;
 301			break;
 302#endif
 303		/* Vega12 */
 304		case IP_VERSION(9, 2, 1):
 305			gfx_target_version = 90004;
 306			if (!vf)
 307				f2g = &gfx_v9_kfd2kgd;
 308			break;
 309		/* Renoir */
 310		case IP_VERSION(9, 3, 0):
 311			gfx_target_version = 90012;
 312			if (!vf)
 313				f2g = &gfx_v9_kfd2kgd;
 314			break;
 315		/* Vega20 */
 316		case IP_VERSION(9, 4, 0):
 317			gfx_target_version = 90006;
 318			if (!vf)
 319				f2g = &gfx_v9_kfd2kgd;
 320			break;
 321		/* Arcturus */
 322		case IP_VERSION(9, 4, 1):
 323			gfx_target_version = 90008;
 324			f2g = &arcturus_kfd2kgd;
 325			break;
 326		/* Aldebaran */
 327		case IP_VERSION(9, 4, 2):
 328			gfx_target_version = 90010;
 329			f2g = &aldebaran_kfd2kgd;
 330			break;
 331		/* Navi10 */
 332		case IP_VERSION(10, 1, 10):
 333			gfx_target_version = 100100;
 334			if (!vf)
 335				f2g = &gfx_v10_kfd2kgd;
 336			break;
 337		/* Navi12 */
 338		case IP_VERSION(10, 1, 2):
 339			gfx_target_version = 100101;
 340			f2g = &gfx_v10_kfd2kgd;
 341			break;
 342		/* Navi14 */
 343		case IP_VERSION(10, 1, 1):
 344			gfx_target_version = 100102;
 345			if (!vf)
 346				f2g = &gfx_v10_kfd2kgd;
 347			break;
 348		/* Cyan Skillfish */
 349		case IP_VERSION(10, 1, 3):
 350		case IP_VERSION(10, 1, 4):
 351			gfx_target_version = 100103;
 352			if (!vf)
 353				f2g = &gfx_v10_kfd2kgd;
 354			break;
 355		/* Sienna Cichlid */
 356		case IP_VERSION(10, 3, 0):
 357			gfx_target_version = 100300;
 358			f2g = &gfx_v10_3_kfd2kgd;
 359			break;
 360		/* Navy Flounder */
 361		case IP_VERSION(10, 3, 2):
 362			gfx_target_version = 100301;
 363			f2g = &gfx_v10_3_kfd2kgd;
 364			break;
 365		/* Van Gogh */
 366		case IP_VERSION(10, 3, 1):
 367			gfx_target_version = 100303;
 368			if (!vf)
 369				f2g = &gfx_v10_3_kfd2kgd;
 370			break;
 371		/* Dimgrey Cavefish */
 372		case IP_VERSION(10, 3, 4):
 373			gfx_target_version = 100302;
 374			f2g = &gfx_v10_3_kfd2kgd;
 375			break;
 376		/* Beige Goby */
 377		case IP_VERSION(10, 3, 5):
 378			gfx_target_version = 100304;
 379			f2g = &gfx_v10_3_kfd2kgd;
 380			break;
 381		/* Yellow Carp */
 382		case IP_VERSION(10, 3, 3):
 383			gfx_target_version = 100305;
 384			if (!vf)
 385				f2g = &gfx_v10_3_kfd2kgd;
 386			break;
 387		case IP_VERSION(10, 3, 6):
 388		case IP_VERSION(10, 3, 7):
 389			gfx_target_version = 100306;
 390			if (!vf)
 391				f2g = &gfx_v10_3_kfd2kgd;
 392			break;
 393		case IP_VERSION(11, 0, 0):
 394			gfx_target_version = 110000;
 395			f2g = &gfx_v11_kfd2kgd;
 396			break;
 397		case IP_VERSION(11, 0, 1):
 398		case IP_VERSION(11, 0, 4):
 399			gfx_target_version = 110003;
 400			f2g = &gfx_v11_kfd2kgd;
 401			break;
 402		case IP_VERSION(11, 0, 2):
 403			gfx_target_version = 110002;
 404			f2g = &gfx_v11_kfd2kgd;
 405			break;
 406		case IP_VERSION(11, 0, 3):
 407			/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
 408			gfx_target_version = 110001;
 409			f2g = &gfx_v11_kfd2kgd;
 410			break;
 411		default:
 412			break;
 413		}
 414		break;
 415	}
 416
 417	if (!f2g) {
 418		if (adev->ip_versions[GC_HWIP][0])
 419			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
 420				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
 421		else
 422			dev_err(kfd_device, "%s %s not supported in kfd\n",
 423				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
 424		return NULL;
 425	}
 426
 427	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
 428	if (!kfd)
 429		return NULL;
 430
 431	kfd->adev = adev;
 432	kfd_device_info_init(kfd, vf, gfx_target_version);
 433	kfd->init_complete = false;
 434	kfd->kfd2kgd = f2g;
 435	atomic_set(&kfd->compute_profile, 0);
 436
 437	mutex_init(&kfd->doorbell_mutex);
 438	memset(&kfd->doorbell_available_index, 0,
 439		sizeof(kfd->doorbell_available_index));
 440
 441	atomic_set(&kfd->sram_ecc_flag, 0);
 442
 443	ida_init(&kfd->doorbell_ida);
 444
 445	return kfd;
 446}
 447
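/*
 * Select the CWSR (compute wave save/restore) trap handler image that
 * matches this GPU's GC IP version. The BUILD_BUG_ON() checks ensure
 * that each image fits within a single page.
 */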
 448static void kfd_cwsr_init(struct kfd_dev *kfd)
 449{
 450	if (cwsr_enable && kfd->device_info.supports_cwsr) {
 451		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
 452			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
 453			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
 454			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
 455		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
 456			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
 457			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
 458			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
 459		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
 460			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
 461			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
 462			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
 463		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
 464			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
 465			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
 466			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
 467		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
 468			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
 469			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
 470			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
 471		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
 472			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
 473			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
 474			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
 475		} else {
 476			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
 477			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
 478			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
 479		}
 480
 481		kfd->cwsr_enabled = true;
 482	}
 483}
 484
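/*
 * Allocate the global GWS (global wave sync) pool shared by all KFD
 * processes. Nothing is allocated under KFD_SCHED_POLICY_NO_HWS;
 * otherwise GWS is allocated when hws_gws_support is forced via the
 * module parameter or the MEC2 firmware on this GC IP version is new
 * enough to support it.
 */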
 485static int kfd_gws_init(struct kfd_dev *kfd)
 486{
 487	int ret = 0;
 488
 489	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
 490		return 0;
 491
 492	if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
 493		((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
 494			&& kfd->mec2_fw_version >= 0x81b3) ||
 495		(KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
 496			&& kfd->mec2_fw_version >= 0x1b3)  ||
 497		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
 498			&& kfd->mec2_fw_version >= 0x30)   ||
 499		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
 500			&& kfd->mec2_fw_version >= 0x28) ||
 501		(KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0)
 502			&& KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)
 503			&& kfd->mec2_fw_version >= 0x6b))))
 504		ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
 505				kfd->adev->gds.gws_size, &kfd->gws);
 506
 507	return ret;
 508}
 509
 510static void kfd_smi_init(struct kfd_dev *dev)
 511{
 512	INIT_LIST_HEAD(&dev->smi_clients);
 513	spin_lock_init(&dev->smi_lock);
 514}
 515
 516bool kgd2kfd_device_init(struct kfd_dev *kfd,
 517			 const struct kgd2kfd_shared_resources *gpu_resources)
 518{
 519	unsigned int size, map_process_packet_size;
 520
 521	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
 522			KGD_ENGINE_MEC1);
 523	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
 524			KGD_ENGINE_MEC2);
 525	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
 526			KGD_ENGINE_SDMA1);
 527	kfd->shared_resources = *gpu_resources;
 528
 529	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
 530	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
 531	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
 532			- kfd->vm_info.first_vmid_kfd + 1;
 533
 534	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
 535	 * 32 and 64-bit requests are possible and must be
 536	 * supported.
 537	 */
 538	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
 539	if (!kfd->pci_atomic_requested &&
 540	    kfd->device_info.needs_pci_atomics &&
 541	    (!kfd->device_info.no_atomic_fw_version ||
 542	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
 543		dev_info(kfd_device,
 544			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
 545			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
 546			 kfd->mec_fw_version,
 547			 kfd->device_info.no_atomic_fw_version);
 548		return false;
 549	}
 550
 551	/* Verify the module parameter that limits the number of concurrently mapped processes */
 552	if (hws_max_conc_proc >= 0)
 553		kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
 554	else
 555		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
 556
 557	/* calculate max size of mqds needed for queues */
 558	size = max_num_of_queues_per_device *
 559			kfd->device_info.mqd_size_aligned;
 560
 561	/*
 562	 * calculate max size of runlist packet.
 563	 * There can be only 2 packets at once
 564	 */
 565	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
 566				sizeof(struct pm4_mes_map_process_aldebaran) :
 567				sizeof(struct pm4_mes_map_process);
 568	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
 569		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
 570		+ sizeof(struct pm4_mes_runlist)) * 2;
 571
 572	/* Add size of HIQ & DIQ */
 573	size += KFD_KERNEL_QUEUE_SIZE * 2;
 574
 575	/* add another 512KB for all other allocations on gart (HPD, fences) */
 576	size += 512 * 1024;
 577
 578	if (amdgpu_amdkfd_alloc_gtt_mem(
 579			kfd->adev, size, &kfd->gtt_mem,
 580			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
 581			false)) {
 582		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
 583		goto alloc_gtt_mem_failure;
 584	}
 585
 586	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
 587
 588	/* Initialize GTT sa with 512 byte chunk size */
 589	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
 590		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
 591		goto kfd_gtt_sa_init_error;
 592	}
 593
 594	if (kfd_doorbell_init(kfd)) {
 595		dev_err(kfd_device,
 596			"Error initializing doorbell aperture\n");
 597		goto kfd_doorbell_error;
 598	}
 599
 600	if (amdgpu_use_xgmi_p2p)
 601		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
 602
 603	kfd->noretry = kfd->adev->gmc.noretry;
 604
 605	if (kfd_interrupt_init(kfd)) {
 606		dev_err(kfd_device, "Error initializing interrupts\n");
 607		goto kfd_interrupt_error;
 608	}
 609
 610	kfd->dqm = device_queue_manager_init(kfd);
 611	if (!kfd->dqm) {
 612		dev_err(kfd_device, "Error initializing queue manager\n");
 613		goto device_queue_manager_error;
 614	}
 615
 616	/* If supported on this device, allocate global GWS that is shared
 617	 * by all KFD processes
 618	 */
 619	if (kfd_gws_init(kfd)) {
 620		dev_err(kfd_device, "Could not allocate %d gws\n",
 621			kfd->adev->gds.gws_size);
 622		goto gws_error;
 623	}
 624
 625	/* If CRAT is broken, IOMMU support will not be enabled */
 626	kfd_double_confirm_iommu_support(kfd);
 627
 628	if (kfd_iommu_device_init(kfd)) {
 629		kfd->use_iommu_v2 = false;
 630		dev_err(kfd_device, "Error initializing iommuv2\n");
 631		goto device_iommu_error;
 632	}
 633
 634	kfd_cwsr_init(kfd);
 635
 636	svm_migrate_init(kfd->adev);
 637
 638	if (kgd2kfd_resume_iommu(kfd))
 639		goto device_iommu_error;
 640
 641	if (kfd_resume(kfd))
 642		goto kfd_resume_error;
 643
 644	amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);
 645
 646	if (kfd_topology_add_device(kfd)) {
 647		dev_err(kfd_device, "Error adding device to topology\n");
 648		goto kfd_topology_add_device_error;
 649	}
 650
 651	kfd_smi_init(kfd);
 652
 653	kfd->init_complete = true;
 654	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
 655		 kfd->adev->pdev->device);
 656
 657	pr_debug("Starting kfd with the following scheduling policy %d\n",
 658		kfd->dqm->sched_policy);
 659
 660	goto out;
 661
 662kfd_topology_add_device_error:
 663kfd_resume_error:
 664device_iommu_error:
 665gws_error:
 666	device_queue_manager_uninit(kfd->dqm);
 667device_queue_manager_error:
 668	kfd_interrupt_exit(kfd);
 669kfd_interrupt_error:
 670	kfd_doorbell_fini(kfd);
 671kfd_doorbell_error:
 672	kfd_gtt_sa_fini(kfd);
 673kfd_gtt_sa_init_error:
 674	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
 675alloc_gtt_mem_failure:
 676	if (kfd->gws)
 677		amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
 678	dev_err(kfd_device,
 679		"device %x:%x NOT added due to errors\n",
 680		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
 681out:
 682	return kfd->init_complete;
 683}
 684
 685void kgd2kfd_device_exit(struct kfd_dev *kfd)
 686{
 687	if (kfd->init_complete) {
 688		device_queue_manager_uninit(kfd->dqm);
 689		kfd_interrupt_exit(kfd);
 690		kfd_topology_remove_device(kfd);
 691		kfd_doorbell_fini(kfd);
 692		ida_destroy(&kfd->doorbell_ida);
 693		kfd_gtt_sa_fini(kfd);
 694		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
 695		if (kfd->gws)
 696			amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
 697	}
 698
 699	kfree(kfd);
 700}
 701
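/*
 * Called before a GPU reset: stop the device queue manager, suspend all
 * KFD processes (taking the kfd_locked reference), and signal reset
 * events so user space can react.
 */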
 702int kgd2kfd_pre_reset(struct kfd_dev *kfd)
 703{
 704	if (!kfd->init_complete)
 705		return 0;
 706
 707	kfd_smi_event_update_gpu_reset(kfd, false);
 708
 709	kfd->dqm->ops.pre_reset(kfd->dqm);
 710
 711	kgd2kfd_suspend(kfd, false);
 712
 713	kfd_signal_reset_event(kfd);
 714	return 0;
 715}
 716
 717	/*
 718	 * Fixme: KFD won't be able to resume existing processes for now.
 719	 * We will keep all existing processes in an evicted state and
 720	 * wait for them to be terminated.
 721	 */
 722
 723int kgd2kfd_post_reset(struct kfd_dev *kfd)
 724{
 725	int ret;
 726
 727	if (!kfd->init_complete)
 728		return 0;
 729
 730	ret = kfd_resume(kfd);
 731	if (ret)
 732		return ret;
 733	atomic_dec(&kfd_locked);
 734
 735	atomic_set(&kfd->sram_ecc_flag, 0);
 736
 737	kfd_smi_event_update_gpu_reset(kfd, true);
 738
 739	return 0;
 740}
 741
 742bool kfd_is_locked(void)
 743{
 744	return (atomic_read(&kfd_locked) > 0);
 745}
 746
 747void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 748{
 749	if (!kfd->init_complete)
 750		return;
 751
 752	/* for runtime suspend, skip locking kfd */
 753	if (!run_pm) {
 754		/* When the first KFD device suspends, suspend all KFD processes */
 755		if (atomic_inc_return(&kfd_locked) == 1)
 756			kfd_suspend_all_processes();
 757	}
 758
 759	kfd->dqm->ops.stop(kfd->dqm);
 760	kfd_iommu_suspend(kfd);
 761}
 762
 763int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 764{
 765	int ret, count;
 766
 767	if (!kfd->init_complete)
 768		return 0;
 769
 770	ret = kfd_resume(kfd);
 771	if (ret)
 772		return ret;
 773
 774	/* for runtime resume, skip unlocking kfd */
 775	if (!run_pm) {
 776		count = atomic_dec_return(&kfd_locked);
 777		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
 778		if (count == 0)
 779			ret = kfd_resume_all_processes();
 780	}
 781
 782	return ret;
 783}
 784
 785int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
 786{
 787	int err = 0;
 788
 789	err = kfd_iommu_resume(kfd);
 790	if (err)
 791		dev_err(kfd_device,
 792			"Failed to resume IOMMU for device %x:%x\n",
 793			kfd->adev->pdev->vendor, kfd->adev->pdev->device);
 794	return err;
 795}
 796
 797static int kfd_resume(struct kfd_dev *kfd)
 798{
 799	int err = 0;
 800
 801	err = kfd->dqm->ops.start(kfd->dqm);
 802	if (err)
 803		dev_err(kfd_device,
 804			"Error starting queue manager for device %x:%x\n",
 805			kfd->adev->pdev->vendor, kfd->adev->pdev->device);
 806
 807	return err;
 808}
 809
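/*
 * Queue work on the next online CPU in the local NUMA node, falling
 * back to the current CPU if no other local CPU is found. This keeps
 * the interrupt bottom half NUMA-local to the CPU that took the ISR.
 */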
 810static inline void kfd_queue_work(struct workqueue_struct *wq,
 811				  struct work_struct *work)
 812{
 813	int cpu, new_cpu;
 814
 815	cpu = new_cpu = smp_processor_id();
 816	do {
 817		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
 818		if (cpu_to_node(new_cpu) == numa_node_id())
 819			break;
 820	} while (cpu != new_cpu);
 821
 822	queue_work_on(new_cpu, wq, work);
 823}
 824
 825/* This is called directly from KGD at ISR. */
 826void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 827{
 828	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
 829	bool is_patched = false;
 830	unsigned long flags;
 831
 832	if (!kfd->init_complete)
 833		return;
 834
 835	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
 836		dev_err_once(kfd_device, "Ring entry too small\n");
 837		return;
 838	}
 839
 840	spin_lock_irqsave(&kfd->interrupt_lock, flags);
 841
 842	if (kfd->interrupts_active
 843	    && interrupt_is_wanted(kfd, ih_ring_entry,
 844				   patched_ihre, &is_patched)
 845	    && enqueue_ih_ring_entry(kfd,
 846				     is_patched ? patched_ihre : ih_ring_entry))
 847		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
 848
 849	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 850}
 851
 852int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
 853{
 854	struct kfd_process *p;
 855	int r;
 856
 857	/* Because we are called from arbitrary context (workqueue) as opposed
 858	 * to process context, kfd_process could attempt to exit while we are
 859	 * running so the lookup function increments the process ref count.
 860	 */
 861	p = kfd_lookup_process_by_mm(mm);
 862	if (!p)
 863		return -ESRCH;
 864
 865	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
 866	r = kfd_process_evict_queues(p, trigger);
 867
 868	kfd_unref_process(p);
 869	return r;
 870}
 871
 872int kgd2kfd_resume_mm(struct mm_struct *mm)
 873{
 874	struct kfd_process *p;
 875	int r;
 876
 877	/* Because we are called from arbitrary context (workqueue) as opposed
 878	 * to process context, kfd_process could attempt to exit while we are
 879	 * running so the lookup function increments the process ref count.
 880	 */
 881	p = kfd_lookup_process_by_mm(mm);
 882	if (!p)
 883		return -ESRCH;
 884
 885	r = kfd_process_restore_queues(p);
 886
 887	kfd_unref_process(p);
 888	return r;
 889}
 890
 891/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 892 *   prepare for safe eviction of KFD BOs that belong to the specified
 893 *   process.
 894 *
 895 * @mm: mm_struct that identifies the specified KFD process
 896 * @fence: eviction fence attached to KFD process BOs
 897 *
 898 */
 899int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
 900					       struct dma_fence *fence)
 901{
 902	struct kfd_process *p;
 903	unsigned long active_time;
 904	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
 905
 906	if (!fence)
 907		return -EINVAL;
 908
 909	if (dma_fence_is_signaled(fence))
 910		return 0;
 911
 912	p = kfd_lookup_process_by_mm(mm);
 913	if (!p)
 914		return -ENODEV;
 915
 916	if (fence->seqno == p->last_eviction_seqno)
 917		goto out;
 918
 919	p->last_eviction_seqno = fence->seqno;
 920
 921	/* Avoid KFD process starvation. Wait for at least
 922	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
 923	 */
 924	active_time = get_jiffies_64() - p->last_restore_timestamp;
 925	if (delay_jiffies > active_time)
 926		delay_jiffies -= active_time;
 927	else
 928		delay_jiffies = 0;
 929
 930	/* During process initialization eviction_work.dwork is initialized
 931	 * to kfd_evict_bo_worker
 932	 */
 933	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
 934	     p->lead_thread->pid, delay_jiffies);
 935	schedule_delayed_work(&p->eviction_work, delay_jiffies);
 936out:
 937	kfd_unref_process(p);
 938	return 0;
 939}
 940
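/*
 * GTT sub-allocator: the GTT buffer reserved in kgd2kfd_device_init()
 * is divided into fixed-size chunks tracked by a bitmap, so small
 * objects (MQDs, runlist packets, HIQ/DIQ queues) can be carved out
 * without going back to the GPU memory manager for every allocation.
 */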
 941static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
 942				unsigned int chunk_size)
 943{
 944	if (WARN_ON(buf_size < chunk_size))
 945		return -EINVAL;
 946	if (WARN_ON(buf_size == 0))
 947		return -EINVAL;
 948	if (WARN_ON(chunk_size == 0))
 949		return -EINVAL;
 950
 951	kfd->gtt_sa_chunk_size = chunk_size;
 952	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
 953
 954	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
 955					   GFP_KERNEL);
 956	if (!kfd->gtt_sa_bitmap)
 957		return -ENOMEM;
 958
 959	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
 960			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
 961
 962	mutex_init(&kfd->gtt_sa_lock);
 963
 964	return 0;
 965}
 966
 967static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
 968{
 969	mutex_destroy(&kfd->gtt_sa_lock);
 970	bitmap_free(kfd->gtt_sa_bitmap);
 971}
 972
 973static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
 974						unsigned int bit_num,
 975						unsigned int chunk_size)
 976{
 977	return start_addr + bit_num * chunk_size;
 978}
 979
 980static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
 981						unsigned int bit_num,
 982						unsigned int chunk_size)
 983{
 984	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
 985}
 986
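/*
 * First-fit search over the chunk bitmap: find a run of contiguous free
 * chunks large enough for @size and mark it allocated.
 *
 * Illustrative usage only (the MQD managers allocate their per-queue
 * buffers along these lines; names below are placeholders):
 *
 *	struct kfd_mem_obj *mqd_mem_obj;
 *
 *	if (kfd_gtt_sa_allocate(kfd, mqd_size, &mqd_mem_obj))
 *		return NULL;
 *	...
 *	kfd_gtt_sa_free(kfd, mqd_mem_obj);
 */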
 987int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 988			struct kfd_mem_obj **mem_obj)
 989{
 990	unsigned int found, start_search, cur_size;
 991
 992	if (size == 0)
 993		return -EINVAL;
 994
 995	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
 996		return -ENOMEM;
 997
 998	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
 999	if (!(*mem_obj))
1000		return -ENOMEM;
1001
1002	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
1003
1004	start_search = 0;
1005
1006	mutex_lock(&kfd->gtt_sa_lock);
1007
1008kfd_gtt_restart_search:
1009	/* Find the first chunk that is free */
1010	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
1011					kfd->gtt_sa_num_of_chunks,
1012					start_search);
1013
1014	pr_debug("Found = %d\n", found);
1015
1016	/* If there wasn't any free chunk, bail out */
1017	if (found == kfd->gtt_sa_num_of_chunks)
1018		goto kfd_gtt_no_free_chunk;
1019
1020	/* Update fields of mem_obj */
1021	(*mem_obj)->range_start = found;
1022	(*mem_obj)->range_end = found;
1023	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
1024					kfd->gtt_start_gpu_addr,
1025					found,
1026					kfd->gtt_sa_chunk_size);
1027	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
1028					kfd->gtt_start_cpu_ptr,
1029					found,
1030					kfd->gtt_sa_chunk_size);
1031
1032	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
1033			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
1034
1035	/* If we need only one chunk, mark it as allocated and get out */
1036	if (size <= kfd->gtt_sa_chunk_size) {
1037		pr_debug("Single bit\n");
1038		__set_bit(found, kfd->gtt_sa_bitmap);
1039		goto kfd_gtt_out;
1040	}
1041
1042	/* Otherwise, try to see if we have enough contiguous chunks */
1043	cur_size = size - kfd->gtt_sa_chunk_size;
1044	do {
1045		(*mem_obj)->range_end =
1046			find_next_zero_bit(kfd->gtt_sa_bitmap,
1047					kfd->gtt_sa_num_of_chunks, ++found);
1048		/*
1049		 * If the next free chunk is not contiguous, then we need to
1050		 * restart our search from the last free chunk we found (which
1051		 * wasn't contiguous with the previous ones).
1052		 */
1053		if ((*mem_obj)->range_end != found) {
1054			start_search = found;
1055			goto kfd_gtt_restart_search;
1056		}
1057
1058		/*
1059		 * If we reached the end of the buffer, bail out with an error.
1060		 */
1061		if (found == kfd->gtt_sa_num_of_chunks)
1062			goto kfd_gtt_no_free_chunk;
1063
1064		/* Check whether we still need another chunk */
1065		if (cur_size <= kfd->gtt_sa_chunk_size)
1066			cur_size = 0;
1067		else
1068			cur_size -= kfd->gtt_sa_chunk_size;
1069
1070	} while (cur_size > 0);
1071
1072	pr_debug("range_start = %d, range_end = %d\n",
1073		(*mem_obj)->range_start, (*mem_obj)->range_end);
1074
1075	/* Mark the chunks as allocated */
1076	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
1077		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);
1078
1079kfd_gtt_out:
1080	mutex_unlock(&kfd->gtt_sa_lock);
1081	return 0;
1082
1083kfd_gtt_no_free_chunk:
1084	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
1085	mutex_unlock(&kfd->gtt_sa_lock);
1086	kfree(*mem_obj);
1087	return -ENOMEM;
1088}
1089
1090int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
1091{
1092	/* Act like kfree when trying to free a NULL object */
1093	if (!mem_obj)
1094		return 0;
1095
1096	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
1097			mem_obj, mem_obj->range_start, mem_obj->range_end);
1098
1099	mutex_lock(&kfd->gtt_sa_lock);
1100
1101	/* Mark the chunks as free */
1102	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
1103		     mem_obj->range_end - mem_obj->range_start + 1);
1104
1105	mutex_unlock(&kfd->gtt_sa_lock);
1106
1107	kfree(mem_obj);
1108	return 0;
1109}
1110
1111void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
1112{
1113	if (kfd)
1114		atomic_inc(&kfd->sram_ecc_flag);
1115}
1116
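/*
 * Reference count active compute users: leave the compute-idle power
 * profile when the count goes from 0 to 1, and re-enter it once the
 * last user drops the count back to 0.
 */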
1117void kfd_inc_compute_active(struct kfd_dev *kfd)
1118{
1119	if (atomic_inc_return(&kfd->compute_profile) == 1)
1120		amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
1121}
1122
1123void kfd_dec_compute_active(struct kfd_dev *kfd)
1124{
1125	int count = atomic_dec_return(&kfd->compute_profile);
1126
1127	if (count == 0)
1128		amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
1129	WARN_ONCE(count < 0, "Compute profile ref. count error");
1130}
1131
1132void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
1133{
1134	if (kfd && kfd->init_complete)
1135		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
1136}
1137
1138	/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
1139	 * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
1140	 * When the device has more than two engines, we reserve two for PCIe to enable
1141	 * full-duplex and the rest are used as XGMI.
1142	 */
1143unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
1144{
1145	/* If XGMI is not supported, all SDMA engines are PCIe */
1146	if (!kdev->adev->gmc.xgmi.supported)
1147		return kdev->adev->sdma.num_instances;
1148
1149	return min(kdev->adev->sdma.num_instances, 2);
1150}
1151
1152unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
1153{
1154	/* After reserved for PCIe, the rest of engines are XGMI */
1155	return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
1156}
1157
1158#if defined(CONFIG_DEBUG_FS)
1159
1160	/* This function will send a packet to the HIQ to hang the HWS,
1161	 * which will trigger a GPU reset and bring the HWS back to a normal state.
1162	 */
1163int kfd_debugfs_hang_hws(struct kfd_dev *dev)
1164{
1165	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
1166		pr_err("HWS is not enabled");
1167		return -EINVAL;
1168	}
1169
1170	return dqm_debugfs_hang_hws(dev->dqm);
1171}
1172
1173#endif