   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/ratelimit.h>
  25#include <linux/printk.h>
  26#include <linux/slab.h>
  27#include <linux/list.h>
  28#include <linux/types.h>
  29#include <linux/bitops.h>
  30#include <linux/sched.h>
  31#include "kfd_priv.h"
  32#include "kfd_device_queue_manager.h"
  33#include "kfd_mqd_manager.h"
  34#include "cik_regs.h"
  35#include "kfd_kernel_queue.h"
  36#include "amdgpu_amdkfd.h"
  37
  38/* Size of the per-pipe EOP queue */
  39#define CIK_HPD_EOP_BYTES_LOG2 11
  40#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
  41
  42static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
  43					unsigned int pasid, unsigned int vmid);
  44
  45static int execute_queues_cpsch(struct device_queue_manager *dqm,
  46				enum kfd_unmap_queues_filter filter,
  47				uint32_t filter_param);
  48static int unmap_queues_cpsch(struct device_queue_manager *dqm,
  49				enum kfd_unmap_queues_filter filter,
  50				uint32_t filter_param);
  51
  52static int map_queues_cpsch(struct device_queue_manager *dqm);
  53
  54static void deallocate_sdma_queue(struct device_queue_manager *dqm,
  55				struct queue *q);
  56
  57static inline void deallocate_hqd(struct device_queue_manager *dqm,
  58				struct queue *q);
  59static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
  60static int allocate_sdma_queue(struct device_queue_manager *dqm,
  61				struct queue *q);
  62static void kfd_process_hw_exception(struct work_struct *work);
  63
  64static inline
  65enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
  66{
  67	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
  68		return KFD_MQD_TYPE_SDMA;
  69	return KFD_MQD_TYPE_CP;
  70}
  71
  72static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
  73{
  74	int i;
  75	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
  76		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;
  77
  78	/* queue is available for KFD usage if bit is 1 */
  79	for (i = 0; i <  dqm->dev->shared_resources.num_queue_per_pipe; ++i)
  80		if (test_bit(pipe_offset + i,
  81			      dqm->dev->shared_resources.cp_queue_bitmap))
  82			return true;
  83	return false;
  84}
  85
  86unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
  87{
  88	return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
  89				KGD_MAX_QUEUES);
  90}
  91
  92unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
  93{
  94	return dqm->dev->shared_resources.num_queue_per_pipe;
  95}
  96
  97unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
  98{
  99	return dqm->dev->shared_resources.num_pipe_per_mec;
 100}
 101
 102static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
 103{
 104	return dqm->dev->device_info->num_sdma_engines;
 105}
 106
 107static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
 108{
 109	return dqm->dev->device_info->num_xgmi_sdma_engines;
 110}
 111
 112static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
 113{
 114	return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
 115}
 116
 117unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
 118{
 119	return dqm->dev->device_info->num_sdma_engines
 120			* dqm->dev->device_info->num_sdma_queues_per_engine;
 121}
 122
 123unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
 124{
 125	return dqm->dev->device_info->num_xgmi_sdma_engines
 126			* dqm->dev->device_info->num_sdma_queues_per_engine;
 127}
 128
 129void program_sh_mem_settings(struct device_queue_manager *dqm,
 130					struct qcm_process_device *qpd)
 131{
 132	return dqm->dev->kfd2kgd->program_sh_mem_settings(
 133						dqm->dev->kgd, qpd->vmid,
 134						qpd->sh_mem_config,
 135						qpd->sh_mem_ape1_base,
 136						qpd->sh_mem_ape1_limit,
 137						qpd->sh_mem_bases);
 138}
 139
 140static void increment_queue_count(struct device_queue_manager *dqm,
 141			enum kfd_queue_type type)
 142{
 143	dqm->active_queue_count++;
 144	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
 145		dqm->active_cp_queue_count++;
 146}
 147
 148static void decrement_queue_count(struct device_queue_manager *dqm,
 149			enum kfd_queue_type type)
 150{
 151	dqm->active_queue_count--;
 152	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
 153		dqm->active_cp_queue_count--;
 154}
 155
 156int read_sdma_queue_counter(uint64_t q_rptr, uint64_t *val)
 157{
 158	int ret;
 159	uint64_t tmp = 0;
 160
 161	if (!val)
 162		return -EINVAL;
 163	/*
 164	 * SDMA activity counter is stored at queue's RPTR + 0x8 location.
 165	 */
 166	if (!access_ok((const void __user *)(q_rptr +
 167					sizeof(uint64_t)), sizeof(uint64_t))) {
 168		pr_err("Can't access sdma queue activity counter\n");
 169		return -EFAULT;
 170	}
 171
 172	ret = get_user(tmp, (uint64_t *)(q_rptr + sizeof(uint64_t)));
 173	if (!ret) {
 174		*val = tmp;
 175	}
 176
 177	return ret;
 178}
 179
 180static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
 181{
 182	struct kfd_dev *dev = qpd->dqm->dev;
 183
 184	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
 185		/* On pre-SOC15 chips we need to use the queue ID to
 186		 * preserve the user mode ABI.
 187		 */
 188		q->doorbell_id = q->properties.queue_id;
 189	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 190			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
 191		/* For SDMA queues on SOC15 with 8-byte doorbell, use static
 192		 * doorbell assignments based on the engine and queue id.
  193	 * The doorbell index distance between RLC (2*i) and (2*i+1)
  194	 * for an SDMA engine is 512.
 195		 */
 196		uint32_t *idx_offset =
 197				dev->shared_resources.sdma_doorbell_idx;
 198
 199		q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
 200			+ (q->properties.sdma_queue_id & 1)
 201			* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
 202			+ (q->properties.sdma_queue_id >> 1);
 203	} else {
 204		/* For CP queues on SOC15 reserve a free doorbell ID */
 205		unsigned int found;
 206
 207		found = find_first_zero_bit(qpd->doorbell_bitmap,
 208					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 209		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
 210			pr_debug("No doorbells available");
 211			return -EBUSY;
 212		}
 213		set_bit(found, qpd->doorbell_bitmap);
 214		q->doorbell_id = found;
 215	}
 216
 217	q->properties.doorbell_off =
 218		kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
 219					  q->doorbell_id);
 220
 221	return 0;
 222}
 223
 224static void deallocate_doorbell(struct qcm_process_device *qpd,
 225				struct queue *q)
 226{
 227	unsigned int old;
 228	struct kfd_dev *dev = qpd->dqm->dev;
 229
 230	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
 231	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 232	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 233		return;
 234
 235	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
 236	WARN_ON(!old);
 237}
 238
 239static int allocate_vmid(struct device_queue_manager *dqm,
 240			struct qcm_process_device *qpd,
 241			struct queue *q)
 242{
 243	int allocated_vmid = -1, i;
 244
 245	for (i = dqm->dev->vm_info.first_vmid_kfd;
 246			i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
 247		if (!dqm->vmid_pasid[i]) {
 248			allocated_vmid = i;
 249			break;
 250		}
 251	}
 252
 253	if (allocated_vmid < 0) {
 254		pr_err("no more vmid to allocate\n");
 255		return -ENOSPC;
 256	}
 257
 258	pr_debug("vmid allocated: %d\n", allocated_vmid);
 259
 260	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
 261
 262	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
 263
 264	qpd->vmid = allocated_vmid;
 265	q->properties.vmid = allocated_vmid;
 266
 267	program_sh_mem_settings(dqm, qpd);
 268
 269	/* qpd->page_table_base is set earlier when register_process()
 270	 * is called, i.e. when the first queue is created.
 271	 */
 272	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
 273			qpd->vmid,
 274			qpd->page_table_base);
 275	/* invalidate the VM context after pasid and vmid mapping is set up */
 276	kfd_flush_tlb(qpd_to_pdd(qpd));
 277
 278	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
 279		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
 280				qpd->sh_hidden_private_base, qpd->vmid);
 281
 282	return 0;
 283}
 284
 285static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
 286				struct qcm_process_device *qpd)
 287{
 288	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
 289	int ret;
 290
 291	if (!qpd->ib_kaddr)
 292		return -ENOMEM;
 293
 294	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
 295	if (ret)
 296		return ret;
 297
 298	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
 299				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
 300				pmf->release_mem_size / sizeof(uint32_t));
 301}
 302
 303static void deallocate_vmid(struct device_queue_manager *dqm,
 304				struct qcm_process_device *qpd,
 305				struct queue *q)
 306{
 307	/* On GFX v7, CP doesn't flush TC at dequeue */
 308	if (q->device->device_info->asic_family == CHIP_HAWAII)
 309		if (flush_texture_cache_nocpsch(q->device, qpd))
 310			pr_err("Failed to flush TC\n");
 311
 312	kfd_flush_tlb(qpd_to_pdd(qpd));
 313
 314	/* Release the vmid mapping */
 315	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
 316	dqm->vmid_pasid[qpd->vmid] = 0;
 317
 318	qpd->vmid = 0;
 319	q->properties.vmid = 0;
 320}
 321
 322static int create_queue_nocpsch(struct device_queue_manager *dqm,
 323				struct queue *q,
 324				struct qcm_process_device *qpd)
 325{
 326	struct mqd_manager *mqd_mgr;
 327	int retval;
 328
 329	dqm_lock(dqm);
 330
 331	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
 332		pr_warn("Can't create new usermode queue because %d queues were already created\n",
 333				dqm->total_queue_count);
 334		retval = -EPERM;
 335		goto out_unlock;
 336	}
 337
 338	if (list_empty(&qpd->queues_list)) {
 339		retval = allocate_vmid(dqm, qpd, q);
 340		if (retval)
 341			goto out_unlock;
 342	}
 343	q->properties.vmid = qpd->vmid;
 344	/*
 345	 * Eviction state logic: mark all queues as evicted, even ones
 346	 * not currently active. Restoring inactive queues later only
 347	 * updates the is_evicted flag but is a no-op otherwise.
 348	 */
 349	q->properties.is_evicted = !!qpd->evicted;
 350
 351	q->properties.tba_addr = qpd->tba_addr;
 352	q->properties.tma_addr = qpd->tma_addr;
 353
 354	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 355			q->properties.type)];
 356	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
 357		retval = allocate_hqd(dqm, q);
 358		if (retval)
 359			goto deallocate_vmid;
 360		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
 361			q->pipe, q->queue);
 362	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 363		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
 364		retval = allocate_sdma_queue(dqm, q);
 365		if (retval)
 366			goto deallocate_vmid;
 367		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
 368	}
 369
 370	retval = allocate_doorbell(qpd, q);
 371	if (retval)
 372		goto out_deallocate_hqd;
 373
 374	/* Temporarily release dqm lock to avoid a circular lock dependency */
 375	dqm_unlock(dqm);
 376	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
 377	dqm_lock(dqm);
 378
 379	if (!q->mqd_mem_obj) {
 380		retval = -ENOMEM;
 381		goto out_deallocate_doorbell;
 382	}
 383	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
 384				&q->gart_mqd_addr, &q->properties);
 385	if (q->properties.is_active) {
 386		if (!dqm->sched_running) {
 387			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
 388			goto add_queue_to_list;
 389		}
 390
 391		if (WARN(q->process->mm != current->mm,
 392					"should only run in user thread"))
 393			retval = -EFAULT;
 394		else
 395			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
 396					q->queue, &q->properties, current->mm);
 397		if (retval)
 398			goto out_free_mqd;
 399	}
 400
 401add_queue_to_list:
 402	list_add(&q->list, &qpd->queues_list);
 403	qpd->queue_count++;
 404	if (q->properties.is_active)
 405		increment_queue_count(dqm, q->properties.type);
 406
 407	/*
 408	 * Unconditionally increment this counter, regardless of the queue's
 409	 * type or whether the queue is active.
 410	 */
 411	dqm->total_queue_count++;
 412	pr_debug("Total of %d queues are accountable so far\n",
 413			dqm->total_queue_count);
 414	goto out_unlock;
 415
 416out_free_mqd:
 417	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
 418out_deallocate_doorbell:
 419	deallocate_doorbell(qpd, q);
 420out_deallocate_hqd:
 421	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 422		deallocate_hqd(dqm, q);
 423	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 424		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 425		deallocate_sdma_queue(dqm, q);
 426deallocate_vmid:
 427	if (list_empty(&qpd->queues_list))
 428		deallocate_vmid(dqm, qpd, q);
 429out_unlock:
 430	dqm_unlock(dqm);
 431	return retval;
 432}
 433
 434static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 435{
 436	bool set;
 437	int pipe, bit, i;
 438
 439	set = false;
 440
 441	for (pipe = dqm->next_pipe_to_allocate, i = 0;
 442			i < get_pipes_per_mec(dqm);
 443			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
 444
 445		if (!is_pipe_enabled(dqm, 0, pipe))
 446			continue;
 447
 448		if (dqm->allocated_queues[pipe] != 0) {
 449			bit = ffs(dqm->allocated_queues[pipe]) - 1;
 450			dqm->allocated_queues[pipe] &= ~(1 << bit);
 451			q->pipe = pipe;
 452			q->queue = bit;
 453			set = true;
 454			break;
 455		}
 456	}
 457
 458	if (!set)
 459		return -EBUSY;
 460
 461	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
 462	/* horizontal hqd allocation */
 463	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
 464
 465	return 0;
 466}
 467
 468static inline void deallocate_hqd(struct device_queue_manager *dqm,
 469				struct queue *q)
 470{
 471	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
 472}
 473
 474/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
  475 * to avoid unsynchronized access
 476 */
 477static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 478				struct qcm_process_device *qpd,
 479				struct queue *q)
 480{
 481	int retval;
 482	struct mqd_manager *mqd_mgr;
 483
 484	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 485			q->properties.type)];
 486
 487	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 488		deallocate_hqd(dqm, q);
 489	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 490		deallocate_sdma_queue(dqm, q);
 491	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 492		deallocate_sdma_queue(dqm, q);
 493	else {
 494		pr_debug("q->properties.type %d is invalid\n",
 495				q->properties.type);
 496		return -EINVAL;
 497	}
 498	dqm->total_queue_count--;
 499
 500	deallocate_doorbell(qpd, q);
 501
 502	if (!dqm->sched_running) {
 503		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
 504		return 0;
 505	}
 506
 507	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
 508				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
 509				KFD_UNMAP_LATENCY_MS,
 510				q->pipe, q->queue);
 511	if (retval == -ETIME)
 512		qpd->reset_wavefronts = true;
 513
 514
 515	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
 516
 517	list_del(&q->list);
 518	if (list_empty(&qpd->queues_list)) {
 519		if (qpd->reset_wavefronts) {
 520			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
 521					dqm->dev);
 522			/* dbgdev_wave_reset_wavefronts has to be called before
 523			 * deallocate_vmid(), i.e. when vmid is still in use.
 524			 */
 525			dbgdev_wave_reset_wavefronts(dqm->dev,
 526					qpd->pqm->process);
 527			qpd->reset_wavefronts = false;
 528		}
 529
 530		deallocate_vmid(dqm, qpd, q);
 531	}
 532	qpd->queue_count--;
 533	if (q->properties.is_active) {
 534		decrement_queue_count(dqm, q->properties.type);
 535		if (q->properties.is_gws) {
 536			dqm->gws_queue_count--;
 537			qpd->mapped_gws_queue = false;
 538		}
 539	}
 540
 541	return retval;
 542}
 543
 544static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 545				struct qcm_process_device *qpd,
 546				struct queue *q)
 547{
 548	int retval;
 549	uint64_t sdma_val = 0;
 550	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
 551
 552	/* Get the SDMA queue stats */
 553	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
 554	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
 555		retval = read_sdma_queue_counter((uint64_t)q->properties.read_ptr,
 556							&sdma_val);
 557		if (retval)
 558			pr_err("Failed to read SDMA queue counter for queue: %d\n",
 559				q->properties.queue_id);
 560	}
 561
 562	dqm_lock(dqm);
 563	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
 564	if (!retval)
 565		pdd->sdma_past_activity_counter += sdma_val;
 566	dqm_unlock(dqm);
 567
 568	return retval;
 569}
 570
 571static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 572{
 573	int retval = 0;
 574	struct mqd_manager *mqd_mgr;
 575	struct kfd_process_device *pdd;
 576	bool prev_active = false;
 577
 578	dqm_lock(dqm);
 579	pdd = kfd_get_process_device_data(q->device, q->process);
 580	if (!pdd) {
 581		retval = -ENODEV;
 582		goto out_unlock;
 583	}
 584	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 585			q->properties.type)];
 586
 587	/* Save previous activity state for counters */
 588	prev_active = q->properties.is_active;
 589
 590	/* Make sure the queue is unmapped before updating the MQD */
 591	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
 592		retval = unmap_queues_cpsch(dqm,
 593				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 594		if (retval) {
 595			pr_err("unmap queue failed\n");
 596			goto out_unlock;
 597		}
 598	} else if (prev_active &&
 599		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
 600		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 601		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
 602
 603		if (!dqm->sched_running) {
 604			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
 605			goto out_unlock;
 606		}
 607
 608		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
 609				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
 610				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
 611		if (retval) {
 612			pr_err("destroy mqd failed\n");
 613			goto out_unlock;
 614		}
 615	}
 616
 617	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
 618
 619	/*
 620	 * check active state vs. the previous state and modify
 621	 * counter accordingly. map_queues_cpsch uses the
 622	 * dqm->active_queue_count to determine whether a new runlist must be
 623	 * uploaded.
 624	 */
 625	if (q->properties.is_active && !prev_active)
 626		increment_queue_count(dqm, q->properties.type);
 627	else if (!q->properties.is_active && prev_active)
 628		decrement_queue_count(dqm, q->properties.type);
 629
 630	if (q->gws && !q->properties.is_gws) {
 631		if (q->properties.is_active) {
 632			dqm->gws_queue_count++;
 633			pdd->qpd.mapped_gws_queue = true;
 634		}
 635		q->properties.is_gws = true;
 636	} else if (!q->gws && q->properties.is_gws) {
 637		if (q->properties.is_active) {
 638			dqm->gws_queue_count--;
 639			pdd->qpd.mapped_gws_queue = false;
 640		}
 641		q->properties.is_gws = false;
 642	}
 643
 644	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
 645		retval = map_queues_cpsch(dqm);
 646	else if (q->properties.is_active &&
 647		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
 648		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 649		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
 650		if (WARN(q->process->mm != current->mm,
 651			 "should only run in user thread"))
 652			retval = -EFAULT;
 653		else
 654			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
 655						   q->pipe, q->queue,
 656						   &q->properties, current->mm);
 657	}
 658
 659out_unlock:
 660	dqm_unlock(dqm);
 661	return retval;
 662}
 663
 664static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
 665					struct qcm_process_device *qpd)
 666{
 667	struct queue *q;
 668	struct mqd_manager *mqd_mgr;
 669	struct kfd_process_device *pdd;
 670	int retval, ret = 0;
 671
 672	dqm_lock(dqm);
 673	if (qpd->evicted++ > 0) /* already evicted, do nothing */
 674		goto out;
 675
 676	pdd = qpd_to_pdd(qpd);
 677	pr_info_ratelimited("Evicting PASID 0x%x queues\n",
 678			    pdd->process->pasid);
 679
 680	/* Mark all queues as evicted. Deactivate all active queues on
 681	 * the qpd.
 682	 */
 683	list_for_each_entry(q, &qpd->queues_list, list) {
 684		q->properties.is_evicted = true;
 685		if (!q->properties.is_active)
 686			continue;
 687
 688		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 689				q->properties.type)];
 690		q->properties.is_active = false;
 691		decrement_queue_count(dqm, q->properties.type);
 692		if (q->properties.is_gws) {
 693			dqm->gws_queue_count--;
 694			qpd->mapped_gws_queue = false;
 695		}
 696
 697		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
 698			continue;
 699
 700		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
 701				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
 702				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
 703		if (retval && !ret)
 704			/* Return the first error, but keep going to
 705			 * maintain a consistent eviction state
 706			 */
 707			ret = retval;
 708	}
 709
 710out:
 711	dqm_unlock(dqm);
 712	return ret;
 713}
 714
 715static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 716				      struct qcm_process_device *qpd)
 717{
 718	struct queue *q;
 719	struct kfd_process_device *pdd;
 720	int retval = 0;
 721
 722	dqm_lock(dqm);
 723	if (qpd->evicted++ > 0) /* already evicted, do nothing */
 724		goto out;
 725
 726	pdd = qpd_to_pdd(qpd);
 727	pr_info_ratelimited("Evicting PASID 0x%x queues\n",
 728			    pdd->process->pasid);
 729
 730	/* Mark all queues as evicted. Deactivate all active queues on
 731	 * the qpd.
 732	 */
 733	list_for_each_entry(q, &qpd->queues_list, list) {
 734		q->properties.is_evicted = true;
 735		if (!q->properties.is_active)
 736			continue;
 737
 738		q->properties.is_active = false;
 739		decrement_queue_count(dqm, q->properties.type);
 740	}
 741	retval = execute_queues_cpsch(dqm,
 742				qpd->is_debug ?
 743				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
 744				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 745
 746out:
 747	dqm_unlock(dqm);
 748	return retval;
 749}
 750
 751static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 752					  struct qcm_process_device *qpd)
 753{
 754	struct mm_struct *mm = NULL;
 755	struct queue *q;
 756	struct mqd_manager *mqd_mgr;
 757	struct kfd_process_device *pdd;
 758	uint64_t pd_base;
 759	int retval, ret = 0;
 760
 761	pdd = qpd_to_pdd(qpd);
 762	/* Retrieve PD base */
 763	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 764
 765	dqm_lock(dqm);
 766	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
 767		goto out;
 768	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
 769		qpd->evicted--;
 770		goto out;
 771	}
 772
 773	pr_info_ratelimited("Restoring PASID 0x%x queues\n",
 774			    pdd->process->pasid);
 775
 776	/* Update PD Base in QPD */
 777	qpd->page_table_base = pd_base;
 778	pr_debug("Updated PD address to 0x%llx\n", pd_base);
 779
 780	if (!list_empty(&qpd->queues_list)) {
 781		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
 782				dqm->dev->kgd,
 783				qpd->vmid,
 784				qpd->page_table_base);
 785		kfd_flush_tlb(pdd);
 786	}
 787
 788	/* Take a safe reference to the mm_struct, which may otherwise
 789	 * disappear even while the kfd_process is still referenced.
 790	 */
 791	mm = get_task_mm(pdd->process->lead_thread);
 792	if (!mm) {
 793		ret = -EFAULT;
 794		goto out;
 795	}
 796
 797	/* Remove the eviction flags. Activate queues that are not
 798	 * inactive for other reasons.
 799	 */
 800	list_for_each_entry(q, &qpd->queues_list, list) {
 801		q->properties.is_evicted = false;
 802		if (!QUEUE_IS_ACTIVE(q->properties))
 803			continue;
 804
 805		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 806				q->properties.type)];
 807		q->properties.is_active = true;
 808		increment_queue_count(dqm, q->properties.type);
 809		if (q->properties.is_gws) {
 810			dqm->gws_queue_count++;
 811			qpd->mapped_gws_queue = true;
 812		}
 813
 814		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
 815			continue;
 816
 817		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
 818				       q->queue, &q->properties, mm);
 819		if (retval && !ret)
 820			/* Return the first error, but keep going to
 821			 * maintain a consistent eviction state
 822			 */
 823			ret = retval;
 824	}
 825	qpd->evicted = 0;
 826out:
 827	if (mm)
 828		mmput(mm);
 829	dqm_unlock(dqm);
 830	return ret;
 831}
 832
 833static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 834					struct qcm_process_device *qpd)
 835{
 836	struct queue *q;
 837	struct kfd_process_device *pdd;
 838	uint64_t pd_base;
 839	int retval = 0;
 840
 841	pdd = qpd_to_pdd(qpd);
 842	/* Retrieve PD base */
 843	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 844
 845	dqm_lock(dqm);
 846	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
 847		goto out;
 848	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
 849		qpd->evicted--;
 850		goto out;
 851	}
 852
 853	pr_info_ratelimited("Restoring PASID 0x%x queues\n",
 854			    pdd->process->pasid);
 855
 856	/* Update PD Base in QPD */
 857	qpd->page_table_base = pd_base;
 858	pr_debug("Updated PD address to 0x%llx\n", pd_base);
 859
 860	/* activate all active queues on the qpd */
 861	list_for_each_entry(q, &qpd->queues_list, list) {
 862		q->properties.is_evicted = false;
 863		if (!QUEUE_IS_ACTIVE(q->properties))
 864			continue;
 865
 866		q->properties.is_active = true;
 867		increment_queue_count(dqm, q->properties.type);
 868	}
 869	retval = execute_queues_cpsch(dqm,
 870				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 871	qpd->evicted = 0;
 872out:
 873	dqm_unlock(dqm);
 874	return retval;
 875}
 876
 877static int register_process(struct device_queue_manager *dqm,
 878					struct qcm_process_device *qpd)
 879{
 880	struct device_process_node *n;
 881	struct kfd_process_device *pdd;
 882	uint64_t pd_base;
 883	int retval;
 884
 885	n = kzalloc(sizeof(*n), GFP_KERNEL);
 886	if (!n)
 887		return -ENOMEM;
 888
 889	n->qpd = qpd;
 890
 891	pdd = qpd_to_pdd(qpd);
 892	/* Retrieve PD base */
 893	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
 894
 895	dqm_lock(dqm);
 896	list_add(&n->list, &dqm->queues);
 897
 898	/* Update PD Base in QPD */
 899	qpd->page_table_base = pd_base;
 900	pr_debug("Updated PD address to 0x%llx\n", pd_base);
 901
 902	retval = dqm->asic_ops.update_qpd(dqm, qpd);
 903
 904	dqm->processes_count++;
 905
 906	dqm_unlock(dqm);
 907
 908	/* Outside the DQM lock because under the DQM lock we can't do
 909	 * reclaim or take other locks that others hold while reclaiming.
 910	 */
 911	kfd_inc_compute_active(dqm->dev);
 912
 913	return retval;
 914}
 915
 916static int unregister_process(struct device_queue_manager *dqm,
 917					struct qcm_process_device *qpd)
 918{
 919	int retval;
 920	struct device_process_node *cur, *next;
 921
 922	pr_debug("qpd->queues_list is %s\n",
 923			list_empty(&qpd->queues_list) ? "empty" : "not empty");
 924
 925	retval = 0;
 926	dqm_lock(dqm);
 927
 928	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
 929		if (qpd == cur->qpd) {
 930			list_del(&cur->list);
 931			kfree(cur);
 932			dqm->processes_count--;
 933			goto out;
 934		}
 935	}
 936	/* qpd not found in dqm list */
 937	retval = 1;
 938out:
 939	dqm_unlock(dqm);
 940
 941	/* Outside the DQM lock because under the DQM lock we can't do
 942	 * reclaim or take other locks that others hold while reclaiming.
 943	 */
 944	if (!retval)
 945		kfd_dec_compute_active(dqm->dev);
 946
 947	return retval;
 948}
 949
 950static int
 951set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
 952			unsigned int vmid)
 953{
 954	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
 955						dqm->dev->kgd, pasid, vmid);
 956}
 957
 958static void init_interrupts(struct device_queue_manager *dqm)
 959{
 960	unsigned int i;
 961
 962	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
 963		if (is_pipe_enabled(dqm, 0, i))
 964			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
 965}
 966
 967static int initialize_nocpsch(struct device_queue_manager *dqm)
 968{
 969	int pipe, queue;
 970
 971	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 972
 973	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
 974					sizeof(unsigned int), GFP_KERNEL);
 975	if (!dqm->allocated_queues)
 976		return -ENOMEM;
 977
 978	mutex_init(&dqm->lock_hidden);
 979	INIT_LIST_HEAD(&dqm->queues);
 980	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
 981	dqm->active_cp_queue_count = 0;
 982	dqm->gws_queue_count = 0;
 983
 984	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
 985		int pipe_offset = pipe * get_queues_per_pipe(dqm);
 986
 987		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
 988			if (test_bit(pipe_offset + queue,
 989				     dqm->dev->shared_resources.cp_queue_bitmap))
 990				dqm->allocated_queues[pipe] |= 1 << queue;
 991	}
 992
 993	memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
 994
 995	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
 996	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
 997
 998	return 0;
 999}
1000
1001static void uninitialize(struct device_queue_manager *dqm)
1002{
1003	int i;
1004
1005	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
1006
1007	kfree(dqm->allocated_queues);
1008	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
1009		kfree(dqm->mqd_mgrs[i]);
1010	mutex_destroy(&dqm->lock_hidden);
1011}
1012
1013static int start_nocpsch(struct device_queue_manager *dqm)
1014{
1015	pr_info("SW scheduler is used");
1016	init_interrupts(dqm);
1017	
1018	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
1019		return pm_init(&dqm->packets, dqm);
1020	dqm->sched_running = true;
1021
1022	return 0;
1023}
1024
1025static int stop_nocpsch(struct device_queue_manager *dqm)
1026{
1027	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
1028		pm_uninit(&dqm->packets, false);
1029	dqm->sched_running = false;
1030
1031	return 0;
1032}
1033
1034static void pre_reset(struct device_queue_manager *dqm)
1035{
1036	dqm_lock(dqm);
1037	dqm->is_resetting = true;
1038	dqm_unlock(dqm);
1039}
1040
1041static int allocate_sdma_queue(struct device_queue_manager *dqm,
1042				struct queue *q)
1043{
1044	int bit;
1045
1046	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1047		if (dqm->sdma_bitmap == 0) {
1048			pr_err("No more SDMA queue to allocate\n");
1049			return -ENOMEM;
1050		}
1051
1052		bit = __ffs64(dqm->sdma_bitmap);
1053		dqm->sdma_bitmap &= ~(1ULL << bit);
1054		q->sdma_id = bit;
1055		q->properties.sdma_engine_id = q->sdma_id %
1056				get_num_sdma_engines(dqm);
1057		q->properties.sdma_queue_id = q->sdma_id /
1058				get_num_sdma_engines(dqm);
1059	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1060		if (dqm->xgmi_sdma_bitmap == 0) {
1061			pr_err("No more XGMI SDMA queue to allocate\n");
1062			return -ENOMEM;
1063		}
1064		bit = __ffs64(dqm->xgmi_sdma_bitmap);
1065		dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
1066		q->sdma_id = bit;
1067		/* sdma_engine_id is sdma id including
1068		 * both PCIe-optimized SDMAs and XGMI-
1069		 * optimized SDMAs. The calculation below
1070		 * assumes the first N engines are always
1071		 * PCIe-optimized ones
1072		 */
1073		q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
1074				q->sdma_id % get_num_xgmi_sdma_engines(dqm);
1075		q->properties.sdma_queue_id = q->sdma_id /
1076				get_num_xgmi_sdma_engines(dqm);
1077	}
1078
1079	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1080	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
1081
1082	return 0;
1083}
1084
1085static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1086				struct queue *q)
1087{
1088	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1089		if (q->sdma_id >= get_num_sdma_queues(dqm))
1090			return;
1091		dqm->sdma_bitmap |= (1ULL << q->sdma_id);
1092	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1093		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1094			return;
1095		dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
1096	}
1097}
1098
1099/*
1100 * Device Queue Manager implementation for cp scheduler
1101 */
1102
1103static int set_sched_resources(struct device_queue_manager *dqm)
1104{
1105	int i, mec;
1106	struct scheduling_resources res;
1107
1108	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
1109
1110	res.queue_mask = 0;
1111	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
1112		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
1113			/ dqm->dev->shared_resources.num_pipe_per_mec;
1114
1115		if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
1116			continue;
1117
1118		/* only acquire queues from the first MEC */
1119		if (mec > 0)
1120			continue;
1121
1122		/* This situation may be hit in the future if a new HW
1123		 * generation exposes more than 64 queues. If so, the
1124		 * definition of res.queue_mask needs updating
1125		 */
1126		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
1127			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
1128			break;
1129		}
1130
1131		res.queue_mask |= 1ull
1132			<< amdgpu_queue_mask_bit_to_set_resource_bit(
1133				(struct amdgpu_device *)dqm->dev->kgd, i);
1134	}
1135	res.gws_mask = ~0ull;
1136	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1137
1138	pr_debug("Scheduling resources:\n"
1139			"vmid mask: 0x%8X\n"
1140			"queue mask: 0x%8llX\n",
1141			res.vmid_mask, res.queue_mask);
1142
1143	return pm_send_set_resources(&dqm->packets, &res);
1144}
1145
1146static int initialize_cpsch(struct device_queue_manager *dqm)
1147{
1148	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1149
1150	mutex_init(&dqm->lock_hidden);
1151	INIT_LIST_HEAD(&dqm->queues);
1152	dqm->active_queue_count = dqm->processes_count = 0;
1153	dqm->active_cp_queue_count = 0;
1154	dqm->gws_queue_count = 0;
1155	dqm->active_runlist = false;
1156	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
1157	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
1158
1159	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1160
1161	return 0;
1162}
1163
1164static int start_cpsch(struct device_queue_manager *dqm)
1165{
1166	int retval;
1167
1168	retval = 0;
1169
1170	retval = pm_init(&dqm->packets, dqm);
1171	if (retval)
1172		goto fail_packet_manager_init;
1173
1174	retval = set_sched_resources(dqm);
1175	if (retval)
1176		goto fail_set_sched_resources;
1177
1178	pr_debug("Allocating fence memory\n");
1179
1180	/* allocate fence memory on the gart */
1181	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1182					&dqm->fence_mem);
1183
1184	if (retval)
1185		goto fail_allocate_vidmem;
1186
1187	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
1188	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
1189
1190	init_interrupts(dqm);
1191
1192	dqm_lock(dqm);
 1193	/* clear hang status when the driver tries to start the hw scheduler */
1194	dqm->is_hws_hang = false;
1195	dqm->is_resetting = false;
1196	dqm->sched_running = true;
1197	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1198	dqm_unlock(dqm);
1199
1200	return 0;
1201fail_allocate_vidmem:
1202fail_set_sched_resources:
1203	pm_uninit(&dqm->packets, false);
1204fail_packet_manager_init:
1205	return retval;
1206}
1207
1208static int stop_cpsch(struct device_queue_manager *dqm)
1209{
1210	bool hanging;
1211
1212	dqm_lock(dqm);
1213	if (!dqm->is_hws_hang)
1214		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1215	hanging = dqm->is_hws_hang || dqm->is_resetting;
1216	dqm->sched_running = false;
1217	dqm_unlock(dqm);
1218
1219	pm_release_ib(&dqm->packets);
1220
1221	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1222	pm_uninit(&dqm->packets, hanging);
1223
1224	return 0;
1225}
1226
1227static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1228					struct kernel_queue *kq,
1229					struct qcm_process_device *qpd)
1230{
1231	dqm_lock(dqm);
1232	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1233		pr_warn("Can't create new kernel queue because %d queues were already created\n",
1234				dqm->total_queue_count);
1235		dqm_unlock(dqm);
1236		return -EPERM;
1237	}
1238
1239	/*
1240	 * Unconditionally increment this counter, regardless of the queue's
1241	 * type or whether the queue is active.
1242	 */
1243	dqm->total_queue_count++;
1244	pr_debug("Total of %d queues are accountable so far\n",
1245			dqm->total_queue_count);
1246
1247	list_add(&kq->list, &qpd->priv_queue_list);
1248	increment_queue_count(dqm, kq->queue->properties.type);
1249	qpd->is_debug = true;
1250	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1251	dqm_unlock(dqm);
1252
1253	return 0;
1254}
1255
1256static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1257					struct kernel_queue *kq,
1258					struct qcm_process_device *qpd)
1259{
1260	dqm_lock(dqm);
1261	list_del(&kq->list);
1262	decrement_queue_count(dqm, kq->queue->properties.type);
1263	qpd->is_debug = false;
1264	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1265	/*
1266	 * Unconditionally decrement this counter, regardless of the queue's
1267	 * type.
1268	 */
1269	dqm->total_queue_count--;
1270	pr_debug("Total of %d queues are accountable so far\n",
1271			dqm->total_queue_count);
1272	dqm_unlock(dqm);
1273}
1274
1275static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1276			struct qcm_process_device *qpd)
1277{
1278	int retval;
1279	struct mqd_manager *mqd_mgr;
1280
1281	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1282		pr_warn("Can't create new usermode queue because %d queues were already created\n",
1283				dqm->total_queue_count);
1284		retval = -EPERM;
1285		goto out;
1286	}
1287
1288	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1289		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1290		dqm_lock(dqm);
1291		retval = allocate_sdma_queue(dqm, q);
1292		dqm_unlock(dqm);
1293		if (retval)
1294			goto out;
1295	}
1296
1297	retval = allocate_doorbell(qpd, q);
1298	if (retval)
1299		goto out_deallocate_sdma_queue;
1300
1301	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1302			q->properties.type)];
1303
1304	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1305		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1306		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1307	q->properties.tba_addr = qpd->tba_addr;
1308	q->properties.tma_addr = qpd->tma_addr;
1309	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1310	if (!q->mqd_mem_obj) {
1311		retval = -ENOMEM;
1312		goto out_deallocate_doorbell;
1313	}
1314
1315	dqm_lock(dqm);
1316	/*
1317	 * Eviction state logic: mark all queues as evicted, even ones
1318	 * not currently active. Restoring inactive queues later only
1319	 * updates the is_evicted flag but is a no-op otherwise.
1320	 */
1321	q->properties.is_evicted = !!qpd->evicted;
1322	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1323				&q->gart_mqd_addr, &q->properties);
1324
1325	list_add(&q->list, &qpd->queues_list);
1326	qpd->queue_count++;
1327
1328	if (q->properties.is_active) {
1329		increment_queue_count(dqm, q->properties.type);
1330
1331		execute_queues_cpsch(dqm,
1332				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1333	}
1334
1335	/*
1336	 * Unconditionally increment this counter, regardless of the queue's
1337	 * type or whether the queue is active.
1338	 */
1339	dqm->total_queue_count++;
1340
1341	pr_debug("Total of %d queues are accountable so far\n",
1342			dqm->total_queue_count);
1343
1344	dqm_unlock(dqm);
1345	return retval;
1346
1347out_deallocate_doorbell:
1348	deallocate_doorbell(qpd, q);
1349out_deallocate_sdma_queue:
1350	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1351		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1352		dqm_lock(dqm);
1353		deallocate_sdma_queue(dqm, q);
1354		dqm_unlock(dqm);
1355	}
1356out:
1357	return retval;
1358}
1359
1360int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
1361				unsigned int fence_value,
1362				unsigned int timeout_ms)
1363{
1364	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1365
1366	while (*fence_addr != fence_value) {
1367		if (time_after(jiffies, end_jiffies)) {
1368			pr_err("qcm fence wait loop timeout expired\n");
1369			/* In HWS case, this is used to halt the driver thread
1370			 * in order not to mess up CP states before doing
1371			 * scandumps for FW debugging.
1372			 */
1373			while (halt_if_hws_hang)
1374				schedule();
1375
1376			return -ETIME;
1377		}
1378		schedule();
1379	}
1380
1381	return 0;
1382}
1383
1384/* dqm->lock mutex has to be locked before calling this function */
1385static int map_queues_cpsch(struct device_queue_manager *dqm)
1386{
1387	int retval;
1388
1389	if (!dqm->sched_running)
1390		return 0;
1391	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
1392		return 0;
1393	if (dqm->active_runlist)
1394		return 0;
1395
1396	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
1397	pr_debug("%s sent runlist\n", __func__);
1398	if (retval) {
1399		pr_err("failed to execute runlist\n");
1400		return retval;
1401	}
1402	dqm->active_runlist = true;
1403
1404	return retval;
1405}
1406
1407/* dqm->lock mutex has to be locked before calling this function */
1408static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1409				enum kfd_unmap_queues_filter filter,
1410				uint32_t filter_param)
1411{
1412	int retval = 0;
1413
1414	if (!dqm->sched_running)
1415		return 0;
1416	if (dqm->is_hws_hang)
1417		return -EIO;
1418	if (!dqm->active_runlist)
1419		return retval;
1420
1421	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
1422			filter, filter_param, false, 0);
1423	if (retval)
1424		return retval;
1425
1426	*dqm->fence_addr = KFD_FENCE_INIT;
1427	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
1428				KFD_FENCE_COMPLETED);
1429	/* should be timed out */
1430	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1431				queue_preemption_timeout_ms);
1432	if (retval) {
1433		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1434		dqm->is_hws_hang = true;
1435		/* It's possible we're detecting a HWS hang in the
1436		 * middle of a GPU reset. No need to schedule another
1437		 * reset in this case.
1438		 */
1439		if (!dqm->is_resetting)
1440			schedule_work(&dqm->hw_exception_work);
1441		return retval;
1442	}
1443
1444	pm_release_ib(&dqm->packets);
1445	dqm->active_runlist = false;
1446
1447	return retval;
1448}
1449
1450/* dqm->lock mutex has to be locked before calling this function */
1451static int execute_queues_cpsch(struct device_queue_manager *dqm,
1452				enum kfd_unmap_queues_filter filter,
1453				uint32_t filter_param)
1454{
1455	int retval;
1456
1457	if (dqm->is_hws_hang)
1458		return -EIO;
1459	retval = unmap_queues_cpsch(dqm, filter, filter_param);
1460	if (retval)
1461		return retval;
1462
1463	return map_queues_cpsch(dqm);
1464}
1465
1466static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1467				struct qcm_process_device *qpd,
1468				struct queue *q)
1469{
1470	int retval;
1471	struct mqd_manager *mqd_mgr;
1472	uint64_t sdma_val = 0;
1473	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
1474
1475	/* Get the SDMA queue stats */
1476	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1477	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1478		retval = read_sdma_queue_counter((uint64_t)q->properties.read_ptr,
1479							&sdma_val);
1480		if (retval)
1481			pr_err("Failed to read SDMA queue counter for queue: %d\n",
1482				q->properties.queue_id);
1483	}
1484
1485	retval = 0;
1486
1487	/* remove queue from list to prevent rescheduling after preemption */
1488	dqm_lock(dqm);
1489
1490	if (qpd->is_debug) {
1491		/*
1492		 * error, currently we do not allow to destroy a queue
1493		 * of a currently debugged process
1494		 */
1495		retval = -EBUSY;
1496		goto failed_try_destroy_debugged_queue;
1497
1498	}
1499
1500	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1501			q->properties.type)];
1502
1503	deallocate_doorbell(qpd, q);
1504
1505	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1506	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1507		deallocate_sdma_queue(dqm, q);
1508		pdd->sdma_past_activity_counter += sdma_val;
1509	}
1510
1511	list_del(&q->list);
1512	qpd->queue_count--;
1513	if (q->properties.is_active) {
1514		decrement_queue_count(dqm, q->properties.type);
1515		retval = execute_queues_cpsch(dqm,
1516				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1517		if (retval == -ETIME)
1518			qpd->reset_wavefronts = true;
1519		if (q->properties.is_gws) {
1520			dqm->gws_queue_count--;
1521			qpd->mapped_gws_queue = false;
1522		}
1523	}
1524
1525	/*
1526	 * Unconditionally decrement this counter, regardless of the queue's
1527	 * type
1528	 */
1529	dqm->total_queue_count--;
1530	pr_debug("Total of %d queues are accountable so far\n",
1531			dqm->total_queue_count);
1532
1533	dqm_unlock(dqm);
1534
1535	/* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1536	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1537
1538	return retval;
1539
1540failed_try_destroy_debugged_queue:
1541
1542	dqm_unlock(dqm);
1543	return retval;
1544}
1545
1546/*
1547 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1548 * stay in user mode.
1549 */
1550#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1551/* APE1 limit is inclusive and 64K aligned. */
1552#define APE1_LIMIT_ALIGNMENT 0xFFFF
1553
1554static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1555				   struct qcm_process_device *qpd,
1556				   enum cache_policy default_policy,
1557				   enum cache_policy alternate_policy,
1558				   void __user *alternate_aperture_base,
1559				   uint64_t alternate_aperture_size)
1560{
1561	bool retval = true;
1562
1563	if (!dqm->asic_ops.set_cache_memory_policy)
1564		return retval;
1565
1566	dqm_lock(dqm);
1567
1568	if (alternate_aperture_size == 0) {
1569		/* base > limit disables APE1 */
1570		qpd->sh_mem_ape1_base = 1;
1571		qpd->sh_mem_ape1_limit = 0;
1572	} else {
1573		/*
1574		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1575		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
1576		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1577		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1578		 * Verify that the base and size parameters can be
1579		 * represented in this format and convert them.
1580		 * Additionally restrict APE1 to user-mode addresses.
1581		 */
1582
1583		uint64_t base = (uintptr_t)alternate_aperture_base;
1584		uint64_t limit = base + alternate_aperture_size - 1;
1585
1586		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1587		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1588			retval = false;
1589			goto out;
1590		}
1591
1592		qpd->sh_mem_ape1_base = base >> 16;
1593		qpd->sh_mem_ape1_limit = limit >> 16;
1594	}
1595
1596	retval = dqm->asic_ops.set_cache_memory_policy(
1597			dqm,
1598			qpd,
1599			default_policy,
1600			alternate_policy,
1601			alternate_aperture_base,
1602			alternate_aperture_size);
1603
1604	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1605		program_sh_mem_settings(dqm, qpd);
1606
1607	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1608		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1609		qpd->sh_mem_ape1_limit);
1610
1611out:
1612	dqm_unlock(dqm);
1613	return retval;
1614}
1615
1616static int set_trap_handler(struct device_queue_manager *dqm,
1617				struct qcm_process_device *qpd,
1618				uint64_t tba_addr,
1619				uint64_t tma_addr)
1620{
1621	uint64_t *tma;
1622
1623	if (dqm->dev->cwsr_enabled) {
1624		/* Jump from CWSR trap handler to user trap */
1625		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1626		tma[0] = tba_addr;
1627		tma[1] = tma_addr;
1628	} else {
1629		qpd->tba_addr = tba_addr;
1630		qpd->tma_addr = tma_addr;
1631	}
1632
1633	return 0;
1634}
1635
1636static int process_termination_nocpsch(struct device_queue_manager *dqm,
1637		struct qcm_process_device *qpd)
1638{
1639	struct queue *q, *next;
1640	struct device_process_node *cur, *next_dpn;
1641	int retval = 0;
1642	bool found = false;
1643
1644	dqm_lock(dqm);
1645
1646	/* Clear all user mode queues */
1647	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1648		int ret;
1649
1650		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1651		if (ret)
1652			retval = ret;
1653	}
1654
1655	/* Unregister process */
1656	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1657		if (qpd == cur->qpd) {
1658			list_del(&cur->list);
1659			kfree(cur);
1660			dqm->processes_count--;
1661			found = true;
1662			break;
1663		}
1664	}
1665
1666	dqm_unlock(dqm);
1667
1668	/* Outside the DQM lock because under the DQM lock we can't do
1669	 * reclaim or take other locks that others hold while reclaiming.
1670	 */
1671	if (found)
1672		kfd_dec_compute_active(dqm->dev);
1673
1674	return retval;
1675}
1676
1677static int get_wave_state(struct device_queue_manager *dqm,
1678			  struct queue *q,
1679			  void __user *ctl_stack,
1680			  u32 *ctl_stack_used_size,
1681			  u32 *save_area_used_size)
1682{
1683	struct mqd_manager *mqd_mgr;
1684	int r;
1685
1686	dqm_lock(dqm);
1687
1688	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1689	    q->properties.is_active || !q->device->cwsr_enabled) {
1690		r = -EINVAL;
1691		goto dqm_unlock;
1692	}
1693
1694	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
1695
1696	if (!mqd_mgr->get_wave_state) {
1697		r = -EINVAL;
1698		goto dqm_unlock;
1699	}
1700
1701	r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1702			ctl_stack_used_size, save_area_used_size);
1703
1704dqm_unlock:
1705	dqm_unlock(dqm);
1706	return r;
1707}
1708
1709static int process_termination_cpsch(struct device_queue_manager *dqm,
1710		struct qcm_process_device *qpd)
1711{
1712	int retval;
1713	struct queue *q, *next;
1714	struct kernel_queue *kq, *kq_next;
1715	struct mqd_manager *mqd_mgr;
1716	struct device_process_node *cur, *next_dpn;
1717	enum kfd_unmap_queues_filter filter =
1718		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
1719	bool found = false;
1720
1721	retval = 0;
1722
1723	dqm_lock(dqm);
1724
1725	/* Clean all kernel queues */
1726	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1727		list_del(&kq->list);
1728		decrement_queue_count(dqm, kq->queue->properties.type);
1729		qpd->is_debug = false;
1730		dqm->total_queue_count--;
1731		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1732	}
1733
1734	/* Clear all user mode queues */
1735	list_for_each_entry(q, &qpd->queues_list, list) {
1736		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1737			deallocate_sdma_queue(dqm, q);
1738		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1739			deallocate_sdma_queue(dqm, q);
1740
1741		if (q->properties.is_active) {
1742			decrement_queue_count(dqm, q->properties.type);
1743			if (q->properties.is_gws) {
1744				dqm->gws_queue_count--;
1745				qpd->mapped_gws_queue = false;
1746			}
1747		}
1748
1749		dqm->total_queue_count--;
1750	}
1751
1752	/* Unregister process */
1753	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1754		if (qpd == cur->qpd) {
1755			list_del(&cur->list);
1756			kfree(cur);
1757			dqm->processes_count--;
1758			found = true;
1759			break;
1760		}
1761	}
1762
1763	retval = execute_queues_cpsch(dqm, filter, 0);
1764	if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
1765		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1766		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1767		qpd->reset_wavefronts = false;
1768	}
1769
1770	dqm_unlock(dqm);
1771
1772	/* Outside the DQM lock because under the DQM lock we can't do
1773	 * reclaim or take other locks that others hold while reclaiming.
1774	 */
1775	if (found)
1776		kfd_dec_compute_active(dqm->dev);
1777
1778	/* Lastly, free mqd resources.
1779	 * Do free_mqd() after dqm_unlock to avoid circular locking.
1780	 */
1781	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1782		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1783				q->properties.type)];
1784		list_del(&q->list);
1785		qpd->queue_count--;
1786		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1787	}
1788
1789	return retval;
1790}
1791
1792static int init_mqd_managers(struct device_queue_manager *dqm)
1793{
1794	int i, j;
1795	struct mqd_manager *mqd_mgr;
1796
1797	for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1798		mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1799		if (!mqd_mgr) {
1800			pr_err("mqd manager [%d] initialization failed\n", i);
1801			goto out_free;
1802		}
1803		dqm->mqd_mgrs[i] = mqd_mgr;
1804	}
1805
1806	return 0;
1807
1808out_free:
1809	for (j = 0; j < i; j++) {
1810		kfree(dqm->mqd_mgrs[j]);
1811		dqm->mqd_mgrs[j] = NULL;
1812	}
1813
1814	return -ENOMEM;
1815}
1816
 1817/* Allocate one hiq mqd (HWS) and all SDMA mqds in one contiguous chunk */
1818static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
1819{
1820	int retval;
1821	struct kfd_dev *dev = dqm->dev;
1822	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
1823	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
1824		get_num_all_sdma_engines(dqm) *
1825		dev->device_info->num_sdma_queues_per_engine +
1826		dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
1827
1828	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
1829		&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
1830		(void *)&(mem_obj->cpu_ptr), false);
1831
1832	return retval;
1833}
1834
1835struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1836{
1837	struct device_queue_manager *dqm;
1838
1839	pr_debug("Loading device queue manager\n");
1840
1841	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
1842	if (!dqm)
1843		return NULL;
1844
1845	switch (dev->device_info->asic_family) {
1846	/* HWS is not available on Hawaii. */
1847	case CHIP_HAWAII:
1848	/* HWS depends on CWSR for timely dequeue. CWSR is not
1849	 * available on Tonga.
1850	 *
1851	 * FIXME: This argument also applies to Kaveri.
1852	 */
1853	case CHIP_TONGA:
1854		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1855		break;
1856	default:
1857		dqm->sched_policy = sched_policy;
1858		break;
1859	}
1860
1861	dqm->dev = dev;
1862	switch (dqm->sched_policy) {
1863	case KFD_SCHED_POLICY_HWS:
1864	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1865		/* initialize dqm for cp scheduling */
1866		dqm->ops.create_queue = create_queue_cpsch;
1867		dqm->ops.initialize = initialize_cpsch;
1868		dqm->ops.start = start_cpsch;
1869		dqm->ops.stop = stop_cpsch;
1870		dqm->ops.pre_reset = pre_reset;
1871		dqm->ops.destroy_queue = destroy_queue_cpsch;
1872		dqm->ops.update_queue = update_queue;
1873		dqm->ops.register_process = register_process;
1874		dqm->ops.unregister_process = unregister_process;
1875		dqm->ops.uninitialize = uninitialize;
1876		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1877		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1878		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1879		dqm->ops.set_trap_handler = set_trap_handler;
1880		dqm->ops.process_termination = process_termination_cpsch;
1881		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1882		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
1883		dqm->ops.get_wave_state = get_wave_state;
1884		break;
1885	case KFD_SCHED_POLICY_NO_HWS:
1886		/* initialize dqm for no cp scheduling */
1887		dqm->ops.start = start_nocpsch;
1888		dqm->ops.stop = stop_nocpsch;
1889		dqm->ops.pre_reset = pre_reset;
1890		dqm->ops.create_queue = create_queue_nocpsch;
1891		dqm->ops.destroy_queue = destroy_queue_nocpsch;
1892		dqm->ops.update_queue = update_queue;
1893		dqm->ops.register_process = register_process;
1894		dqm->ops.unregister_process = unregister_process;
1895		dqm->ops.initialize = initialize_nocpsch;
1896		dqm->ops.uninitialize = uninitialize;
1897		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1898		dqm->ops.set_trap_handler = set_trap_handler;
1899		dqm->ops.process_termination = process_termination_nocpsch;
1900		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1901		dqm->ops.restore_process_queues =
1902			restore_process_queues_nocpsch;
1903		dqm->ops.get_wave_state = get_wave_state;
1904		break;
1905	default:
1906		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
1907		goto out_free;
1908	}
1909
1910	switch (dev->device_info->asic_family) {
1911	case CHIP_CARRIZO:
1912		device_queue_manager_init_vi(&dqm->asic_ops);
1913		break;
1914
1915	case CHIP_KAVERI:
1916		device_queue_manager_init_cik(&dqm->asic_ops);
1917		break;
1918
1919	case CHIP_HAWAII:
1920		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1921		break;
1922
1923	case CHIP_TONGA:
1924	case CHIP_FIJI:
1925	case CHIP_POLARIS10:
1926	case CHIP_POLARIS11:
1927	case CHIP_POLARIS12:
1928	case CHIP_VEGAM:
1929		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1930		break;
1931
1932	case CHIP_VEGA10:
1933	case CHIP_VEGA12:
1934	case CHIP_VEGA20:
1935	case CHIP_RAVEN:
1936	case CHIP_RENOIR:
1937	case CHIP_ARCTURUS:
1938		device_queue_manager_init_v9(&dqm->asic_ops);
1939		break;
1940	case CHIP_NAVI10:
1941	case CHIP_NAVI12:
1942	case CHIP_NAVI14:
1943	case CHIP_SIENNA_CICHLID:
1944	case CHIP_NAVY_FLOUNDER:
1945		device_queue_manager_init_v10_navi10(&dqm->asic_ops);
1946		break;
1947	default:
1948		WARN(1, "Unexpected ASIC family %u",
1949		     dev->device_info->asic_family);
1950		goto out_free;
1951	}
1952
1953	if (init_mqd_managers(dqm))
1954		goto out_free;
1955
1956	if (allocate_hiq_sdma_mqd(dqm)) {
1957		pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
1958		goto out_free;
1959	}
1960
1961	if (!dqm->ops.initialize(dqm))
1962		return dqm;
1963
1964out_free:
1965	kfree(dqm);
1966	return NULL;
1967}
1968
1969static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
1970				    struct kfd_mem_obj *mqd)
1971{
1972	WARN(!mqd, "No hiq sdma mqd trunk to free");
1973
1974	amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
1975}
1976
1977void device_queue_manager_uninit(struct device_queue_manager *dqm)
1978{
1979	dqm->ops.uninitialize(dqm);
1980	deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
1981	kfree(dqm);
1982}
1983
1984int kfd_process_vm_fault(struct device_queue_manager *dqm,
1985			 unsigned int pasid)
1986{
1987	struct kfd_process_device *pdd;
1988	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1989	int ret = 0;
1990
1991	if (!p)
1992		return -EINVAL;
1993	pdd = kfd_get_process_device_data(dqm->dev, p);
1994	if (pdd)
1995		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
1996	kfd_unref_process(p);
1997
1998	return ret;
1999}
2000
2001static void kfd_process_hw_exception(struct work_struct *work)
2002{
2003	struct device_queue_manager *dqm = container_of(work,
2004			struct device_queue_manager, hw_exception_work);
2005	amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
2006}
2007
2008#if defined(CONFIG_DEBUG_FS)
2009
2010static void seq_reg_dump(struct seq_file *m,
2011			 uint32_t (*dump)[2], uint32_t n_regs)
2012{
2013	uint32_t i, count;
2014
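	/* Group up to eight consecutive dwords per output line: print the
	 * register address when a new line starts or the addresses stop
	 * being contiguous, otherwise append the value to the current line.
	 */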
2015	for (i = 0, count = 0; i < n_regs; i++) {
2016		if (count == 0 ||
2017		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
2018			seq_printf(m, "%s    %08x: %08x",
2019				   i ? "\n" : "",
2020				   dump[i][0], dump[i][1]);
2021			count = 7;
2022		} else {
2023			seq_printf(m, " %08x", dump[i][1]);
2024			count--;
2025		}
2026	}
2027
2028	seq_puts(m, "\n");
2029}
2030
2031int dqm_debugfs_hqds(struct seq_file *m, void *data)
2032{
2033	struct device_queue_manager *dqm = data;
2034	uint32_t (*dump)[2], n_regs;
2035	int pipe, queue;
2036	int r = 0;
2037
2038	if (!dqm->sched_running) {
2039		seq_puts(m, " Device is stopped\n");
2040
2041		return 0;
2042	}
2043
2044	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
2045					KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
2046					&dump, &n_regs);
2047	if (!r) {
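		/* KFD_CIK_HIQ_PIPE is a global pipe index; derive the 1-based
		 * MEC number and the pipe within that MEC for display.
		 */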
2048		seq_printf(m, "  HIQ on MEC %d Pipe %d Queue %d\n",
2049			   KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
2050			   KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
2051			   KFD_CIK_HIQ_QUEUE);
2052		seq_reg_dump(m, dump, n_regs);
2053
2054		kfree(dump);
2055	}
2056
2057	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
2058		int pipe_offset = pipe * get_queues_per_pipe(dqm);
2059
2060		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
2061			if (!test_bit(pipe_offset + queue,
2062				      dqm->dev->shared_resources.cp_queue_bitmap))
2063				continue;
2064
2065			r = dqm->dev->kfd2kgd->hqd_dump(
2066				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2067			if (r)
2068				break;
2069
2070			seq_printf(m, "  CP Pipe %d, Queue %d\n",
2071				  pipe, queue);
2072			seq_reg_dump(m, dump, n_regs);
2073
2074			kfree(dump);
2075		}
2076	}
2077
2078	for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
2079		for (queue = 0;
2080		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
2081		     queue++) {
2082			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
2083				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2084			if (r)
2085				break;
2086
2087			seq_printf(m, "  SDMA Engine %d, RLC %d\n",
2088				  pipe, queue);
2089			seq_reg_dump(m, dump, n_regs);
2090
2091			kfree(dump);
2092		}
2093	}
2094
2095	return r;
2096}
2097
2098int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
2099{
2100	int r = 0;
2101
2102	dqm_lock(dqm);
2103	dqm->active_runlist = true;
2104	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2105	dqm_unlock(dqm);
2106
2107	return r;
2108}
2109
2110#endif
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/*
   3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice shall be included in
  13 * all copies or substantial portions of the Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21 * OTHER DEALINGS IN THE SOFTWARE.
  22 *
  23 */
  24
  25#include <linux/ratelimit.h>
  26#include <linux/printk.h>
  27#include <linux/slab.h>
  28#include <linux/list.h>
  29#include <linux/types.h>
  30#include <linux/bitops.h>
  31#include <linux/sched.h>
  32#include "kfd_priv.h"
  33#include "kfd_device_queue_manager.h"
  34#include "kfd_mqd_manager.h"
  35#include "cik_regs.h"
  36#include "kfd_kernel_queue.h"
  37#include "amdgpu_amdkfd.h"
  38#include "mes_api_def.h"
  39
  40/* Size of the per-pipe EOP queue */
  41#define CIK_HPD_EOP_BYTES_LOG2 11
  42#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
  43
  44static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
  45				  u32 pasid, unsigned int vmid);
  46
  47static int execute_queues_cpsch(struct device_queue_manager *dqm,
  48				enum kfd_unmap_queues_filter filter,
  49				uint32_t filter_param);
  50static int unmap_queues_cpsch(struct device_queue_manager *dqm,
  51				enum kfd_unmap_queues_filter filter,
  52				uint32_t filter_param, bool reset);
  53
  54static int map_queues_cpsch(struct device_queue_manager *dqm);
  55
  56static void deallocate_sdma_queue(struct device_queue_manager *dqm,
  57				struct queue *q);
  58
  59static inline void deallocate_hqd(struct device_queue_manager *dqm,
  60				struct queue *q);
  61static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
  62static int allocate_sdma_queue(struct device_queue_manager *dqm,
  63				struct queue *q, const uint32_t *restore_sdma_id);
  64static void kfd_process_hw_exception(struct work_struct *work);
  65
  66static inline
  67enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
  68{
  69	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
  70		return KFD_MQD_TYPE_SDMA;
  71	return KFD_MQD_TYPE_CP;
  72}
  73
  74static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
  75{
  76	int i;
  77	int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
  78		+ pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
  79
  80	/* queue is available for KFD usage if bit is 1 */
  81	for (i = 0; i <  dqm->dev->shared_resources.num_queue_per_pipe; ++i)
  82		if (test_bit(pipe_offset + i,
  83			      dqm->dev->shared_resources.cp_queue_bitmap))
  84			return true;
  85	return false;
  86}
  87
  88unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
  89{
  90	return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
  91				KGD_MAX_QUEUES);
  92}
  93
  94unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
  95{
  96	return dqm->dev->shared_resources.num_queue_per_pipe;
  97}
  98
  99unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
 100{
 101	return dqm->dev->shared_resources.num_pipe_per_mec;
 102}
 103
 104static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
 105{
 106	return kfd_get_num_sdma_engines(dqm->dev) +
 107		kfd_get_num_xgmi_sdma_engines(dqm->dev);
 108}
 109
 110unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
 111{
 112	return kfd_get_num_sdma_engines(dqm->dev) *
 113		dqm->dev->device_info.num_sdma_queues_per_engine;
 114}
 115
 116unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
 117{
 118	return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
 119		dqm->dev->device_info.num_sdma_queues_per_engine;
 120}
 121
 122static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm)
 123{
 124	return dqm->dev->device_info.reserved_sdma_queues_bitmap;
 125}
 126
 127void program_sh_mem_settings(struct device_queue_manager *dqm,
 128					struct qcm_process_device *qpd)
 129{
 130	return dqm->dev->kfd2kgd->program_sh_mem_settings(
 131						dqm->dev->adev, qpd->vmid,
 132						qpd->sh_mem_config,
 133						qpd->sh_mem_ape1_base,
 134						qpd->sh_mem_ape1_limit,
 135						qpd->sh_mem_bases);
 136}
 137
 138static void kfd_hws_hang(struct device_queue_manager *dqm)
 139{
 140	/*
 141	 * Issue a GPU reset if HWS is unresponsive
 142	 */
 143	dqm->is_hws_hang = true;
 144
 145	/* It's possible we're detecting a HWS hang in the
 146	 * middle of a GPU reset. No need to schedule another
 147	 * reset in this case.
 148	 */
 149	if (!dqm->is_resetting)
 150		schedule_work(&dqm->hw_exception_work);
 151}
 152
 153static int convert_to_mes_queue_type(int queue_type)
 154{
 155	int mes_queue_type;
 156
 157	switch (queue_type) {
 158	case KFD_QUEUE_TYPE_COMPUTE:
 159		mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
 160		break;
 161	case KFD_QUEUE_TYPE_SDMA:
 162		mes_queue_type = MES_QUEUE_TYPE_SDMA;
 163		break;
 164	default:
 165		WARN(1, "Invalid queue type %d", queue_type);
 166		mes_queue_type = -EINVAL;
 167		break;
 168	}
 169
 170	return mes_queue_type;
 171}
 172
 173static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
 174			 struct qcm_process_device *qpd)
 175{
 176	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
 177	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
 178	struct mes_add_queue_input queue_input;
 179	int r, queue_type;
 180	uint64_t wptr_addr_off;
 181
 182	if (dqm->is_hws_hang)
 183		return -EIO;
 184
 185	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
 186	queue_input.process_id = qpd->pqm->process->pasid;
 187	queue_input.page_table_base_addr =  qpd->page_table_base;
 188	queue_input.process_va_start = 0;
 189	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
 190	/* MES unit for quantum is 100ns */
 191	queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM;  /* Equivalent to 10ms. */
 192	queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
 193	queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
 194	queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
 195	queue_input.inprocess_gang_priority = q->properties.priority;
 196	queue_input.gang_global_priority_level =
 197					AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
 198	queue_input.doorbell_offset = q->properties.doorbell_off;
 199	queue_input.mqd_addr = q->gart_mqd_addr;
 200	queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
 201
 202	if (q->wptr_bo) {
 203		wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
 204		queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
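		/* MES consumes an MC address for the write pointer: combine the
		 * wptr BO's page frame with the offset of write_ptr inside its page.
		 */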
 205	}
 206
 207	queue_input.is_kfd_process = 1;
 208	queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
 209	queue_input.queue_size = q->properties.queue_size >> 2;
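	/* properties.queue_size is tracked in bytes; MES appears to expect
	 * dwords, hence the shift by two.
	 */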
 210
 211	queue_input.paging = false;
 212	queue_input.tba_addr = qpd->tba_addr;
 213	queue_input.tma_addr = qpd->tma_addr;
 214
 215	queue_type = convert_to_mes_queue_type(q->properties.type);
 216	if (queue_type < 0) {
 217		pr_err("Queue type not supported with MES, queue:%d\n",
 218				q->properties.type);
 219		return -EINVAL;
 220	}
 221	queue_input.queue_type = (uint32_t)queue_type;
 222
 223	if (q->gws) {
 224		queue_input.gws_base = 0;
 225		queue_input.gws_size = qpd->num_gws;
 226	}
 227
 228	amdgpu_mes_lock(&adev->mes);
 229	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
 230	amdgpu_mes_unlock(&adev->mes);
 231	if (r) {
 232		pr_err("failed to add hardware queue to MES, doorbell=0x%x\n",
 233			q->properties.doorbell_off);
 234		pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
 235		kfd_hws_hang(dqm);
 236	}
 237
 238	return r;
 239}
 240
 241static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
 242			struct qcm_process_device *qpd)
 243{
 244	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
 245	int r;
 246	struct mes_remove_queue_input queue_input;
 247
 248	if (dqm->is_hws_hang)
 249		return -EIO;
 250
 251	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
 252	queue_input.doorbell_offset = q->properties.doorbell_off;
 253	queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
 254
 255	amdgpu_mes_lock(&adev->mes);
 256	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
 257	amdgpu_mes_unlock(&adev->mes);
 258
 259	if (r) {
 260		pr_err("failed to remove hardware queue from MES, doorbell=0x%x\n",
 261			q->properties.doorbell_off);
 262		pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
 263		kfd_hws_hang(dqm);
 264	}
 265
 266	return r;
 267}
 268
 269static int remove_all_queues_mes(struct device_queue_manager *dqm)
 270{
 271	struct device_process_node *cur;
 272	struct qcm_process_device *qpd;
 273	struct queue *q;
 274	int retval = 0;
 275
 276	list_for_each_entry(cur, &dqm->queues, list) {
 277		qpd = cur->qpd;
 278		list_for_each_entry(q, &qpd->queues_list, list) {
 279			if (q->properties.is_active) {
 280				retval = remove_queue_mes(dqm, q, qpd);
 281				if (retval) {
 282					pr_err("%s: Failed to remove queue %d for dev %d",
 283						__func__,
 284						q->properties.queue_id,
 285						dqm->dev->id);
 286					return retval;
 287				}
 288			}
 289		}
 290	}
 291
 292	return retval;
 293}
 294
 295static void increment_queue_count(struct device_queue_manager *dqm,
 296				  struct qcm_process_device *qpd,
 297				  struct queue *q)
 298{
 299	dqm->active_queue_count++;
 300	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
 301	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
 302		dqm->active_cp_queue_count++;
 303
 304	if (q->properties.is_gws) {
 305		dqm->gws_queue_count++;
 306		qpd->mapped_gws_queue = true;
 307	}
 308}
 309
 310static void decrement_queue_count(struct device_queue_manager *dqm,
 311				  struct qcm_process_device *qpd,
 312				  struct queue *q)
 313{
 314	dqm->active_queue_count--;
 315	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
 316	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
 317		dqm->active_cp_queue_count--;
 318
 319	if (q->properties.is_gws) {
 320		dqm->gws_queue_count--;
 321		qpd->mapped_gws_queue = false;
 322	}
 323}
 324
 325/*
 326 * Allocate a doorbell ID to this queue.
 327 * If doorbell_id is passed in, make sure requested ID is valid then allocate it.
 328 */
 329static int allocate_doorbell(struct qcm_process_device *qpd,
 330			     struct queue *q,
 331			     uint32_t const *restore_id)
 332{
 333	struct kfd_dev *dev = qpd->dqm->dev;
 334
 335	if (!KFD_IS_SOC15(dev)) {
 336		/* On pre-SOC15 chips we need to use the queue ID to
 337		 * preserve the user mode ABI.
 338		 */
 339
 340		if (restore_id && *restore_id != q->properties.queue_id)
 341			return -EINVAL;
 342
 343		q->doorbell_id = q->properties.queue_id;
 344	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 345			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
 346		/* For SDMA queues on SOC15 with 8-byte doorbell, use static
 347		 * doorbell assignments based on the engine and queue id.
 348		 * The doorbell index distance between RLC (2*i) and (2*i+1)
 349		 * for an SDMA engine is 512.
 350		 */
 351
 352		uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx;
 353		uint32_t valid_id = idx_offset[q->properties.sdma_engine_id]
 354						+ (q->properties.sdma_queue_id & 1)
 355						* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
 356						+ (q->properties.sdma_queue_id >> 1);
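		/* Illustrative example, using the mirror offset of 512 noted
		 * above: sdma_queue_id 3 yields idx_offset[engine] + 512 + 1.
		 */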
 357
 358		if (restore_id && *restore_id != valid_id)
 359			return -EINVAL;
 360		q->doorbell_id = valid_id;
 361	} else {
 362		/* For CP queues on SOC15 */
 363		if (restore_id) {
 364			/* make sure that ID is free  */
 365			if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
 366				return -EINVAL;
 367
 368			q->doorbell_id = *restore_id;
 369		} else {
 370			/* or reserve a free doorbell ID */
 371			unsigned int found;
 372
 373			found = find_first_zero_bit(qpd->doorbell_bitmap,
 374						KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 375			if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
 376				pr_debug("No doorbells available");
 377				return -EBUSY;
 378			}
 379			set_bit(found, qpd->doorbell_bitmap);
 380			q->doorbell_id = found;
 381		}
 382	}
 383
 384	q->properties.doorbell_off =
 385		kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
 386					  q->doorbell_id);
 387	return 0;
 388}
 389
 390static void deallocate_doorbell(struct qcm_process_device *qpd,
 391				struct queue *q)
 392{
 393	unsigned int old;
 394	struct kfd_dev *dev = qpd->dqm->dev;
 395
 396	if (!KFD_IS_SOC15(dev) ||
 397	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 398	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 399		return;
 400
 401	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
 402	WARN_ON(!old);
 403}
 404
 405static void program_trap_handler_settings(struct device_queue_manager *dqm,
 406				struct qcm_process_device *qpd)
 407{
 408	if (dqm->dev->kfd2kgd->program_trap_handler_settings)
 409		dqm->dev->kfd2kgd->program_trap_handler_settings(
 410						dqm->dev->adev, qpd->vmid,
 411						qpd->tba_addr, qpd->tma_addr);
 412}
 413
 414static int allocate_vmid(struct device_queue_manager *dqm,
 415			struct qcm_process_device *qpd,
 416			struct queue *q)
 417{
 418	int allocated_vmid = -1, i;
 419
 420	for (i = dqm->dev->vm_info.first_vmid_kfd;
 421			i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
 422		if (!dqm->vmid_pasid[i]) {
 423			allocated_vmid = i;
 424			break;
 425		}
 426	}
 427
 428	if (allocated_vmid < 0) {
 429		pr_err("no more vmid to allocate\n");
 430		return -ENOSPC;
 431	}
 432
 433	pr_debug("vmid allocated: %d\n", allocated_vmid);
 434
 435	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
 436
 437	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
 438
 439	qpd->vmid = allocated_vmid;
 440	q->properties.vmid = allocated_vmid;
 441
 442	program_sh_mem_settings(dqm, qpd);
 443
 444	if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled)
 445		program_trap_handler_settings(dqm, qpd);
 446
 447	/* qpd->page_table_base is set earlier when register_process()
 448	 * is called, i.e. when the first queue is created.
 449	 */
 450	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
 451			qpd->vmid,
 452			qpd->page_table_base);
 453	/* invalidate the VM context after pasid and vmid mapping is set up */
 454	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
 455
 456	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
 457		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
 458				qpd->sh_hidden_private_base, qpd->vmid);
 459
 460	return 0;
 461}
 462
 463static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
 464				struct qcm_process_device *qpd)
 465{
 466	const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
 467	int ret;
 468
 469	if (!qpd->ib_kaddr)
 470		return -ENOMEM;
 471
 472	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
 473	if (ret)
 474		return ret;
 475
 476	return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
 477				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
 478				pmf->release_mem_size / sizeof(uint32_t));
 479}
 480
 481static void deallocate_vmid(struct device_queue_manager *dqm,
 482				struct qcm_process_device *qpd,
 483				struct queue *q)
 484{
 485	/* On GFX v7, CP doesn't flush TC at dequeue */
 486	if (q->device->adev->asic_type == CHIP_HAWAII)
 487		if (flush_texture_cache_nocpsch(q->device, qpd))
 488			pr_err("Failed to flush TC\n");
 489
 490	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
 491
 492	/* Release the vmid mapping */
 493	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
 494	dqm->vmid_pasid[qpd->vmid] = 0;
 495
 496	qpd->vmid = 0;
 497	q->properties.vmid = 0;
 498}
 499
 500static int create_queue_nocpsch(struct device_queue_manager *dqm,
 501				struct queue *q,
 502				struct qcm_process_device *qpd,
 503				const struct kfd_criu_queue_priv_data *qd,
 504				const void *restore_mqd, const void *restore_ctl_stack)
 505{
 506	struct mqd_manager *mqd_mgr;
 507	int retval;
 508
 509	dqm_lock(dqm);
 510
 511	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
 512		pr_warn("Can't create new usermode queue because %d queues were already created\n",
 513				dqm->total_queue_count);
 514		retval = -EPERM;
 515		goto out_unlock;
 516	}
 517
 518	if (list_empty(&qpd->queues_list)) {
 519		retval = allocate_vmid(dqm, qpd, q);
 520		if (retval)
 521			goto out_unlock;
 522	}
 523	q->properties.vmid = qpd->vmid;
 524	/*
 525	 * Eviction state logic: mark all queues as evicted, even ones
 526	 * not currently active. Restoring inactive queues later only
 527	 * updates the is_evicted flag but is a no-op otherwise.
 528	 */
 529	q->properties.is_evicted = !!qpd->evicted;
 530
 531	q->properties.tba_addr = qpd->tba_addr;
 532	q->properties.tma_addr = qpd->tma_addr;
 533
 534	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 535			q->properties.type)];
 536	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
 537		retval = allocate_hqd(dqm, q);
 538		if (retval)
 539			goto deallocate_vmid;
 540		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
 541			q->pipe, q->queue);
 542	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 543		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
 544		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
 545		if (retval)
 546			goto deallocate_vmid;
 547		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
 548	}
 549
 550	retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
 551	if (retval)
 552		goto out_deallocate_hqd;
 553
 554	/* Temporarily release dqm lock to avoid a circular lock dependency */
 555	dqm_unlock(dqm);
 556	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
 557	dqm_lock(dqm);
 558
 559	if (!q->mqd_mem_obj) {
 560		retval = -ENOMEM;
 561		goto out_deallocate_doorbell;
 562	}
 563
 564	if (qd)
 565		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
 566				     &q->properties, restore_mqd, restore_ctl_stack,
 567				     qd->ctl_stack_size);
 568	else
 569		mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
 570					&q->gart_mqd_addr, &q->properties);
 571
 572	if (q->properties.is_active) {
 573		if (!dqm->sched_running) {
 574			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
 575			goto add_queue_to_list;
 576		}
 577
 578		if (WARN(q->process->mm != current->mm,
 579					"should only run in user thread"))
 580			retval = -EFAULT;
 581		else
 582			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
 583					q->queue, &q->properties, current->mm);
 584		if (retval)
 585			goto out_free_mqd;
 586	}
 587
 588add_queue_to_list:
 589	list_add(&q->list, &qpd->queues_list);
 590	qpd->queue_count++;
 591	if (q->properties.is_active)
 592		increment_queue_count(dqm, qpd, q);
 593
 594	/*
 595	 * Unconditionally increment this counter, regardless of the queue's
 596	 * type or whether the queue is active.
 597	 */
 598	dqm->total_queue_count++;
 599	pr_debug("Total of %d queues are accountable so far\n",
 600			dqm->total_queue_count);
 601	goto out_unlock;
 602
 603out_free_mqd:
 604	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
 605out_deallocate_doorbell:
 606	deallocate_doorbell(qpd, q);
 607out_deallocate_hqd:
 608	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 609		deallocate_hqd(dqm, q);
 610	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 611		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 612		deallocate_sdma_queue(dqm, q);
 613deallocate_vmid:
 614	if (list_empty(&qpd->queues_list))
 615		deallocate_vmid(dqm, qpd, q);
 616out_unlock:
 617	dqm_unlock(dqm);
 618	return retval;
 619}
 620
 621static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 622{
 623	bool set;
 624	int pipe, bit, i;
 625
 626	set = false;
 627
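	/* Round-robin over the pipes starting at next_pipe_to_allocate and
	 * take the lowest free queue slot on the first enabled pipe that has one.
	 */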
 628	for (pipe = dqm->next_pipe_to_allocate, i = 0;
 629			i < get_pipes_per_mec(dqm);
 630			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
 631
 632		if (!is_pipe_enabled(dqm, 0, pipe))
 633			continue;
 634
 635		if (dqm->allocated_queues[pipe] != 0) {
 636			bit = ffs(dqm->allocated_queues[pipe]) - 1;
 637			dqm->allocated_queues[pipe] &= ~(1 << bit);
 638			q->pipe = pipe;
 639			q->queue = bit;
 640			set = true;
 641			break;
 642		}
 643	}
 644
 645	if (!set)
 646		return -EBUSY;
 647
 648	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
 649	/* horizontal hqd allocation */
 650	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
 651
 652	return 0;
 653}
 654
 655static inline void deallocate_hqd(struct device_queue_manager *dqm,
 656				struct queue *q)
 657{
 658	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
 659}
 660
 661#define SQ_IND_CMD_CMD_KILL		0x00000003
 662#define SQ_IND_CMD_MODE_BROADCAST	0x00000001
 663
 664static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
 665{
 666	int status = 0;
 667	unsigned int vmid;
 668	uint16_t queried_pasid;
 669	union SQ_CMD_BITS reg_sq_cmd;
 670	union GRBM_GFX_INDEX_BITS reg_gfx_index;
 671	struct kfd_process_device *pdd;
 672	int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
 673	int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
 674
 675	reg_sq_cmd.u32All = 0;
 676	reg_gfx_index.u32All = 0;
 677
 678	pr_debug("Killing all process wavefronts\n");
 679
 680	if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
 681		pr_err("no vmid pasid mapping supported\n");
 682		return -EOPNOTSUPP;
 683	}
 684
 685	/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
 686	 * ATC_VMID15_PASID_MAPPING
 687	 * to check which VMID the current process is mapped to.
 688	 */
 689
 690	for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
 691		status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
 692				(dev->adev, vmid, &queried_pasid);
 693
 694		if (status && queried_pasid == p->pasid) {
 695			pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
 696					vmid, p->pasid);
 697			break;
 698		}
 699	}
 700
 701	if (vmid > last_vmid_to_scan) {
 702		pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
 703		return -EFAULT;
 704	}
 705
 706	/* Take the VMID for that process the safe way, via its PDD */
 707	pdd = kfd_get_process_device_data(dev, p);
 708	if (!pdd)
 709		return -EFAULT;
 710
 711	reg_gfx_index.bits.sh_broadcast_writes = 1;
 712	reg_gfx_index.bits.se_broadcast_writes = 1;
 713	reg_gfx_index.bits.instance_broadcast_writes = 1;
 714	reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
 715	reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
 716	reg_sq_cmd.bits.vm_id = vmid;
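	/* Broadcast the KILL command to every shader engine, array and
	 * instance, restricted to waves executing under this process's VMID.
	 */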
 717
 718	dev->kfd2kgd->wave_control_execute(dev->adev,
 719					reg_gfx_index.u32All,
 720					reg_sq_cmd.u32All);
 721
 722	return 0;
 723}
 724
 725/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 726 * to avoid unsynchronized access
 727 */
 728static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 729				struct qcm_process_device *qpd,
 730				struct queue *q)
 731{
 732	int retval;
 733	struct mqd_manager *mqd_mgr;
 734
 735	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 736			q->properties.type)];
 737
 738	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 739		deallocate_hqd(dqm, q);
 740	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 741		deallocate_sdma_queue(dqm, q);
 742	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 743		deallocate_sdma_queue(dqm, q);
 744	else {
 745		pr_debug("q->properties.type %d is invalid\n",
 746				q->properties.type);
 747		return -EINVAL;
 748	}
 749	dqm->total_queue_count--;
 750
 751	deallocate_doorbell(qpd, q);
 752
 753	if (!dqm->sched_running) {
 754		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
 755		return 0;
 756	}
 757
 758	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
 759				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
 760				KFD_UNMAP_LATENCY_MS,
 761				q->pipe, q->queue);
 762	if (retval == -ETIME)
 763		qpd->reset_wavefronts = true;
 764
 765	list_del(&q->list);
 766	if (list_empty(&qpd->queues_list)) {
 767		if (qpd->reset_wavefronts) {
 768			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
 769					dqm->dev);
 770			/* dbgdev_wave_reset_wavefronts has to be called before
 771			 * deallocate_vmid(), i.e. when vmid is still in use.
 772			 */
 773			dbgdev_wave_reset_wavefronts(dqm->dev,
 774					qpd->pqm->process);
 775			qpd->reset_wavefronts = false;
 776		}
 777
 778		deallocate_vmid(dqm, qpd, q);
 779	}
 780	qpd->queue_count--;
 781	if (q->properties.is_active)
 782		decrement_queue_count(dqm, qpd, q);
 783
 784	return retval;
 785}
 786
 787static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 788				struct qcm_process_device *qpd,
 789				struct queue *q)
 790{
 791	int retval;
 792	uint64_t sdma_val = 0;
 793	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
 794	struct mqd_manager *mqd_mgr =
 795		dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
 796
 797	/* Get the SDMA queue stats */
 798	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
 799	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
 800		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
 801							&sdma_val);
 802		if (retval)
 803			pr_err("Failed to read SDMA queue counter for queue: %d\n",
 804				q->properties.queue_id);
 805	}
 806
 807	dqm_lock(dqm);
 808	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
 809	if (!retval)
 810		pdd->sdma_past_activity_counter += sdma_val;
 811	dqm_unlock(dqm);
 812
 813	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
 814
 815	return retval;
 816}
 817
 818static int update_queue(struct device_queue_manager *dqm, struct queue *q,
 819			struct mqd_update_info *minfo)
 820{
 821	int retval = 0;
 822	struct mqd_manager *mqd_mgr;
 823	struct kfd_process_device *pdd;
 824	bool prev_active = false;
 825
 826	dqm_lock(dqm);
 827	pdd = kfd_get_process_device_data(q->device, q->process);
 828	if (!pdd) {
 829		retval = -ENODEV;
 830		goto out_unlock;
 831	}
 832	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 833			q->properties.type)];
 834
 835	/* Save previous activity state for counters */
 836	prev_active = q->properties.is_active;
 837
 838	/* Make sure the queue is unmapped before updating the MQD */
 839	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
 840		if (!dqm->dev->shared_resources.enable_mes)
 841			retval = unmap_queues_cpsch(dqm,
 842						    KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false);
 843		else if (prev_active)
 844			retval = remove_queue_mes(dqm, q, &pdd->qpd);
 845
 846		if (retval) {
 847			pr_err("unmap queue failed\n");
 848			goto out_unlock;
 849		}
 850	} else if (prev_active &&
 851		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
 852		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 853		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
 854
 855		if (!dqm->sched_running) {
 856			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
 857			goto out_unlock;
 858		}
 859
 860		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
 861				(dqm->dev->cwsr_enabled ?
 862				 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
 863				 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
 864				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
 865		if (retval) {
 866			pr_err("destroy mqd failed\n");
 867			goto out_unlock;
 868		}
 869	}
 870
 871	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
 872
 873	/*
 874	 * check active state vs. the previous state and modify
 875	 * counter accordingly. map_queues_cpsch uses the
 876	 * dqm->active_queue_count to determine whether a new runlist must be
 877	 * uploaded.
 878	 */
 879	if (q->properties.is_active && !prev_active) {
 880		increment_queue_count(dqm, &pdd->qpd, q);
 881	} else if (!q->properties.is_active && prev_active) {
 882		decrement_queue_count(dqm, &pdd->qpd, q);
 883	} else if (q->gws && !q->properties.is_gws) {
 884		if (q->properties.is_active) {
 885			dqm->gws_queue_count++;
 886			pdd->qpd.mapped_gws_queue = true;
 887		}
 888		q->properties.is_gws = true;
 889	} else if (!q->gws && q->properties.is_gws) {
 890		if (q->properties.is_active) {
 891			dqm->gws_queue_count--;
 892			pdd->qpd.mapped_gws_queue = false;
 893		}
 894		q->properties.is_gws = false;
 895	}
 896
 897	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
 898		if (!dqm->dev->shared_resources.enable_mes)
 899			retval = map_queues_cpsch(dqm);
 900		else if (q->properties.is_active)
 901			retval = add_queue_mes(dqm, q, &pdd->qpd);
 902	} else if (q->properties.is_active &&
 903		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
 904		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
 905		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
 906		if (WARN(q->process->mm != current->mm,
 907			 "should only run in user thread"))
 908			retval = -EFAULT;
 909		else
 910			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
 911						   q->pipe, q->queue,
 912						   &q->properties, current->mm);
 913	}
 914
 915out_unlock:
 916	dqm_unlock(dqm);
 917	return retval;
 918}
 919
 920static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
 921					struct qcm_process_device *qpd)
 922{
 923	struct queue *q;
 924	struct mqd_manager *mqd_mgr;
 925	struct kfd_process_device *pdd;
 926	int retval, ret = 0;
 927
 928	dqm_lock(dqm);
 929	if (qpd->evicted++ > 0) /* already evicted, do nothing */
 930		goto out;
 931
 932	pdd = qpd_to_pdd(qpd);
 933	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
 934			    pdd->process->pasid);
 935
 936	pdd->last_evict_timestamp = get_jiffies_64();
 937	/* Mark all queues as evicted. Deactivate all active queues on
 938	 * the qpd.
 939	 */
 940	list_for_each_entry(q, &qpd->queues_list, list) {
 941		q->properties.is_evicted = true;
 942		if (!q->properties.is_active)
 943			continue;
 944
 945		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 946				q->properties.type)];
 947		q->properties.is_active = false;
 948		decrement_queue_count(dqm, qpd, q);
 949
 950		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
 951			continue;
 952
 953		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
 954				(dqm->dev->cwsr_enabled ?
 955				 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
 956				 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
 957				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
 958		if (retval && !ret)
 959			/* Return the first error, but keep going to
 960			 * maintain a consistent eviction state
 961			 */
 962			ret = retval;
 963	}
 964
 965out:
 966	dqm_unlock(dqm);
 967	return ret;
 968}
 969
 970static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 971				      struct qcm_process_device *qpd)
 972{
 973	struct queue *q;
 974	struct kfd_process_device *pdd;
 975	int retval = 0;
 976
 977	dqm_lock(dqm);
 978	if (qpd->evicted++ > 0) /* already evicted, do nothing */
 979		goto out;
 980
 981	pdd = qpd_to_pdd(qpd);
 982	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
 983			    pdd->process->pasid);
 984
 985	/* Mark all queues as evicted. Deactivate all active queues on
 986	 * the qpd.
 987	 */
 988	list_for_each_entry(q, &qpd->queues_list, list) {
 989		q->properties.is_evicted = true;
 990		if (!q->properties.is_active)
 991			continue;
 992
 993		q->properties.is_active = false;
 994		decrement_queue_count(dqm, qpd, q);
 995
 996		if (dqm->dev->shared_resources.enable_mes) {
 997			retval = remove_queue_mes(dqm, q, qpd);
 998			if (retval) {
 999				pr_err("Failed to evict queue %d\n",
1000					q->properties.queue_id);
1001				goto out;
1002			}
1003		}
1004	}
1005	pdd->last_evict_timestamp = get_jiffies_64();
1006	if (!dqm->dev->shared_resources.enable_mes)
1007		retval = execute_queues_cpsch(dqm,
1008					      qpd->is_debug ?
1009					      KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
1010					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1011
1012out:
1013	dqm_unlock(dqm);
1014	return retval;
1015}
1016
1017static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
1018					  struct qcm_process_device *qpd)
1019{
1020	struct mm_struct *mm = NULL;
1021	struct queue *q;
1022	struct mqd_manager *mqd_mgr;
1023	struct kfd_process_device *pdd;
1024	uint64_t pd_base;
1025	uint64_t eviction_duration;
1026	int retval, ret = 0;
1027
1028	pdd = qpd_to_pdd(qpd);
1029	/* Retrieve PD base */
1030	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1031
1032	dqm_lock(dqm);
1033	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1034		goto out;
1035	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1036		qpd->evicted--;
1037		goto out;
1038	}
1039
1040	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
1041			    pdd->process->pasid);
1042
1043	/* Update PD Base in QPD */
1044	qpd->page_table_base = pd_base;
1045	pr_debug("Updated PD address to 0x%llx\n", pd_base);
1046
1047	if (!list_empty(&qpd->queues_list)) {
1048		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
1049				dqm->dev->adev,
1050				qpd->vmid,
1051				qpd->page_table_base);
1052		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1053	}
1054
1055	/* Take a safe reference to the mm_struct, which may otherwise
1056	 * disappear even while the kfd_process is still referenced.
1057	 */
1058	mm = get_task_mm(pdd->process->lead_thread);
1059	if (!mm) {
1060		ret = -EFAULT;
1061		goto out;
1062	}
1063
1064	/* Remove the eviction flags. Activate queues that are not
1065	 * inactive for other reasons.
1066	 */
1067	list_for_each_entry(q, &qpd->queues_list, list) {
1068		q->properties.is_evicted = false;
1069		if (!QUEUE_IS_ACTIVE(q->properties))
1070			continue;
1071
1072		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1073				q->properties.type)];
1074		q->properties.is_active = true;
1075		increment_queue_count(dqm, qpd, q);
1076
1077		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
1078			continue;
1079
1080		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
1081				       q->queue, &q->properties, mm);
1082		if (retval && !ret)
1083			/* Return the first error, but keep going to
1084			 * maintain a consistent eviction state
1085			 */
1086			ret = retval;
1087	}
1088	qpd->evicted = 0;
1089	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1090	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
1091out:
1092	if (mm)
1093		mmput(mm);
1094	dqm_unlock(dqm);
1095	return ret;
1096}
1097
1098static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
1099					struct qcm_process_device *qpd)
1100{
1101	struct queue *q;
1102	struct kfd_process_device *pdd;
1103	uint64_t pd_base;
1104	uint64_t eviction_duration;
1105	int retval = 0;
1106
1107	pdd = qpd_to_pdd(qpd);
1108	/* Retrieve PD base */
1109	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1110
1111	dqm_lock(dqm);
1112	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1113		goto out;
1114	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1115		qpd->evicted--;
1116		goto out;
1117	}
1118
1119	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
1120			    pdd->process->pasid);
1121
1122	/* Update PD Base in QPD */
1123	qpd->page_table_base = pd_base;
1124	pr_debug("Updated PD address to 0x%llx\n", pd_base);
1125
1126	/* activate all active queues on the qpd */
1127	list_for_each_entry(q, &qpd->queues_list, list) {
1128		q->properties.is_evicted = false;
1129		if (!QUEUE_IS_ACTIVE(q->properties))
1130			continue;
1131
1132		q->properties.is_active = true;
1133		increment_queue_count(dqm, &pdd->qpd, q);
1134
1135		if (dqm->dev->shared_resources.enable_mes) {
1136			retval = add_queue_mes(dqm, q, qpd);
1137			if (retval) {
1138				pr_err("Failed to restore queue %d\n",
1139					q->properties.queue_id);
1140				goto out;
1141			}
1142		}
1143	}
1144	if (!dqm->dev->shared_resources.enable_mes)
1145		retval = execute_queues_cpsch(dqm,
1146					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1147	qpd->evicted = 0;
1148	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1149	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
1150out:
1151	dqm_unlock(dqm);
1152	return retval;
1153}
1154
1155static int register_process(struct device_queue_manager *dqm,
1156					struct qcm_process_device *qpd)
1157{
1158	struct device_process_node *n;
1159	struct kfd_process_device *pdd;
1160	uint64_t pd_base;
1161	int retval;
1162
1163	n = kzalloc(sizeof(*n), GFP_KERNEL);
1164	if (!n)
1165		return -ENOMEM;
1166
1167	n->qpd = qpd;
1168
1169	pdd = qpd_to_pdd(qpd);
1170	/* Retrieve PD base */
1171	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1172
1173	dqm_lock(dqm);
1174	list_add(&n->list, &dqm->queues);
1175
1176	/* Update PD Base in QPD */
1177	qpd->page_table_base = pd_base;
1178	pr_debug("Updated PD address to 0x%llx\n", pd_base);
1179
1180	retval = dqm->asic_ops.update_qpd(dqm, qpd);
1181
1182	dqm->processes_count++;
1183
1184	dqm_unlock(dqm);
1185
1186	/* Outside the DQM lock because under the DQM lock we can't do
1187	 * reclaim or take other locks that others hold while reclaiming.
1188	 */
1189	kfd_inc_compute_active(dqm->dev);
1190
1191	return retval;
1192}
1193
1194static int unregister_process(struct device_queue_manager *dqm,
1195					struct qcm_process_device *qpd)
1196{
1197	int retval;
1198	struct device_process_node *cur, *next;
1199
1200	pr_debug("qpd->queues_list is %s\n",
1201			list_empty(&qpd->queues_list) ? "empty" : "not empty");
1202
1203	retval = 0;
1204	dqm_lock(dqm);
1205
1206	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
1207		if (qpd == cur->qpd) {
1208			list_del(&cur->list);
1209			kfree(cur);
1210			dqm->processes_count--;
1211			goto out;
1212		}
1213	}
1214	/* qpd not found in dqm list */
1215	retval = 1;
1216out:
1217	dqm_unlock(dqm);
1218
1219	/* Outside the DQM lock because under the DQM lock we can't do
1220	 * reclaim or take other locks that others hold while reclaiming.
1221	 */
1222	if (!retval)
1223		kfd_dec_compute_active(dqm->dev);
1224
1225	return retval;
1226}
1227
1228static int
1229set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
1230			unsigned int vmid)
1231{
1232	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
1233						dqm->dev->adev, pasid, vmid);
1234}
1235
1236static void init_interrupts(struct device_queue_manager *dqm)
1237{
1238	unsigned int i;
1239
1240	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
1241		if (is_pipe_enabled(dqm, 0, i))
1242			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
1243}
1244
1245static void init_sdma_bitmaps(struct device_queue_manager *dqm)
1246{
1247	unsigned int num_sdma_queues =
1248		min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8,
1249		      get_num_sdma_queues(dqm));
1250	unsigned int num_xgmi_sdma_queues =
1251		min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8,
1252		      get_num_xgmi_sdma_queues(dqm));
1253
1254	if (num_sdma_queues)
1255		dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0);
1256	if (num_xgmi_sdma_queues)
1257		dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0);
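	/* GENMASK_ULL(n - 1, 0) marks queue IDs 0..n-1 as allocatable;
	 * reserved SDMA queues are masked off the PCIe-optimized bitmap
	 * below so they are never handed out.
	 */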
1258
1259	dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm);
1260	pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
1261}
1262
1263static int initialize_nocpsch(struct device_queue_manager *dqm)
1264{
1265	int pipe, queue;
1266
1267	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1268
1269	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
1270					sizeof(unsigned int), GFP_KERNEL);
1271	if (!dqm->allocated_queues)
1272		return -ENOMEM;
1273
1274	mutex_init(&dqm->lock_hidden);
1275	INIT_LIST_HEAD(&dqm->queues);
1276	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
1277	dqm->active_cp_queue_count = 0;
1278	dqm->gws_queue_count = 0;
1279
1280	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1281		int pipe_offset = pipe * get_queues_per_pipe(dqm);
1282
1283		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
1284			if (test_bit(pipe_offset + queue,
1285				     dqm->dev->shared_resources.cp_queue_bitmap))
1286				dqm->allocated_queues[pipe] |= 1 << queue;
1287	}
1288
1289	memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
1290
1291	init_sdma_bitmaps(dqm);
1292
1293	return 0;
1294}
1295
1296static void uninitialize(struct device_queue_manager *dqm)
1297{
1298	int i;
1299
1300	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
1301
1302	kfree(dqm->allocated_queues);
1303	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
1304		kfree(dqm->mqd_mgrs[i]);
1305	mutex_destroy(&dqm->lock_hidden);
1306}
1307
1308static int start_nocpsch(struct device_queue_manager *dqm)
1309{
1310	int r = 0;
1311
1312	pr_info("SW scheduler is used");
1313	init_interrupts(dqm);
1314
1315	if (dqm->dev->adev->asic_type == CHIP_HAWAII)
1316		r = pm_init(&dqm->packet_mgr, dqm);
1317	if (!r)
1318		dqm->sched_running = true;
1319
1320	return r;
1321}
1322
1323static int stop_nocpsch(struct device_queue_manager *dqm)
1324{
1325	if (dqm->dev->adev->asic_type == CHIP_HAWAII)
1326		pm_uninit(&dqm->packet_mgr, false);
1327	dqm->sched_running = false;
1328
1329	return 0;
1330}
1331
1332static void pre_reset(struct device_queue_manager *dqm)
1333{
1334	dqm_lock(dqm);
1335	dqm->is_resetting = true;
1336	dqm_unlock(dqm);
1337}
1338
1339static int allocate_sdma_queue(struct device_queue_manager *dqm,
1340				struct queue *q, const uint32_t *restore_sdma_id)
1341{
1342	int bit;
1343
1344	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1345		if (dqm->sdma_bitmap == 0) {
1346			pr_err("No more SDMA queue to allocate\n");
1347			return -ENOMEM;
1348		}
1349
1350		if (restore_sdma_id) {
1351			/* Re-use existing sdma_id */
1352			if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) {
1353				pr_err("SDMA queue already in use\n");
1354				return -EBUSY;
1355			}
1356			dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id);
1357			q->sdma_id = *restore_sdma_id;
1358		} else {
1359			/* Find first available sdma_id */
1360			bit = __ffs64(dqm->sdma_bitmap);
1361			dqm->sdma_bitmap &= ~(1ULL << bit);
1362			q->sdma_id = bit;
1363		}
1364
1365		q->properties.sdma_engine_id = q->sdma_id %
1366				kfd_get_num_sdma_engines(dqm->dev);
1367		q->properties.sdma_queue_id = q->sdma_id /
1368				kfd_get_num_sdma_engines(dqm->dev);
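		/* Plain SDMA queues are spread round-robin across the
		 * PCIe-optimized engines: sdma_id maps to engine (id % engines)
		 * and queue (id / engines).
		 */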
1369	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1370		if (dqm->xgmi_sdma_bitmap == 0) {
1371			pr_err("No more XGMI SDMA queue to allocate\n");
1372			return -ENOMEM;
1373		}
1374		if (restore_sdma_id) {
1375			/* Re-use existing sdma_id */
1376			if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) {
1377				pr_err("SDMA queue already in use\n");
1378				return -EBUSY;
1379			}
1380			dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id);
1381			q->sdma_id = *restore_sdma_id;
1382		} else {
1383			bit = __ffs64(dqm->xgmi_sdma_bitmap);
1384			dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
1385			q->sdma_id = bit;
1386		}
1387		/* sdma_engine_id is sdma id including
1388		 * both PCIe-optimized SDMAs and XGMI-
1389		 * optimized SDMAs. The calculation below
1390		 * assumes the first N engines are always
1391		 * PCIe-optimized ones
1392		 */
1393		q->properties.sdma_engine_id =
1394			kfd_get_num_sdma_engines(dqm->dev) +
1395			q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
1396		q->properties.sdma_queue_id = q->sdma_id /
1397			kfd_get_num_xgmi_sdma_engines(dqm->dev);
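		/* Illustrative example with hypothetical counts of 2 PCIe-optimized
		 * and 6 XGMI-optimized engines: XGMI sdma_id 7 maps to engine
		 * 2 + (7 % 6) = 3 and queue 7 / 6 = 1.
		 */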
1398	}
1399
1400	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1401	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
1402
1403	return 0;
1404}
1405
1406static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1407				struct queue *q)
1408{
1409	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1410		if (q->sdma_id >= get_num_sdma_queues(dqm))
1411			return;
1412		dqm->sdma_bitmap |= (1ULL << q->sdma_id);
1413	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1414		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1415			return;
1416		dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
1417	}
1418}
1419
1420/*
1421 * Device Queue Manager implementation for cp scheduler
1422 */
1423
1424static int set_sched_resources(struct device_queue_manager *dqm)
1425{
1426	int i, mec;
1427	struct scheduling_resources res;
1428
1429	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
1430
1431	res.queue_mask = 0;
1432	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
1433		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
1434			/ dqm->dev->shared_resources.num_pipe_per_mec;
1435
1436		if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
1437			continue;
1438
1439		/* only acquire queues from the first MEC */
1440		if (mec > 0)
1441			continue;
1442
1443		/* This situation may be hit in the future if a new HW
1444		 * generation exposes more than 64 queues. If so, the
1445		 * definition of res.queue_mask needs updating
1446		 */
1447		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
1448			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
1449			break;
1450		}
1451
1452		res.queue_mask |= 1ull
1453			<< amdgpu_queue_mask_bit_to_set_resource_bit(
1454				dqm->dev->adev, i);
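		/* The KGD queue bit index and the SET_RESOURCES bit layout differ,
		 * so translate the index before setting it in the 64-bit mask.
		 */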
1455	}
1456	res.gws_mask = ~0ull;
1457	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1458
1459	pr_debug("Scheduling resources:\n"
1460			"vmid mask: 0x%8X\n"
1461			"queue mask: 0x%8llX\n",
1462			res.vmid_mask, res.queue_mask);
1463
1464	return pm_send_set_resources(&dqm->packet_mgr, &res);
1465}
1466
1467static int initialize_cpsch(struct device_queue_manager *dqm)
1468{
1469	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1470
1471	mutex_init(&dqm->lock_hidden);
1472	INIT_LIST_HEAD(&dqm->queues);
1473	dqm->active_queue_count = dqm->processes_count = 0;
1474	dqm->active_cp_queue_count = 0;
1475	dqm->gws_queue_count = 0;
1476	dqm->active_runlist = false;
1477	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1478
1479	init_sdma_bitmaps(dqm);
1480
1481	return 0;
1482}
1483
1484static int start_cpsch(struct device_queue_manager *dqm)
1485{
1486	int retval;
1487
1488	retval = 0;
1489
1490	dqm_lock(dqm);
1491
1492	if (!dqm->dev->shared_resources.enable_mes) {
1493		retval = pm_init(&dqm->packet_mgr, dqm);
1494		if (retval)
1495			goto fail_packet_manager_init;
1496
1497		retval = set_sched_resources(dqm);
1498		if (retval)
1499			goto fail_set_sched_resources;
1500	}
1501	pr_debug("Allocating fence memory\n");
1502
1503	/* allocate fence memory on the gart */
1504	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1505					&dqm->fence_mem);
1506
1507	if (retval)
1508		goto fail_allocate_vidmem;
1509
1510	dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
1511	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
1512
1513	init_interrupts(dqm);
1514
1515	/* clear hang status when the driver tries to start the hw scheduler */
1516	dqm->is_hws_hang = false;
1517	dqm->is_resetting = false;
1518	dqm->sched_running = true;
1519	if (!dqm->dev->shared_resources.enable_mes)
1520		execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1521	dqm_unlock(dqm);
1522
1523	return 0;
1524fail_allocate_vidmem:
1525fail_set_sched_resources:
1526	if (!dqm->dev->shared_resources.enable_mes)
1527		pm_uninit(&dqm->packet_mgr, false);
1528fail_packet_manager_init:
1529	dqm_unlock(dqm);
1530	return retval;
1531}
1532
1533static int stop_cpsch(struct device_queue_manager *dqm)
1534{
1535	bool hanging;
1536
1537	dqm_lock(dqm);
1538	if (!dqm->sched_running) {
1539		dqm_unlock(dqm);
1540		return 0;
1541	}
1542
1543	if (!dqm->is_hws_hang) {
1544		if (!dqm->dev->shared_resources.enable_mes)
1545			unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
1546		else
1547			remove_all_queues_mes(dqm);
1548	}
1549
1550	hanging = dqm->is_hws_hang || dqm->is_resetting;
1551	dqm->sched_running = false;
1552
1553	if (!dqm->dev->shared_resources.enable_mes)
1554		pm_release_ib(&dqm->packet_mgr);
1555
1556	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1557	if (!dqm->dev->shared_resources.enable_mes)
1558		pm_uninit(&dqm->packet_mgr, hanging);
1559	dqm_unlock(dqm);
1560
1561	return 0;
1562}
1563
1564static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1565					struct kernel_queue *kq,
1566					struct qcm_process_device *qpd)
1567{
1568	dqm_lock(dqm);
1569	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1570		pr_warn("Can't create new kernel queue because %d queues were already created\n",
1571				dqm->total_queue_count);
1572		dqm_unlock(dqm);
1573		return -EPERM;
1574	}
1575
1576	/*
1577	 * Unconditionally increment this counter, regardless of the queue's
1578	 * type or whether the queue is active.
1579	 */
1580	dqm->total_queue_count++;
1581	pr_debug("Total of %d queues are accountable so far\n",
1582			dqm->total_queue_count);
1583
1584	list_add(&kq->list, &qpd->priv_queue_list);
1585	increment_queue_count(dqm, qpd, kq->queue);
1586	qpd->is_debug = true;
1587	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1588	dqm_unlock(dqm);
1589
1590	return 0;
1591}
1592
1593static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1594					struct kernel_queue *kq,
1595					struct qcm_process_device *qpd)
1596{
1597	dqm_lock(dqm);
1598	list_del(&kq->list);
1599	decrement_queue_count(dqm, qpd, kq->queue);
1600	qpd->is_debug = false;
1601	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1602	/*
1603	 * Unconditionally decrement this counter, regardless of the queue's
1604	 * type.
1605	 */
1606	dqm->total_queue_count--;
1607	pr_debug("Total of %d queues are accountable so far\n",
1608			dqm->total_queue_count);
1609	dqm_unlock(dqm);
1610}
1611
1612static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1613			struct qcm_process_device *qpd,
1614			const struct kfd_criu_queue_priv_data *qd,
1615			const void *restore_mqd, const void *restore_ctl_stack)
1616{
1617	int retval;
1618	struct mqd_manager *mqd_mgr;
1619
1620	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1621		pr_warn("Can't create new usermode queue because %d queues were already created\n",
1622				dqm->total_queue_count);
1623		retval = -EPERM;
1624		goto out;
1625	}
1626
1627	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1628		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1629		dqm_lock(dqm);
1630		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
1631		dqm_unlock(dqm);
1632		if (retval)
1633			goto out;
1634	}
1635
1636	retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
1637	if (retval)
1638		goto out_deallocate_sdma_queue;
1639
1640	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1641			q->properties.type)];
1642
1643	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1644		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1645		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1646	q->properties.tba_addr = qpd->tba_addr;
1647	q->properties.tma_addr = qpd->tma_addr;
1648	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1649	if (!q->mqd_mem_obj) {
1650		retval = -ENOMEM;
1651		goto out_deallocate_doorbell;
1652	}
1653
1654	dqm_lock(dqm);
1655	/*
1656	 * Eviction state logic: mark all queues as evicted, even ones
1657	 * not currently active. Restoring inactive queues later only
1658	 * updates the is_evicted flag but is a no-op otherwise.
1659	 */
1660	q->properties.is_evicted = !!qpd->evicted;
1661
1662	if (qd)
1663		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
1664				     &q->properties, restore_mqd, restore_ctl_stack,
1665				     qd->ctl_stack_size);
1666	else
1667		mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1668					&q->gart_mqd_addr, &q->properties);
1669
1670	list_add(&q->list, &qpd->queues_list);
1671	qpd->queue_count++;
1672
1673	if (q->properties.is_active) {
1674		increment_queue_count(dqm, qpd, q);
1675
1676		if (!dqm->dev->shared_resources.enable_mes)
1677			retval = execute_queues_cpsch(dqm,
1678					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1679		else
1680			retval = add_queue_mes(dqm, q, qpd);
1681		if (retval)
1682			goto cleanup_queue;
1683	}
1684
1685	/*
1686	 * Unconditionally increment this counter, regardless of the queue's
1687	 * type or whether the queue is active.
1688	 */
1689	dqm->total_queue_count++;
1690
1691	pr_debug("Total of %d queues are accountable so far\n",
1692			dqm->total_queue_count);
1693
1694	dqm_unlock(dqm);
1695	return retval;
1696
1697cleanup_queue:
1698	qpd->queue_count--;
1699	list_del(&q->list);
1700	if (q->properties.is_active)
1701		decrement_queue_count(dqm, qpd, q);
1702	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1703	dqm_unlock(dqm);
1704out_deallocate_doorbell:
1705	deallocate_doorbell(qpd, q);
1706out_deallocate_sdma_queue:
1707	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1708		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1709		dqm_lock(dqm);
1710		deallocate_sdma_queue(dqm, q);
1711		dqm_unlock(dqm);
1712	}
1713out:
1714	return retval;
1715}
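/*
 * Note on the error paths above: they unwind in reverse order of
 * setup, releasing the queue list entry and MQD first, then the
 * doorbell, then the SDMA queue allocated at the top of the function.
 */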
1716
1717int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
1718				uint64_t fence_value,
1719				unsigned int timeout_ms)
1720{
1721	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1722
1723	while (*fence_addr != fence_value) {
1724		if (time_after(jiffies, end_jiffies)) {
1725			pr_err("qcm fence wait loop timeout expired\n");
1726			/* In the HWS case, this is used to halt the driver thread
1727			 * so that CP state is not disturbed before taking
1728			 * scandumps for FW debugging.
1729			 */
1730			while (halt_if_hws_hang)
1731				schedule();
1732
1733			return -ETIME;
1734		}
1735		schedule();
1736	}
1737
1738	return 0;
1739}
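/*
 * For illustration, the typical handshake built on this helper (see
 * unmap_queues_cpsch() below) looks like:
 *
 *	*dqm->fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
 *			     KFD_FENCE_COMPLETED);
 *	retval = amdkfd_fence_wait_timeout(dqm->fence_addr,
 *			KFD_FENCE_COMPLETED,
 *			queue_preemption_timeout_ms);
 *
 * The CPU arms the fence, asks the HWS firmware to write the
 * completion value once the preceding unmap packets have drained, and
 * then polls the fence until it flips or the timeout expires.
 */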
1740
1741/* dqm->lock mutex has to be locked before calling this function */
1742static int map_queues_cpsch(struct device_queue_manager *dqm)
1743{
1744	int retval;
1745
1746	if (!dqm->sched_running)
1747		return 0;
1748	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
1749		return 0;
1750	if (dqm->active_runlist)
1751		return 0;
1752
1753	retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
1754	pr_debug("%s sent runlist\n", __func__);
1755	if (retval) {
1756		pr_err("failed to execute runlist\n");
1757		return retval;
1758	}
1759	dqm->active_runlist = true;
1760
1761	return retval;
1762}
1763
1764/* dqm->lock mutex has to be locked before calling this function */
1765static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1766				enum kfd_unmap_queues_filter filter,
1767				uint32_t filter_param, bool reset)
1768{
1769	int retval = 0;
1770	struct mqd_manager *mqd_mgr;
1771
1772	if (!dqm->sched_running)
1773		return 0;
1774	if (dqm->is_hws_hang || dqm->is_resetting)
1775		return -EIO;
1776	if (!dqm->active_runlist)
1777		return retval;
1778
1779	retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
1780	if (retval)
1781		return retval;
1782
1783	*dqm->fence_addr = KFD_FENCE_INIT;
1784	pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
1785				KFD_FENCE_COMPLETED);
1786	/* wait for the fence; bounded by queue_preemption_timeout_ms */
1787	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1788				queue_preemption_timeout_ms);
1789	if (retval) {
1790		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1791		kfd_hws_hang(dqm);
1792		return retval;
1793	}
1794
1795	/* In the current MEC firmware implementation, if a compute queue
1796	 * doesn't respond to the preemption request in time, the HIQ will
1797	 * abandon the unmap request without returning any timeout error
1798	 * to the driver. Instead, MEC firmware logs the doorbell of the
1799	 * unresponsive compute queue in the HIQ.MQD.queue_doorbell_id fields.
1800	 * To make sure the queue unmap was successful, the driver needs to
1801	 * check those fields.
1802	 */
1803	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
1804	if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
1805		pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
1806		while (halt_if_hws_hang)
1807			schedule();
1808		return -ETIME;
1809	}
1810
1811	pm_release_ib(&dqm->packet_mgr);
1812	dqm->active_runlist = false;
1813
1814	return retval;
1815}
1816
1817/* only for compute queues */
1818static int reset_queues_cpsch(struct device_queue_manager *dqm,
1819			uint16_t pasid)
1820{
1821	int retval;
1822
1823	dqm_lock(dqm);
1824
1825	retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
1826			pasid, true);
1827
1828	dqm_unlock(dqm);
1829	return retval;
1830}
1831
1832/* dqm->lock mutex has to be locked before calling this function */
1833static int execute_queues_cpsch(struct device_queue_manager *dqm,
1834				enum kfd_unmap_queues_filter filter,
1835				uint32_t filter_param)
1836{
1837	int retval;
1838
1839	if (dqm->is_hws_hang)
1840		return -EIO;
1841	retval = unmap_queues_cpsch(dqm, filter, filter_param, false);
1842	if (retval)
1843		return retval;
1844
1845	return map_queues_cpsch(dqm);
1846}
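/*
 * In other words, execute_queues_cpsch() is a full runlist rebuild:
 * it preempts the queues selected by the filter and then re-maps
 * whatever is still active via map_queues_cpsch().
 */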
1847
1848static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1849				struct qcm_process_device *qpd,
1850				struct queue *q)
1851{
1852	int retval;
1853	struct mqd_manager *mqd_mgr;
1854	uint64_t sdma_val = 0;
1855	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
1856
1857	/* Get the SDMA queue stats */
1858	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1859	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1860		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
1861							&sdma_val);
1862		if (retval)
1863			pr_err("Failed to read SDMA queue counter for queue: %d\n",
1864				q->properties.queue_id);
1865	}
1866
1867	retval = 0;
1868
1869	/* remove queue from list to prevent rescheduling after preemption */
1870	dqm_lock(dqm);
1871
1872	if (qpd->is_debug) {
1873		/*
1874		 * error: we currently do not allow destroying a queue
1875		 * belonging to a process that is being debugged
1876		 */
1877		retval = -EBUSY;
1878		goto failed_try_destroy_debugged_queue;
1879
1880	}
1881
1882	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1883			q->properties.type)];
1884
1885	deallocate_doorbell(qpd, q);
1886
1887	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1888	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1889		deallocate_sdma_queue(dqm, q);
1890		pdd->sdma_past_activity_counter += sdma_val;
1891	}
1892
1893	list_del(&q->list);
1894	qpd->queue_count--;
1895	if (q->properties.is_active) {
1896		if (!dqm->dev->shared_resources.enable_mes) {
1897			decrement_queue_count(dqm, qpd, q);
1898			retval = execute_queues_cpsch(dqm,
1899						      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1900			if (retval == -ETIME)
1901				qpd->reset_wavefronts = true;
1902		} else {
1903			retval = remove_queue_mes(dqm, q, qpd);
1904		}
1905	}
1906
1907	/*
1908	 * Unconditionally decrement this counter, regardless of the queue's
1909	 * type
1910	 */
1911	dqm->total_queue_count--;
1912	pr_debug("Total of %d queues are accountable so far\n",
1913			dqm->total_queue_count);
1914
1915	dqm_unlock(dqm);
1916
1917	/* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1918	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1919
1920	return retval;
1921
1922failed_try_destroy_debugged_queue:
1923
1924	dqm_unlock(dqm);
1925	return retval;
1926}
1927
1928/*
1929 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1930 * stay in user mode.
1931 */
1932#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1933/* APE1 limit is inclusive and 64K aligned. */
1934#define APE1_LIMIT_ALIGNMENT 0xFFFF
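/*
 * Worked example (hypothetical values): with
 * alternate_aperture_base = 0x100000000 and
 * alternate_aperture_size = 0x10000, the inclusive limit is
 * 0x10000ffff. Both values satisfy the APE1_FIXED_BITS_MASK checks
 * below, so the register fields become
 *
 *	sh_mem_ape1_base  = 0x100000000 >> 16 = 0x10000
 *	sh_mem_ape1_limit = 0x10000ffff >> 16 = 0x10000
 *
 * A size that is not a multiple of 64K, or a base with any of the
 * fixed bits set, is rejected.
 */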
1935
1936static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1937				   struct qcm_process_device *qpd,
1938				   enum cache_policy default_policy,
1939				   enum cache_policy alternate_policy,
1940				   void __user *alternate_aperture_base,
1941				   uint64_t alternate_aperture_size)
1942{
1943	bool retval = true;
1944
1945	if (!dqm->asic_ops.set_cache_memory_policy)
1946		return retval;
1947
1948	dqm_lock(dqm);
1949
1950	if (alternate_aperture_size == 0) {
1951		/* base > limit disables APE1 */
1952		qpd->sh_mem_ape1_base = 1;
1953		qpd->sh_mem_ape1_limit = 0;
1954	} else {
1955		/*
1956		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1957		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
1958		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1959		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1960		 * Verify that the base and size parameters can be
1961		 * represented in this format and convert them.
1962		 * Additionally restrict APE1 to user-mode addresses.
1963		 */
1964
1965		uint64_t base = (uintptr_t)alternate_aperture_base;
1966		uint64_t limit = base + alternate_aperture_size - 1;
1967
1968		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1969		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1970			retval = false;
1971			goto out;
1972		}
1973
1974		qpd->sh_mem_ape1_base = base >> 16;
1975		qpd->sh_mem_ape1_limit = limit >> 16;
1976	}
1977
1978	retval = dqm->asic_ops.set_cache_memory_policy(
1979			dqm,
1980			qpd,
1981			default_policy,
1982			alternate_policy,
1983			alternate_aperture_base,
1984			alternate_aperture_size);
1985
1986	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1987		program_sh_mem_settings(dqm, qpd);
1988
1989	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1990		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1991		qpd->sh_mem_ape1_limit);
1992
1993out:
1994	dqm_unlock(dqm);
1995	return retval;
1996}
1997
1998static int process_termination_nocpsch(struct device_queue_manager *dqm,
1999		struct qcm_process_device *qpd)
2000{
2001	struct queue *q;
2002	struct device_process_node *cur, *next_dpn;
2003	int retval = 0;
2004	bool found = false;
2005
2006	dqm_lock(dqm);
2007
2008	/* Clear all user mode queues */
2009	while (!list_empty(&qpd->queues_list)) {
2010		struct mqd_manager *mqd_mgr;
2011		int ret;
2012
2013		q = list_first_entry(&qpd->queues_list, struct queue, list);
2014		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2015				q->properties.type)];
2016		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
2017		if (ret)
2018			retval = ret;
2019		dqm_unlock(dqm);
2020		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2021		dqm_lock(dqm);
2022	}
2023
2024	/* Unregister process */
2025	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
2026		if (qpd == cur->qpd) {
2027			list_del(&cur->list);
2028			kfree(cur);
2029			dqm->processes_count--;
2030			found = true;
2031			break;
2032		}
2033	}
2034
2035	dqm_unlock(dqm);
2036
2037	/* Outside the DQM lock because under the DQM lock we can't do
2038	 * reclaim or take other locks that others hold while reclaiming.
2039	 */
2040	if (found)
2041		kfd_dec_compute_active(dqm->dev);
2042
2043	return retval;
2044}
2045
2046static int get_wave_state(struct device_queue_manager *dqm,
2047			  struct queue *q,
2048			  void __user *ctl_stack,
2049			  u32 *ctl_stack_used_size,
2050			  u32 *save_area_used_size)
2051{
2052	struct mqd_manager *mqd_mgr;
2053
2054	dqm_lock(dqm);
2055
2056	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
2057
2058	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
2059	    q->properties.is_active || !q->device->cwsr_enabled ||
2060	    !mqd_mgr->get_wave_state) {
2061		dqm_unlock(dqm);
2062		return -EINVAL;
2063	}
2064
2065	dqm_unlock(dqm);
2066
2067	/*
2068	 * get_wave_state is called outside the dqm lock to prevent circular
2069	 * locking; the queue is protected against destruction by the
2070	 * process lock.
2071	 */
2072	return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
2073			ctl_stack_used_size, save_area_used_size);
2074}
2075
2076static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
2077			const struct queue *q,
2078			u32 *mqd_size,
2079			u32 *ctl_stack_size)
2080{
2081	struct mqd_manager *mqd_mgr;
2082	enum KFD_MQD_TYPE mqd_type =
2083			get_mqd_type_from_queue_type(q->properties.type);
2084
2085	dqm_lock(dqm);
2086	mqd_mgr = dqm->mqd_mgrs[mqd_type];
2087	*mqd_size = mqd_mgr->mqd_size;
2088	*ctl_stack_size = 0;
2089
2090	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
2091		mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
2092
2093	dqm_unlock(dqm);
2094}
2095
2096static int checkpoint_mqd(struct device_queue_manager *dqm,
2097			  const struct queue *q,
2098			  void *mqd,
2099			  void *ctl_stack)
2100{
2101	struct mqd_manager *mqd_mgr;
2102	int r = 0;
2103	enum KFD_MQD_TYPE mqd_type =
2104			get_mqd_type_from_queue_type(q->properties.type);
2105
2106	dqm_lock(dqm);
2107
2108	if (q->properties.is_active || !q->device->cwsr_enabled) {
2109		r = -EINVAL;
2110		goto dqm_unlock;
2111	}
2112
2113	mqd_mgr = dqm->mqd_mgrs[mqd_type];
2114	if (!mqd_mgr->checkpoint_mqd) {
2115		r = -EOPNOTSUPP;
2116		goto dqm_unlock;
2117	}
2118
2119	mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);
2120
2121dqm_unlock:
2122	dqm_unlock(dqm);
2123	return r;
2124}
2125
2126static int process_termination_cpsch(struct device_queue_manager *dqm,
2127		struct qcm_process_device *qpd)
2128{
2129	int retval;
2130	struct queue *q;
2131	struct kernel_queue *kq, *kq_next;
2132	struct mqd_manager *mqd_mgr;
2133	struct device_process_node *cur, *next_dpn;
2134	enum kfd_unmap_queues_filter filter =
2135		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
2136	bool found = false;
2137
2138	retval = 0;
2139
2140	dqm_lock(dqm);
2141
2142	/* Clean all kernel queues */
2143	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
2144		list_del(&kq->list);
2145		decrement_queue_count(dqm, qpd, kq->queue);
2146		qpd->is_debug = false;
2147		dqm->total_queue_count--;
2148		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
2149	}
2150
2151	/* Clear all user mode queues */
2152	list_for_each_entry(q, &qpd->queues_list, list) {
2153		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
2154			deallocate_sdma_queue(dqm, q);
2155		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
2156			deallocate_sdma_queue(dqm, q);
2157
2158		if (q->properties.is_active) {
2159			decrement_queue_count(dqm, qpd, q);
2160
2161			if (dqm->dev->shared_resources.enable_mes) {
2162				retval = remove_queue_mes(dqm, q, qpd);
2163				if (retval)
2164					pr_err("Failed to remove queue %d\n",
2165						q->properties.queue_id);
2166			}
2167		}
2168
2169		dqm->total_queue_count--;
2170	}
2171
2172	/* Unregister process */
2173	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
2174		if (qpd == cur->qpd) {
2175			list_del(&cur->list);
2176			kfree(cur);
2177			dqm->processes_count--;
2178			found = true;
2179			break;
2180		}
2181	}
2182
2183	if (!dqm->dev->shared_resources.enable_mes)
2184		retval = execute_queues_cpsch(dqm, filter, 0);
2185
2186	if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
2187		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
2188		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
2189		qpd->reset_wavefronts = false;
2190	}
2191
2192	/* Lastly, free mqd resources.
2193	 * Do free_mqd() after dqm_unlock to avoid circular locking.
2194	 */
2195	while (!list_empty(&qpd->queues_list)) {
2196		q = list_first_entry(&qpd->queues_list, struct queue, list);
2197		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2198				q->properties.type)];
2199		list_del(&q->list);
2200		qpd->queue_count--;
2201		dqm_unlock(dqm);
2202		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2203		dqm_lock(dqm);
2204	}
2205	dqm_unlock(dqm);
2206
2207	/* Outside the DQM lock because under the DQM lock we can't do
2208	 * reclaim or take other locks that others hold while reclaiming.
2209	 */
2210	if (found)
2211		kfd_dec_compute_active(dqm->dev);
2212
2213	return retval;
2214}
2215
2216static int init_mqd_managers(struct device_queue_manager *dqm)
2217{
2218	int i, j;
2219	struct mqd_manager *mqd_mgr;
2220
2221	for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
2222		mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
2223		if (!mqd_mgr) {
2224			pr_err("mqd manager [%d] initialization failed\n", i);
2225			goto out_free;
2226		}
2227		dqm->mqd_mgrs[i] = mqd_mgr;
2228	}
2229
2230	return 0;
2231
2232out_free:
2233	for (j = 0; j < i; j++) {
2234		kfree(dqm->mqd_mgrs[j]);
2235		dqm->mqd_mgrs[j] = NULL;
2236	}
2237
2238	return -ENOMEM;
2239}
2240
2241/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
2242static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
2243{
2244	int retval;
2245	struct kfd_dev *dev = dqm->dev;
2246	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
2247	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
2248		get_num_all_sdma_engines(dqm) *
2249		dev->device_info.num_sdma_queues_per_engine +
2250		dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
2251
2252	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
2253		&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
2254		(void *)&(mem_obj->cpu_ptr), false);
2255
2256	return retval;
2257}
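/*
 * Sizing illustration (hypothetical numbers): with 2 SDMA engines,
 * 8 queues per engine and 512-byte SDMA and HIQ MQDs, the chunk
 * above is 2 * 8 * 512 + 512 = 8704 bytes, allocated from GTT in a
 * single piece.
 */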
2258
2259struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
2260{
2261	struct device_queue_manager *dqm;
2262
2263	pr_debug("Loading device queue manager\n");
2264
2265	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
2266	if (!dqm)
2267		return NULL;
2268
2269	switch (dev->adev->asic_type) {
2270	/* HWS is not available on Hawaii. */
2271	case CHIP_HAWAII:
2272	/* HWS depends on CWSR for timely dequeue. CWSR is not
2273	 * available on Tonga.
2274	 *
2275	 * FIXME: This argument also applies to Kaveri.
2276	 */
2277	case CHIP_TONGA:
2278		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
2279		break;
2280	default:
2281		dqm->sched_policy = sched_policy;
2282		break;
2283	}
2284
2285	dqm->dev = dev;
2286	switch (dqm->sched_policy) {
2287	case KFD_SCHED_POLICY_HWS:
2288	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
2289		/* initialize dqm for cp scheduling */
2290		dqm->ops.create_queue = create_queue_cpsch;
2291		dqm->ops.initialize = initialize_cpsch;
2292		dqm->ops.start = start_cpsch;
2293		dqm->ops.stop = stop_cpsch;
2294		dqm->ops.pre_reset = pre_reset;
2295		dqm->ops.destroy_queue = destroy_queue_cpsch;
2296		dqm->ops.update_queue = update_queue;
2297		dqm->ops.register_process = register_process;
2298		dqm->ops.unregister_process = unregister_process;
2299		dqm->ops.uninitialize = uninitialize;
2300		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
2301		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
2302		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
2303		dqm->ops.process_termination = process_termination_cpsch;
2304		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
2305		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
2306		dqm->ops.get_wave_state = get_wave_state;
2307		dqm->ops.reset_queues = reset_queues_cpsch;
2308		dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2309		dqm->ops.checkpoint_mqd = checkpoint_mqd;
2310		break;
2311	case KFD_SCHED_POLICY_NO_HWS:
2312		/* initialize dqm for no cp scheduling */
2313		dqm->ops.start = start_nocpsch;
2314		dqm->ops.stop = stop_nocpsch;
2315		dqm->ops.pre_reset = pre_reset;
2316		dqm->ops.create_queue = create_queue_nocpsch;
2317		dqm->ops.destroy_queue = destroy_queue_nocpsch;
2318		dqm->ops.update_queue = update_queue;
2319		dqm->ops.register_process = register_process;
2320		dqm->ops.unregister_process = unregister_process;
2321		dqm->ops.initialize = initialize_nocpsch;
2322		dqm->ops.uninitialize = uninitialize;
2323		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
2324		dqm->ops.process_termination = process_termination_nocpsch;
2325		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
2326		dqm->ops.restore_process_queues =
2327			restore_process_queues_nocpsch;
2328		dqm->ops.get_wave_state = get_wave_state;
2329		dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2330		dqm->ops.checkpoint_mqd = checkpoint_mqd;
2331		break;
2332	default:
2333		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
2334		goto out_free;
2335	}
2336
2337	switch (dev->adev->asic_type) {
2338	case CHIP_CARRIZO:
2339		device_queue_manager_init_vi(&dqm->asic_ops);
2340		break;
2341
2342	case CHIP_KAVERI:
2343		device_queue_manager_init_cik(&dqm->asic_ops);
2344		break;
2345
2346	case CHIP_HAWAII:
2347		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
2348		break;
2349
2350	case CHIP_TONGA:
2351	case CHIP_FIJI:
2352	case CHIP_POLARIS10:
2353	case CHIP_POLARIS11:
2354	case CHIP_POLARIS12:
2355	case CHIP_VEGAM:
2356		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
2357		break;
2358
2359	default:
2360		if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
2361			device_queue_manager_init_v11(&dqm->asic_ops);
2362		else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
2363			device_queue_manager_init_v10_navi10(&dqm->asic_ops);
2364		else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
2365			device_queue_manager_init_v9(&dqm->asic_ops);
2366		else {
2367			WARN(1, "Unexpected ASIC family %u",
2368			     dev->adev->asic_type);
2369			goto out_free;
2370		}
2371	}
2372
2373	if (init_mqd_managers(dqm))
2374		goto out_free;
2375
2376	if (allocate_hiq_sdma_mqd(dqm)) {
2377		pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
2378		goto out_free;
2379	}
2380
2381	if (!dqm->ops.initialize(dqm))
2382		return dqm;
2383
2384out_free:
2385	kfree(dqm);
2386	return NULL;
2387}
2388
2389static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
2390				    struct kfd_mem_obj *mqd)
2391{
2392	WARN(!mqd, "No hiq sdma mqd trunk to free");
2393
2394	amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
2395}
2396
2397void device_queue_manager_uninit(struct device_queue_manager *dqm)
2398{
2399	dqm->ops.uninitialize(dqm);
2400	deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
2401	kfree(dqm);
2402}
2403
2404int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
2405{
2406	struct kfd_process_device *pdd;
2407	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
2408	int ret = 0;
2409
2410	if (!p)
2411		return -EINVAL;
2412	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
2413	pdd = kfd_get_process_device_data(dqm->dev, p);
2414	if (pdd)
2415		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
2416	kfd_unref_process(p);
2417
2418	return ret;
2419}
2420
2421static void kfd_process_hw_exception(struct work_struct *work)
2422{
2423	struct device_queue_manager *dqm = container_of(work,
2424			struct device_queue_manager, hw_exception_work);
2425	amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
2426}
2427
2428#if defined(CONFIG_DEBUG_FS)
2429
2430static void seq_reg_dump(struct seq_file *m,
2431			 uint32_t (*dump)[2], uint32_t n_regs)
2432{
2433	uint32_t i, count;
2434
2435	for (i = 0, count = 0; i < n_regs; i++) {
2436		if (count == 0 ||
2437		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
2438			seq_printf(m, "%s    %08x: %08x",
2439				   i ? "\n" : "",
2440				   dump[i][0], dump[i][1]);
2441			count = 7;
2442		} else {
2443			seq_printf(m, " %08x", dump[i][1]);
2444			count--;
2445		}
2446	}
2447
2448	seq_puts(m, "\n");
2449}
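/*
 * Example output (register values are hypothetical): up to eight
 * consecutive registers are grouped per line, and a gap in the
 * offsets starts a new line, e.g.
 *
 *     00008000: 00000001 00000000 00000000 00000000 00000000 00000000 00000000 00000000
 *     00008020: 0000000f
 */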
2450
2451int dqm_debugfs_hqds(struct seq_file *m, void *data)
2452{
2453	struct device_queue_manager *dqm = data;
2454	uint32_t (*dump)[2], n_regs;
2455	int pipe, queue;
2456	int r = 0;
2457
2458	if (!dqm->sched_running) {
2459		seq_puts(m, " Device is stopped\n");
2460		return 0;
2461	}
2462
2463	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
2464					KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
2465					&dump, &n_regs);
2466	if (!r) {
2467		seq_printf(m, "  HIQ on MEC %d Pipe %d Queue %d\n",
2468			   KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
2469			   KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
2470			   KFD_CIK_HIQ_QUEUE);
2471		seq_reg_dump(m, dump, n_regs);
2472
2473		kfree(dump);
2474	}
2475
2476	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
2477		int pipe_offset = pipe * get_queues_per_pipe(dqm);
2478
2479		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
2480			if (!test_bit(pipe_offset + queue,
2481				      dqm->dev->shared_resources.cp_queue_bitmap))
2482				continue;
2483
2484			r = dqm->dev->kfd2kgd->hqd_dump(
2485				dqm->dev->adev, pipe, queue, &dump, &n_regs);
2486			if (r)
2487				break;
2488
2489			seq_printf(m, "  CP Pipe %d, Queue %d\n",
2490				  pipe, queue);
2491			seq_reg_dump(m, dump, n_regs);
2492
2493			kfree(dump);
2494		}
2495	}
2496
2497	for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
2498		for (queue = 0;
2499		     queue < dqm->dev->device_info.num_sdma_queues_per_engine;
2500		     queue++) {
2501			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
2502				dqm->dev->adev, pipe, queue, &dump, &n_regs);
2503			if (r)
2504				break;
2505
2506			seq_printf(m, "  SDMA Engine %d, RLC %d\n",
2507				  pipe, queue);
2508			seq_reg_dump(m, dump, n_regs);
2509
2510			kfree(dump);
2511		}
2512	}
2513
2514	return r;
2515}
2516
2517int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
2518{
2519	int r = 0;
2520
2521	dqm_lock(dqm);
2522	r = pm_debugfs_hang_hws(&dqm->packet_mgr);
2523	if (r) {
2524		dqm_unlock(dqm);
2525		return r;
2526	}
2527	dqm->active_runlist = true;
2528	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2529	dqm_unlock(dqm);
2530
2531	return r;
2532}
2533
2534#endif