/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

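/* Dequeue request types written to CP_HQD_DEQUEUE_REQUEST by kgd_hqd_destroy(). */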
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};

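/*
 * Select the MEC/pipe/queue/VMID target for subsequent indexed register
 * accesses via SRBM_GFX_CNTL. Must be paired with unlock_srbm().
 */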
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct amdgpu_device *adev)
{
	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

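/*
 * Map a flat KFD pipe_id onto a MEC instance and pipe, then select that
 * queue. MEC numbering starts at 1 because ME 0 is the graphics engine.
 */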
static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, queue_id, 0);
}

static void release_queue(struct amdgpu_device *adev)
{
	unlock_srbm(adev);
}

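/* Program the per-VMID shader memory configuration and APE1 aperture. */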
static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	lock_srbm(adev, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(adev);
}

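/* Bind a process PASID to a hardware VMID in both the ATC and IH blocks. */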
static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
					unsigned int vmid)
{
	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Also map the VMID to the PASID in the IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

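/* Enable timestamp and opcode-error interrupts on the compute pipe. */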
static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
{
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(adev);

	return 0;
}

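/* Compute the MMIO register offset of the RLC block for a given SDMA engine and queue. */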
static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
			m->sdma_engine_id, m->sdma_queue_id, retval);

	return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

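/*
 * Restore a compute queue's HQD registers from its MQD snapshot, write the
 * saved user-space write pointer, and activate the queue.
 */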
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t __user *wptr, uint32_t wptr_shift,
			uint32_t wptr_mask, struct mm_struct *mm)
{
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(adev, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_wptr may take the mm->mmap_lock.
	 * Release srbm_mutex to avoid a circular dependency between
	 * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(adev);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(adev, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(adev);

	return 0;
}

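/* Snapshot the HQD registers of one compute queue into a caller-freed buffer. */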
static int kgd_hqd_dump(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(adev, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		DUMP_REG(reg);

	release_queue(adev);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

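/*
 * Restore an SDMA queue: disable the ring buffer, wait for the engine to
 * drain, reload the saved ring and doorbell state, then re-enable the ring.
 */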
static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_rlc_reg_offset;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdma_rlc_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdma_rlc_rb_rptr);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdma_rlc_virtual_addr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

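/* Dump the RLC registers of one SDMA queue; reuses DUMP_REG() from kgd_hqd_dump(). */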
static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

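/*
 * A queue is considered occupied if its HQD is active and the programmed
 * ring base matches the queue address (the base registers hold address >> 8).
 */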
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(adev, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(adev);
	return retval;
}

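/* An SDMA queue is occupied if its ring buffer is still enabled. */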
static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

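/*
 * Preempt or reset a compute queue according to reset_type, then poll
 * CP_HQD_ACTIVE until the HQD is idle or the timeout (in ms) expires.
 */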
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	if (amdgpu_in_reset(adev))
		return -EIO;

	acquire_queue(adev, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: if the IQ timer is active and the wait time is close to
	 * or equal to 0, dequeueing is not safe. Wait until either the wait
	 * time is larger or the timer is cleared, make sure IQ_REQ_PEND is
	 * cleared before continuing, and ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(adev);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(adev);
	return 0;
}

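/*
 * Stop an SDMA queue: disable the ring buffer, wait for the context to go
 * idle, clear the doorbell, and save the final read pointer back to the MQD
 * so the queue can be restored later.
 */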
static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
				unsigned int utimeout)
{
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

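/*
 * Issue an SQ command to the waves selected by gfx_index_val, then restore
 * GRBM_GFX_INDEX to broadcast mode.
 */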
static int kgd_wave_control_execute(struct amdgpu_device *adev,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */

	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

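/* Look up the PASID mapped to a VMID; returns true if the mapping is valid. */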
static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

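/* Program the per-VMID base VA used to back scratch (private) memory. */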
static void set_scratch_backing_va(struct amdgpu_device *adev,
					uint64_t va, uint32_t vmid)
{
	lock_srbm(adev, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(adev);
}

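/*
 * Set the page table base for a KFD VMID. VM contexts 8-15 are reserved
 * for KFD on CIK, hence the "vmid - 8" register indexing.
 */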
static void set_vm_context_page_table_base(struct amdgpu_device *adev,
			uint32_t vmid, uint64_t page_table_base)
{
	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
		lower_32_bits(page_table_base));
}

/**
 * read_vmid_from_vmfault_reg - read the VMID from the VM fault status register
 *
 * @adev: amdgpu device pointer
 *
 * Returns the VMID field of VM_CONTEXT1_PROTECTION_FAULT_STATUS (CIK).
 */
static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
{
	uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);

	return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}

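/* Entry points exported to the KFD driver for GFX7 (CIK) hardware. */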
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.wave_control_execute = kgd_wave_control_execute,
	.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
	.set_scratch_backing_va = set_scratch_backing_va,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};